FFmpeg
ffmpeg.c
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdint.h>
34 
35 #if HAVE_ISATTY
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 #endif
43 
44 #include "libavformat/avformat.h"
45 #include "libavdevice/avdevice.h"
47 #include "libavutil/opt.h"
49 #include "libavutil/parseutils.h"
50 #include "libavutil/samplefmt.h"
51 #include "libavutil/fifo.h"
52 #include "libavutil/intreadwrite.h"
53 #include "libavutil/dict.h"
54 #include "libavutil/mathematics.h"
55 #include "libavutil/pixdesc.h"
56 #include "libavutil/avstring.h"
57 #include "libavutil/libm.h"
58 #include "libavutil/imgutils.h"
59 #include "libavutil/timestamp.h"
60 #include "libavutil/bprint.h"
61 #include "libavutil/time.h"
63 #include "libavcodec/mathops.h"
64 #include "libavformat/os_support.h"
65 
66 # include "libavfilter/avcodec.h"
67 # include "libavfilter/avfilter.h"
68 # include "libavfilter/buffersrc.h"
69 # include "libavfilter/buffersink.h"
70 
71 #if HAVE_SYS_RESOURCE_H
72 #include <sys/time.h>
73 #include <sys/types.h>
74 #include <sys/resource.h>
75 #elif HAVE_GETPROCESSTIMES
76 #include <windows.h>
77 #endif
78 #if HAVE_GETPROCESSMEMORYINFO
79 #include <windows.h>
80 #include <psapi.h>
81 #endif
82 
83 #if HAVE_SYS_SELECT_H
84 #include <sys/select.h>
85 #endif
86 
87 #if HAVE_TERMIOS_H
88 #include <fcntl.h>
89 #include <sys/ioctl.h>
90 #include <sys/time.h>
91 #include <termios.h>
92 #elif HAVE_KBHIT
93 #include <conio.h>
94 #endif
95 
96 #if HAVE_PTHREADS
97 #include <pthread.h>
98 #endif
99 
100 #include <time.h>
101 
102 #include "ffmpeg.h"
103 #include "cmdutils.h"
104 
105 #include "libavutil/avassert.h"
106 
107 const char program_name[] = "ffmpeg";
108 const int program_birth_year = 2000;
109 
110 static FILE *vstats_file;
111 
112 const char *const forced_keyframes_const_names[] = {
113  "n",
114  "n_forced",
115  "prev_forced_n",
116  "prev_forced_t",
117  "t",
118  NULL
119 };
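/* These names are the variables exposed to the expression form of the
   -force_key_frames option. As a usage sketch (the example expression comes
   from the FFmpeg documentation): -force_key_frames "expr:gte(t,n_forced*5)"
   forces a keyframe roughly every 5 seconds, since the expression becomes
   true whenever the current time t reaches 5 times the number of keyframes
   already forced (n_forced). */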
120 
121 static void do_video_stats(OutputStream *ost, int frame_size);
122 static int64_t getutime(void);
123 static int64_t getmaxrss(void);
124 
125 static int run_as_daemon = 0;
126 static int nb_frames_dup = 0;
127 static int nb_frames_drop = 0;
128 static int64_t decode_error_stat[2];
129 
130 static int current_time;
132 
134 
135 #define DEFAULT_PASS_LOGFILENAME_PREFIX "ffmpeg2pass"
136 
141 
146 
149 
150 #if HAVE_TERMIOS_H
151 
152 /* init terminal so that we can grab keys */
153 static struct termios oldtty;
154 static int restore_tty;
155 #endif
156 
157 #if HAVE_PTHREADS
158 static void free_input_threads(void);
159 #endif
160 
161 /* sub2video hack:
162  Convert subtitles to video with alpha to insert them in filter graphs.
163  This is a temporary solution until libavfilter gets real subtitles support.
164  */
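/* Rough usage sketch (not taken from this file): this path is what allows a
   command such as
       ffmpeg -i input.mkv -filter_complex "[0:v][0:s]overlay" out.mkv
   to burn bitmap subtitles into the video: each AVSubtitle is rendered into
   an RGBA frame so that an ordinary video filter like overlay can consume it. */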
165 
166 static int sub2video_get_blank_frame(InputStream *ist)
167 {
168  int ret;
169  AVFrame *frame = ist->sub2video.frame;
170 
171  av_frame_unref(frame);
172  ist->sub2video.frame->width = ist->sub2video.w;
173  ist->sub2video.frame->height = ist->sub2video.h;
175  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
176  return ret;
177  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
178  return 0;
179 }
180 
181 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
182  AVSubtitleRect *r)
183 {
184  uint32_t *pal, *dst2;
185  uint8_t *src, *src2;
186  int x, y;
187 
188  if (r->type != SUBTITLE_BITMAP) {
189  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
190  return;
191  }
192  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
193  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle overflowing\n");
194  return;
195  }
196 
197  dst += r->y * dst_linesize + r->x * 4;
198  src = r->pict.data[0];
199  pal = (uint32_t *)r->pict.data[1];
200  for (y = 0; y < r->h; y++) {
201  dst2 = (uint32_t *)dst;
202  src2 = src;
203  for (x = 0; x < r->w; x++)
204  *(dst2++) = pal[*(src2++)];
205  dst += dst_linesize;
206  src += r->pict.linesize[0];
207  }
208 }
209 
210 static void sub2video_push_ref(InputStream *ist, int64_t pts)
211 {
212  AVFrame *frame = ist->sub2video.frame;
213  int i;
214 
215  av_assert1(frame->data[0]);
216  ist->sub2video.last_pts = frame->pts = pts;
217  for (i = 0; i < ist->nb_filters; i++)
218  av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
219  AV_BUFFERSRC_FLAG_KEEP_REF |
220  AV_BUFFERSRC_FLAG_PUSH);
221 }
222 
223 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
224 {
225  int w = ist->sub2video.w, h = ist->sub2video.h;
226  AVFrame *frame = ist->sub2video.frame;
227  uint8_t *dst;
228  int dst_linesize;
229  int num_rects, i;
230  int64_t pts, end_pts;
231 
232  if (!frame)
233  return;
234  if (sub) {
235  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
236  AV_TIME_BASE_Q, ist->st->time_base);
237  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
238  AV_TIME_BASE_Q, ist->st->time_base);
239  num_rects = sub->num_rects;
240  } else {
241  pts = ist->sub2video.end_pts;
242  end_pts = INT64_MAX;
243  num_rects = 0;
244  }
245  if (sub2video_get_blank_frame(ist) < 0) {
247  "Impossible to get a blank canvas.\n");
248  return;
249  }
250  dst = frame->data [0];
251  dst_linesize = frame->linesize[0];
252  for (i = 0; i < num_rects; i++)
253  sub2video_copy_rect(dst, dst_linesize, w, h, sub->rects[i]);
254  sub2video_push_ref(ist, pts);
255  ist->sub2video.end_pts = end_pts;
256 }
257 
258 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
259 {
260  InputFile *infile = input_files[ist->file_index];
261  int i, j, nb_reqs;
262  int64_t pts2;
263 
264  /* When a frame is read from a file, examine all sub2video streams in
265  the same file and send the sub2video frame again. Otherwise, decoded
266  video frames could be accumulating in the filter graph while a filter
267  (possibly overlay) is desperately waiting for a subtitle frame. */
268  for (i = 0; i < infile->nb_streams; i++) {
269  InputStream *ist2 = input_streams[infile->ist_index + i];
270  if (!ist2->sub2video.frame)
271  continue;
272  /* subtitles seem to be usually muxed ahead of other streams;
273  if not, subtracting a larger time here is necessary */
274  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
275  /* do not send the heartbeat frame if the subtitle is already ahead */
276  if (pts2 <= ist2->sub2video.last_pts)
277  continue;
278  if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
279  sub2video_update(ist2, NULL);
280  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
281  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
282  if (nb_reqs)
283  sub2video_push_ref(ist2, pts2);
284  }
285 }
286 
287 static void sub2video_flush(InputStream *ist)
288 {
289  int i;
290 
291  if (ist->sub2video.end_pts < INT64_MAX)
292  sub2video_update(ist, NULL);
293  for (i = 0; i < ist->nb_filters; i++)
294  av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
295 }
296 
297 /* end of sub2video hack */
298 
299 static void term_exit_sigsafe(void)
300 {
301 #if HAVE_TERMIOS_H
302  if(restore_tty)
303  tcsetattr (0, TCSANOW, &oldtty);
304 #endif
305 }
306 
307 void term_exit(void)
308 {
309  av_log(NULL, AV_LOG_QUIET, "%s", "");
310  term_exit_sigsafe();
311 }
312 
313 static volatile int received_sigterm = 0;
314 static volatile int received_nb_signals = 0;
315 static volatile int transcode_init_done = 0;
316 static int main_return_code = 0;
317 
318 static void
319 sigterm_handler(int sig)
320 {
321  received_sigterm = sig;
322  received_nb_signals++;
323  term_exit_sigsafe();
324  if(received_nb_signals > 3)
325  exit(123);
326 }
327 
328 void term_init(void)
329 {
330 #if HAVE_TERMIOS_H
331  if(!run_as_daemon){
332  struct termios tty;
333  int istty = 1;
334 #if HAVE_ISATTY
335  istty = isatty(0) && isatty(2);
336 #endif
337  if (istty && tcgetattr (0, &tty) == 0) {
338  oldtty = tty;
339  restore_tty = 1;
340 
341  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
342  |INLCR|IGNCR|ICRNL|IXON);
343  tty.c_oflag |= OPOST;
344  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
345  tty.c_cflag &= ~(CSIZE|PARENB);
346  tty.c_cflag |= CS8;
347  tty.c_cc[VMIN] = 1;
348  tty.c_cc[VTIME] = 0;
349 
350  tcsetattr (0, TCSANOW, &tty);
351  }
352  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
353  }
354 #endif
355 
356  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
357  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
358 #ifdef SIGXCPU
359  signal(SIGXCPU, sigterm_handler);
360 #endif
361 }
362 
363 /* read a key without blocking */
364 static int read_key(void)
365 {
366  unsigned char ch;
367 #if HAVE_TERMIOS_H
368  int n = 1;
369  struct timeval tv;
370  fd_set rfds;
371 
372  FD_ZERO(&rfds);
373  FD_SET(0, &rfds);
374  tv.tv_sec = 0;
375  tv.tv_usec = 0;
376  n = select(1, &rfds, NULL, NULL, &tv);
377  if (n > 0) {
378  n = read(0, &ch, 1);
379  if (n == 1)
380  return ch;
381 
382  return n;
383  }
384 #elif HAVE_KBHIT
385 # if HAVE_PEEKNAMEDPIPE
386  static int is_pipe;
387  static HANDLE input_handle;
388  DWORD dw, nchars;
389  if(!input_handle){
390  input_handle = GetStdHandle(STD_INPUT_HANDLE);
391  is_pipe = !GetConsoleMode(input_handle, &dw);
392  }
393 
394  if (stdin->_cnt > 0) {
395  read(0, &ch, 1);
396  return ch;
397  }
398  if (is_pipe) {
399  /* When running under a GUI, you will end here. */
400  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
401  // input pipe may have been closed by the program that ran ffmpeg
402  return -1;
403  }
404  //Read it
405  if(nchars != 0) {
406  read(0, &ch, 1);
407  return ch;
408  }else{
409  return -1;
410  }
411  }
412 # endif
413  if(kbhit())
414  return(getch());
415 #endif
416  return -1;
417 }
418 
419 static int decode_interrupt_cb(void *ctx)
420 {
421  return received_nb_signals > transcode_init_done;
422 }
423 
425 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
426 static void ffmpeg_cleanup(int ret)
427 {
428  int i, j;
429 
430  if (do_benchmark) {
431  int maxrss = getmaxrss() / 1024;
432  printf("bench: maxrss=%ikB\n", maxrss);
433  }
434 
435  for (i = 0; i < nb_filtergraphs; i++) {
436  FilterGraph *fg = filtergraphs[i];
438  for (j = 0; j < fg->nb_inputs; j++) {
439  av_freep(&fg->inputs[j]->name);
440  av_freep(&fg->inputs[j]);
441  }
442  av_freep(&fg->inputs);
443  for (j = 0; j < fg->nb_outputs; j++) {
444  av_freep(&fg->outputs[j]->name);
445  av_freep(&fg->outputs[j]);
446  }
447  av_freep(&fg->outputs);
448  av_freep(&fg->graph_desc);
449 
450  av_freep(&filtergraphs[i]);
451  }
452  av_freep(&filtergraphs);
453 
455 
456  /* close files */
457  for (i = 0; i < nb_output_files; i++) {
458  OutputFile *of = output_files[i];
459  AVFormatContext *s = of->ctx;
460  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
461  avio_closep(&s->pb);
462  avformat_free_context(s);
463  av_dict_free(&of->opts);
464 
465  av_freep(&output_files[i]);
466  }
467  for (i = 0; i < nb_output_streams; i++) {
468  OutputStream *ost = output_streams[i];
469  AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
470  while (bsfc) {
471  AVBitStreamFilterContext *next = bsfc->next;
472  av_bitstream_filter_close(bsfc);
473  bsfc = next;
474  }
475  ost->bitstream_filters = NULL;
477  av_frame_free(&ost->last_frame);
478 
479  av_parser_close(ost->parser);
480 
481  av_freep(&ost->forced_keyframes);
483  av_freep(&ost->avfilter);
484  av_freep(&ost->logfile_prefix);
485 
487  ost->audio_channels_mapped = 0;
488 
490 
491  av_freep(&output_streams[i]);
492  }
493 #if HAVE_PTHREADS
494  free_input_threads();
495 #endif
496  for (i = 0; i < nb_input_files; i++) {
497  avformat_close_input(&input_files[i]->ctx);
498  av_freep(&input_files[i]);
499  }
500  for (i = 0; i < nb_input_streams; i++) {
501  InputStream *ist = input_streams[i];
502 
505  av_dict_free(&ist->decoder_opts);
508  av_freep(&ist->filters);
509  av_freep(&ist->hwaccel_device);
510 
512 
513  av_freep(&input_streams[i]);
514  }
515 
516  if (vstats_file)
517  fclose(vstats_file);
519 
520  av_freep(&input_streams);
521  av_freep(&input_files);
522  av_freep(&output_streams);
523  av_freep(&output_files);
524 
525  uninit_opts();
526 
528 
529  if (received_sigterm) {
530  av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
531  (int) received_sigterm);
532  } else if (ret && transcode_init_done) {
533  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
534  }
535  term_exit();
536 }
537 
538 static void remove_avoptions(AVDictionary **a, AVDictionary *b)
539 {
540  AVDictionaryEntry *t = NULL;
541 
542  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
543  av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
544  }
545 }
546 
547 static void assert_avoptions(AVDictionary *m)
548 {
549  AVDictionaryEntry *t;
550  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
551  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
552  exit_program(1);
553  }
554 }
555 
556 static void abort_codec_experimental(AVCodec *c, int encoder)
557 {
558  exit_program(1);
559 }
560 
561 static void update_benchmark(const char *fmt, ...)
562 {
563  if (do_benchmark_all) {
564  int64_t t = getutime();
565  va_list va;
566  char buf[1024];
567 
568  if (fmt) {
569  va_start(va, fmt);
570  vsnprintf(buf, sizeof(buf), fmt, va);
571  va_end(va);
572  printf("bench: %8"PRIu64" %s \n", t - current_time, buf);
573  }
574  current_time = t;
575  }
576 }
577 
578 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
579 {
580  int i;
581  for (i = 0; i < nb_output_streams; i++) {
582  OutputStream *ost2 = output_streams[i];
583  ost2->finished |= ost == ost2 ? this_stream : others;
584  }
585 }
586 
587 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
588 {
589  AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
590  AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
591  int ret;
592 
593  if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
594  ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
595  if (ost->st->codec->extradata) {
596  memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
597  ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
598  }
599  }
600 
601  if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
602  (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
603  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
604 
605  /*
606  * Audio encoders may split the packets -- #frames in != #packets out.
607  * But there is no reordering, so we can limit the number of output packets
608  * by simply dropping them here.
609  * Counting encoded video frames needs to be done separately because of
610  * reordering, see do_video_out()
611  */
612  if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
613  if (ost->frame_number >= ost->max_frames) {
614  av_free_packet(pkt);
615  return;
616  }
617  ost->frame_number++;
618  }
619 
620  if (bsfc)
621  av_packet_split_side_data(pkt);
622 
623  while (bsfc) {
624  AVPacket new_pkt = *pkt;
625  AVDictionaryEntry *bsf_arg = av_dict_get(ost->bsf_args,
626  bsfc->filter->name,
627  NULL, 0);
628  int a = av_bitstream_filter_filter(bsfc, avctx,
629  bsf_arg ? bsf_arg->value : NULL,
630  &new_pkt.data, &new_pkt.size,
631  pkt->data, pkt->size,
632  pkt->flags & AV_PKT_FLAG_KEY);
633  if(a == 0 && new_pkt.data != pkt->data && new_pkt.destruct) {
634  uint8_t *t = av_malloc(new_pkt.size + FF_INPUT_BUFFER_PADDING_SIZE); //the new should be a subset of the old so cannot overflow
635  if(t) {
636  memcpy(t, new_pkt.data, new_pkt.size);
637  memset(t + new_pkt.size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
638  new_pkt.data = t;
639  new_pkt.buf = NULL;
640  a = 1;
641  } else
642  a = AVERROR(ENOMEM);
643  }
644  if (a > 0) {
645  pkt->side_data = NULL;
646  pkt->side_data_elems = 0;
647  av_free_packet(pkt);
648  new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
649  av_buffer_default_free, NULL, 0);
650  if (!new_pkt.buf)
651  exit_program(1);
652  } else if (a < 0) {
653  av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
654  bsfc->filter->name, pkt->stream_index,
655  avctx->codec ? avctx->codec->name : "copy");
656  print_error("", a);
657  if (exit_on_error)
658  exit_program(1);
659  }
660  *pkt = new_pkt;
661 
662  bsfc = bsfc->next;
663  }
664 
665  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
666  if (pkt->dts != AV_NOPTS_VALUE &&
667  pkt->pts != AV_NOPTS_VALUE &&
668  pkt->dts > pkt->pts) {
669  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
670  pkt->dts, pkt->pts,
671  ost->file_index, ost->st->index);
672  pkt->pts =
673  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
674  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
675  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
676  }
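 /* The expression above selects the median of the three candidate timestamps:
    the sum minus the minimum minus the maximum leaves the middle value. For
    example, with pkt->pts = 100, pkt->dts = 120 and ost->last_mux_dts + 1 = 90,
    the result is 310 - 90 - 120 = 100, so both pts and dts become 100. */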
677  if(
678  (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
679  pkt->dts != AV_NOPTS_VALUE &&
680  ost->last_mux_dts != AV_NOPTS_VALUE) {
681  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
682  if (pkt->dts < max) {
683  int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
684  av_log(s, loglevel, "Non-monotonous DTS in output stream "
685  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
686  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
687  if (exit_on_error) {
688  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
689  exit_program(1);
690  }
691  av_log(s, loglevel, "changing to %"PRId64". This may result "
692  "in incorrect timestamps in the output file.\n",
693  max);
694  if(pkt->pts >= pkt->dts)
695  pkt->pts = FFMAX(pkt->pts, max);
696  pkt->dts = max;
697  }
698  }
699  }
700  ost->last_mux_dts = pkt->dts;
701 
702  ost->data_size += pkt->size;
703  ost->packets_written++;
704 
705  pkt->stream_index = ost->index;
706 
707  if (debug_ts) {
708  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
709  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
711  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
712  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
713  pkt->size
714  );
715  }
716 
717  ret = av_interleaved_write_frame(s, pkt);
718  if (ret < 0) {
719  print_error("av_interleaved_write_frame()", ret);
720  main_return_code = 1;
722  }
723  av_free_packet(pkt);
724 }
725 
726 static void close_output_stream(OutputStream *ost)
727 {
728  OutputFile *of = output_files[ost->file_index];
729 
730  ost->finished |= ENCODER_FINISHED;
731  if (of->shortest) {
732  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
733  of->recording_time = FFMIN(of->recording_time, end);
734  }
735 }
736 
737 static int check_recording_time(OutputStream *ost)
738 {
739  OutputFile *of = output_files[ost->file_index];
740 
741  if (of->recording_time != INT64_MAX &&
742  av_compare_ts(ost->sync_opts, ost->enc_ctx->time_base, of->recording_time,
743  AV_TIME_BASE_Q) >= 0) {
744  close_output_stream(ost);
745  return 0;
746  }
747  return 1;
748 }
749 
751  AVFrame *frame)
752 {
753  AVCodecContext *enc = ost->enc_ctx;
754  AVPacket pkt;
755  int got_packet = 0;
756 
757  av_init_packet(&pkt);
758  pkt.data = NULL;
759  pkt.size = 0;
760 
761  if (!check_recording_time(ost))
762  return;
763 
764  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
765  frame->pts = ost->sync_opts;
766  ost->sync_opts = frame->pts + frame->nb_samples;
767  ost->samples_encoded += frame->nb_samples;
768  ost->frames_encoded++;
769 
770  av_assert0(pkt.size || !pkt.data);
772  if (debug_ts) {
773  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
774  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
775  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
776  enc->time_base.num, enc->time_base.den);
777  }
778 
779  if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
780  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
781  exit_program(1);
782  }
783  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
784 
785  if (got_packet) {
786  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
787 
788  if (debug_ts) {
789  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
790  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
791  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
792  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
793  }
794 
795  write_frame(s, &pkt, ost);
796  }
797 }
798 
799 static void do_subtitle_out(AVFormatContext *s,
800  OutputStream *ost,
801  InputStream *ist,
802  AVSubtitle *sub)
803 {
804  int subtitle_out_max_size = 1024 * 1024;
805  int subtitle_out_size, nb, i;
806  AVCodecContext *enc;
807  AVPacket pkt;
808  int64_t pts;
809 
810  if (sub->pts == AV_NOPTS_VALUE) {
811  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
812  if (exit_on_error)
813  exit_program(1);
814  return;
815  }
816 
817  enc = ost->enc_ctx;
818 
819  if (!subtitle_out) {
820  subtitle_out = av_malloc(subtitle_out_max_size);
821  if (!subtitle_out) {
822  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
823  exit_program(1);
824  }
825  }
826 
827  /* Note: DVB subtitles need one packet to draw them and another
828  packet to clear them */
829  /* XXX: signal it in the codec context ? */
830  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
831  nb = 2;
832  else
833  nb = 1;
834 
835  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
836  pts = sub->pts;
837  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
838  pts -= output_files[ost->file_index]->start_time;
839  for (i = 0; i < nb; i++) {
840  unsigned save_num_rects = sub->num_rects;
841 
842  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
843  if (!check_recording_time(ost))
844  return;
845 
846  sub->pts = pts;
847  // start_display_time is required to be 0
848  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
849  sub->end_display_time -= sub->start_display_time;
850  sub->start_display_time = 0;
851  if (i == 1)
852  sub->num_rects = 0;
853 
854  ost->frames_encoded++;
855 
856  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
857  subtitle_out_max_size, sub);
858  if (i == 1)
859  sub->num_rects = save_num_rects;
860  if (subtitle_out_size < 0) {
861  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
862  exit_program(1);
863  }
864 
865  av_init_packet(&pkt);
866  pkt.data = subtitle_out;
867  pkt.size = subtitle_out_size;
868  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
869  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
870  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
871  /* XXX: the pts correction is handled here. Maybe handling
872  it in the codec would be better */
873  if (i == 0)
874  pkt.pts += 90 * sub->start_display_time;
875  else
876  pkt.pts += 90 * sub->end_display_time;
877  }
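 /* Illustration: the display times are in milliseconds and the correction above
    assumes the usual 90 kHz MPEG-TS stream time base, so a start_display_time
    of 500 ms shifts pkt.pts by 90 * 500 = 45000 ticks. */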
878  pkt.dts = pkt.pts;
879  write_frame(s, &pkt, ost);
880  }
881 }
882 
883 static void do_video_out(AVFormatContext *s,
884  OutputStream *ost,
885  AVFrame *next_picture,
886  double sync_ipts)
887 {
888  int ret, format_video_sync;
889  AVPacket pkt;
890  AVCodecContext *enc = ost->enc_ctx;
891  AVCodecContext *mux_enc = ost->st->codec;
892  int nb_frames, nb0_frames, i;
893  double delta, delta0;
894  double duration = 0;
895  int frame_size = 0;
896  InputStream *ist = NULL;
897  AVFilterContext *filter = ost->filter->filter;
898 
899  if (ost->source_index >= 0)
900  ist = input_streams[ost->source_index];
901 
902  if (filter->inputs[0]->frame_rate.num > 0 &&
903  filter->inputs[0]->frame_rate.den > 0)
904  duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
905 
906  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
907  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
908 
909  if (!ost->filters_script &&
910  !ost->filters &&
911  next_picture &&
912  ist &&
913  lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
914  duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
915  }
916 
917  if (!next_picture) {
918  //end, flushing
919  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
920  ost->last_nb0_frames[1],
921  ost->last_nb0_frames[2]);
922  } else {
923  delta0 = sync_ipts - ost->sync_opts;
924  delta = delta0 + duration;
925 
926  /* by default, we output a single frame */
927  nb0_frames = 0;
928  nb_frames = 1;
929 
930  format_video_sync = video_sync_method;
931  if (format_video_sync == VSYNC_AUTO) {
932  if(!strcmp(s->oformat->name, "avi")) {
933  format_video_sync = VSYNC_VFR;
934  } else
935  format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
936  if ( ist
937  && format_video_sync == VSYNC_CFR
938  && input_files[ist->file_index]->ctx->nb_streams == 1
939  && input_files[ist->file_index]->input_ts_offset == 0) {
940  format_video_sync = VSYNC_VSCFR;
941  }
942  if (format_video_sync == VSYNC_CFR && copy_ts) {
943  format_video_sync = VSYNC_VSCFR;
944  }
945  }
946 
947  if (delta0 < 0 &&
948  delta > 0 &&
949  format_video_sync != VSYNC_PASSTHROUGH &&
950  format_video_sync != VSYNC_DROP) {
951  double cor = FFMIN(-delta0, duration);
952  if (delta0 < -0.6) {
953  av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
954  } else
955  av_log(NULL, AV_LOG_DEBUG, "Cliping frame in rate conversion by %f\n", -delta0);
956  sync_ipts += cor;
957  duration -= cor;
958  delta0 += cor;
959  }
960 
961  switch (format_video_sync) {
962  case VSYNC_VSCFR:
963  if (ost->frame_number == 0 && delta - duration >= 0.5) {
964  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta - duration));
965  delta = duration;
966  delta0 = 0;
967  ost->sync_opts = lrint(sync_ipts);
968  }
969  case VSYNC_CFR:
970  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
971  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
972  nb_frames = 0;
973  } else if (delta < -1.1)
974  nb_frames = 0;
975  else if (delta > 1.1) {
976  nb_frames = lrintf(delta);
977  if (delta0 > 1.1)
978  nb0_frames = lrintf(delta0 - 0.6);
979  }
980  break;
981  case VSYNC_VFR:
982  if (delta <= -0.6)
983  nb_frames = 0;
984  else if (delta > 0.6)
985  ost->sync_opts = lrint(sync_ipts);
986  break;
987  case VSYNC_DROP:
988  case VSYNC_PASSTHROUGH:
989  ost->sync_opts = lrint(sync_ipts);
990  break;
991  default:
992  av_assert0(0);
993  }
994  }
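 /* Rough worked example of the CFR branch above: if the incoming frame lands
    about 2.6 encoder ticks past the next expected output position (delta ~= 2.6),
    nb_frames = lrintf(2.6) = 3, so the picture is emitted three times to keep the
    output rate constant; a delta below -1.1 drops the frame (nb_frames = 0). */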
995 
996  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
997  nb0_frames = FFMIN(nb0_frames, nb_frames);
998 
999  memmove(ost->last_nb0_frames + 1,
1000  ost->last_nb0_frames,
1001  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1002  ost->last_nb0_frames[0] = nb0_frames;
1003 
1004  if (nb0_frames == 0 && ost->last_droped) {
1005  nb_frames_drop++;
1007  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1008  ost->frame_number, ost->st->index, ost->last_frame->pts);
1009  }
1010  if (nb_frames > (nb0_frames && ost->last_droped) + (nb_frames > nb0_frames)) {
1011  if (nb_frames > dts_error_threshold * 30) {
1012  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1013  nb_frames_drop++;
1014  return;
1015  }
1016  nb_frames_dup += nb_frames - (nb0_frames && ost->last_droped) - (nb_frames > nb0_frames);
1017  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1018  }
1019  ost->last_droped = nb_frames == nb0_frames && next_picture;
1020 
1021  /* duplicates frame if needed */
1022  for (i = 0; i < nb_frames; i++) {
1023  AVFrame *in_picture;
1024  av_init_packet(&pkt);
1025  pkt.data = NULL;
1026  pkt.size = 0;
1027 
1028  if (i < nb0_frames && ost->last_frame) {
1029  in_picture = ost->last_frame;
1030  } else
1031  in_picture = next_picture;
1032 
1033  if (!in_picture)
1034  return;
1035 
1036  in_picture->pts = ost->sync_opts;
1037 
1038 #if 1
1039  if (!check_recording_time(ost))
1040 #else
1041  if (ost->frame_number >= ost->max_frames)
1042 #endif
1043  return;
1044 
1045  if (s->oformat->flags & AVFMT_RAWPICTURE &&
1046  enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1047  /* raw pictures are written as AVPicture structure to
1048  avoid any copies. We temporarily support the older
1049  method as well. */
1050  if (in_picture->interlaced_frame)
1051  mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1052  else
1053  mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1054  pkt.data = (uint8_t *)in_picture;
1055  pkt.size = sizeof(AVPicture);
1056  pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1057  pkt.flags |= AV_PKT_FLAG_KEY;
1058 
1059  write_frame(s, &pkt, ost);
1060  } else {
1061  int got_packet, forced_keyframe = 0;
1062  double pts_time;
1063 
1064  if (enc->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME) &&
1065  ost->top_field_first >= 0)
1066  in_picture->top_field_first = !!ost->top_field_first;
1067 
1068  if (in_picture->interlaced_frame) {
1069  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1070  mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1071  else
1072  mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1073  } else
1074  mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1075 
1076  in_picture->quality = enc->global_quality;
1077  in_picture->pict_type = 0;
1078 
1079  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1080  in_picture->pts * av_q2d(enc->time_base) : NAN;
1081  if (ost->forced_kf_index < ost->forced_kf_count &&
1082  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1083  ost->forced_kf_index++;
1084  forced_keyframe = 1;
1085  } else if (ost->forced_keyframes_pexpr) {
1086  double res;
1087  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1088  res = av_expr_eval(ost->forced_keyframes_pexpr,
1089  ost->forced_keyframes_expr_const_values, NULL);
1090  av_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1091  ost->forced_keyframes_expr_const_values[FKF_N],
1092  ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1093  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1094  ost->forced_keyframes_expr_const_values[FKF_T],
1095  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1096  res);
1097  if (res) {
1098  forced_keyframe = 1;
1099  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1100  ost->forced_keyframes_expr_const_values[FKF_N];
1101  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1102  ost->forced_keyframes_expr_const_values[FKF_T];
1103  ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1104  }
1105 
1107  } else if ( ost->forced_keyframes
1108  && !strncmp(ost->forced_keyframes, "source", 6)
1109  && in_picture->key_frame==1) {
1110  forced_keyframe = 1;
1111  }
1112 
1113  if (forced_keyframe) {
1114  in_picture->pict_type = AV_PICTURE_TYPE_I;
1115  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1116  }
1117 
1119  if (debug_ts) {
1120  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1121  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1122  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1123  enc->time_base.num, enc->time_base.den);
1124  }
1125 
1126  ost->frames_encoded++;
1127 
1128  ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1129  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1130  if (ret < 0) {
1131  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1132  exit_program(1);
1133  }
1134 
1135  if (got_packet) {
1136  if (debug_ts) {
1137  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1138  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1139  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1140  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1141  }
1142 
1143  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & CODEC_CAP_DELAY))
1144  pkt.pts = ost->sync_opts;
1145 
1146  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1147 
1148  if (debug_ts) {
1149  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1150  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1151  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1152  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1153  }
1154 
1155  frame_size = pkt.size;
1156  write_frame(s, &pkt, ost);
1157 
1158  /* if two pass, output log */
1159  if (ost->logfile && enc->stats_out) {
1160  fprintf(ost->logfile, "%s", enc->stats_out);
1161  }
1162  }
1163  }
1164  ost->sync_opts++;
1165  /*
1166  * For video, number of frames in == number of packets out.
1167  * But there may be reordering, so we can't throw away frames on encoder
1168  * flush, we need to limit them here, before they go into encoder.
1169  */
1170  ost->frame_number++;
1171 
1172  if (vstats_filename && frame_size)
1173  do_video_stats(ost, frame_size);
1174  }
1175 
1176  if (!ost->last_frame)
1177  ost->last_frame = av_frame_alloc();
1178  av_frame_unref(ost->last_frame);
1179  if (next_picture)
1180  av_frame_ref(ost->last_frame, next_picture);
1181 }
1182 
1183 static double psnr(double d)
1184 {
1185  return -10.0 * log(d) / log(10.0);
1186 }
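/* psnr() expects d to be the mean squared error already normalized by the peak
   value squared (callers pass error / (width * height * 255.0 * 255.0)), so this
   is the usual PSNR = 10 * log10(MAX^2 / MSE) written as -10 * log10(d).
   For example, d = 1e-4 gives 40 dB. */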
1187 
1188 static void do_video_stats(OutputStream *ost, int frame_size)
1189 {
1190  AVCodecContext *enc;
1191  int frame_number;
1192  double ti1, bitrate, avg_bitrate;
1193 
1194  /* this is executed just the first time do_video_stats is called */
1195  if (!vstats_file) {
1196  vstats_file = fopen(vstats_filename, "w");
1197  if (!vstats_file) {
1198  perror("fopen");
1199  exit_program(1);
1200  }
1201  }
1202 
1203  enc = ost->enc_ctx;
1204  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1205  frame_number = ost->st->nb_frames;
1206  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame ? enc->coded_frame->quality / (float)FF_QP2LAMBDA : 0);
1207  if (enc->coded_frame && (enc->flags&CODEC_FLAG_PSNR))
1208  fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1209 
1210  fprintf(vstats_file,"f_size= %6d ", frame_size);
1211  /* compute pts value */
1212  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1213  if (ti1 < 0.01)
1214  ti1 = 0.01;
1215 
1216  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1217  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1218  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1219  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
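 /* Example of the bitrate line above: with an encoder time base of 1/25 (25 fps)
    and a 20000-byte frame, bitrate = 20000 * 8 / 0.04 / 1000 = 4000 kbit/s;
    avg_bitrate instead divides all bytes written so far by the stream time ti1. */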
1220  fprintf(vstats_file, "type= %c\n", enc->coded_frame ? av_get_picture_type_char(enc->coded_frame->pict_type) : 'I');
1221  }
1222 }
1223 
1224 static void finish_output_stream(OutputStream *ost)
1225 {
1226  OutputFile *of = output_files[ost->file_index];
1227  int i;
1228 
1229  ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1230 
1231  if (of->shortest) {
1232  for (i = 0; i < of->ctx->nb_streams; i++)
1233  output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1234  }
1235 }
1236 
1237 /**
1238  * Get and encode new output from any of the filtergraphs, without causing
1239  * activity.
1240  *
1241  * @return 0 for success, <0 for severe errors
1242  */
1243 static int reap_filters(int flush)
1244 {
1245  AVFrame *filtered_frame = NULL;
1246  int i;
1247 
1248  /* Reap all buffers present in the buffer sinks */
1249  for (i = 0; i < nb_output_streams; i++) {
1250  OutputStream *ost = output_streams[i];
1251  OutputFile *of = output_files[ost->file_index];
1252  AVFilterContext *filter;
1253  AVCodecContext *enc = ost->enc_ctx;
1254  int ret = 0;
1255 
1256  if (!ost->filter)
1257  continue;
1258  filter = ost->filter->filter;
1259 
1260  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1261  return AVERROR(ENOMEM);
1262  }
1263  filtered_frame = ost->filtered_frame;
1264 
1265  while (1) {
1266  double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1267  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1268  AV_BUFFERSINK_FLAG_NO_REQUEST);
1269  if (ret < 0) {
1270  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1272  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1273  } else if (flush && ret == AVERROR_EOF) {
1274  if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1275  do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1276  }
1277  break;
1278  }
1279  if (ost->finished) {
1280  av_frame_unref(filtered_frame);
1281  continue;
1282  }
1283  if (filtered_frame->pts != AV_NOPTS_VALUE) {
1284  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1285  AVRational tb = enc->time_base;
1286  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1287 
1288  tb.den <<= extra_bits;
1289  float_pts =
1290  av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1291  av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1292  float_pts /= 1 << extra_bits;
1293  // avoid exact midpoints to reduce the chance of rounding differences; this can be removed once the fps code is changed to work with integers
1294  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1295 
1296  filtered_frame->pts =
1297  av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1298  av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1299  }
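 /* Worked example of the extra precision above (numbers for illustration only,
    start_time assumed 0): with a filter time base of 1/1000 and an encoder time
    base of 1/25, extra_bits = av_clip(29 - av_log2(25), 0, 16) = 16, so tb becomes
    1/(25 << 16). A frame at 60 ms rescales to 98304 in tb and float_pts =
    98304 / 65536 = 1.5 (before the small anti-midpoint nudge), while the integer
    filtered_frame->pts in 1/25 units has to round to 2. */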
1300  //if (ost->source_index >= 0)
1301  // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1302 
1303  switch (filter->inputs[0]->type) {
1304  case AVMEDIA_TYPE_VIDEO:
1305  if (!ost->frame_aspect_ratio.num)
1306  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1307 
1308  if (debug_ts) {
1309  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1310  av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1311  float_pts,
1312  enc->time_base.num, enc->time_base.den);
1313  }
1314 
1315  do_video_out(of->ctx, ost, filtered_frame, float_pts);
1316  break;
1317  case AVMEDIA_TYPE_AUDIO:
1318  if (!(enc->codec->capabilities & CODEC_CAP_PARAM_CHANGE) &&
1319  enc->channels != av_frame_get_channels(filtered_frame)) {
1321  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1322  break;
1323  }
1324  do_audio_out(of->ctx, ost, filtered_frame);
1325  break;
1326  default:
1327  // TODO support subtitle filters
1328  av_assert0(0);
1329  }
1330 
1331  av_frame_unref(filtered_frame);
1332  }
1333  }
1334 
1335  return 0;
1336 }
1337 
1338 static void print_final_stats(int64_t total_size)
1339 {
1340  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1341  uint64_t subtitle_size = 0;
1342  uint64_t data_size = 0;
1343  float percent = -1.0;
1344  int i, j;
1345  int pass1_used = 1;
1346 
1347  for (i = 0; i < nb_output_streams; i++) {
1348  OutputStream *ost = output_streams[i];
1349  switch (ost->enc_ctx->codec_type) {
1350  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1351  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1352  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1353  default: other_size += ost->data_size; break;
1354  }
1355  extra_size += ost->enc_ctx->extradata_size;
1356  data_size += ost->data_size;
1357  if ( (ost->enc_ctx->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1358  != CODEC_FLAG_PASS1)
1359  pass1_used = 0;
1360  }
1361 
1362  if (data_size && total_size>0 && total_size >= data_size)
1363  percent = 100.0 * (total_size - data_size) / data_size;
1364 
1365  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1366  video_size / 1024.0,
1367  audio_size / 1024.0,
1368  subtitle_size / 1024.0,
1369  other_size / 1024.0,
1370  extra_size / 1024.0);
1371  if (percent >= 0.0)
1372  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1373  else
1374  av_log(NULL, AV_LOG_INFO, "unknown");
1375  av_log(NULL, AV_LOG_INFO, "\n");
1376 
1377  /* print verbose per-stream stats */
1378  for (i = 0; i < nb_input_files; i++) {
1379  InputFile *f = input_files[i];
1380  uint64_t total_packets = 0, total_size = 0;
1381 
1382  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1383  i, f->ctx->filename);
1384 
1385  for (j = 0; j < f->nb_streams; j++) {
1386  InputStream *ist = input_streams[f->ist_index + j];
1387  enum AVMediaType type = ist->dec_ctx->codec_type;
1388 
1389  total_size += ist->data_size;
1390  total_packets += ist->nb_packets;
1391 
1392  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1393  i, j, media_type_string(type));
1394  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1395  ist->nb_packets, ist->data_size);
1396 
1397  if (ist->decoding_needed) {
1398  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1399  ist->frames_decoded);
1400  if (type == AVMEDIA_TYPE_AUDIO)
1401  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1402  av_log(NULL, AV_LOG_VERBOSE, "; ");
1403  }
1404 
1405  av_log(NULL, AV_LOG_VERBOSE, "\n");
1406  }
1407 
1408  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1409  total_packets, total_size);
1410  }
1411 
1412  for (i = 0; i < nb_output_files; i++) {
1413  OutputFile *of = output_files[i];
1414  uint64_t total_packets = 0, total_size = 0;
1415 
1416  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1417  i, of->ctx->filename);
1418 
1419  for (j = 0; j < of->ctx->nb_streams; j++) {
1420  OutputStream *ost = output_streams[of->ost_index + j];
1421  enum AVMediaType type = ost->enc_ctx->codec_type;
1422 
1423  total_size += ost->data_size;
1424  total_packets += ost->packets_written;
1425 
1426  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1427  i, j, media_type_string(type));
1428  if (ost->encoding_needed) {
1429  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1430  ost->frames_encoded);
1431  if (type == AVMEDIA_TYPE_AUDIO)
1432  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1433  av_log(NULL, AV_LOG_VERBOSE, "; ");
1434  }
1435 
1436  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1437  ost->packets_written, ost->data_size);
1438 
1439  av_log(NULL, AV_LOG_VERBOSE, "\n");
1440  }
1441 
1442  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1443  total_packets, total_size);
1444  }
1445  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1446  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1447  if (pass1_used) {
1448  av_log(NULL, AV_LOG_WARNING, "\n");
1449  } else {
1450  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1451  }
1452  }
1453 }
1454 
1455 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1456 {
1457  char buf[1024];
1458  AVBPrint buf_script;
1459  OutputStream *ost;
1460  AVFormatContext *oc;
1461  int64_t total_size;
1462  AVCodecContext *enc;
1463  int frame_number, vid, i;
1464  double bitrate;
1465  int64_t pts = INT64_MIN;
1466  static int64_t last_time = -1;
1467  static int qp_histogram[52];
1468  int hours, mins, secs, us;
1469 
1470  if (!print_stats && !is_last_report && !progress_avio)
1471  return;
1472 
1473  if (!is_last_report) {
1474  if (last_time == -1) {
1475  last_time = cur_time;
1476  return;
1477  }
1478  if ((cur_time - last_time) < 500000)
1479  return;
1480  last_time = cur_time;
1481  }
1482 
1483 
1484  oc = output_files[0]->ctx;
1485 
1486  total_size = avio_size(oc->pb);
1487  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1488  total_size = avio_tell(oc->pb);
1489 
1490  buf[0] = '\0';
1491  vid = 0;
1492  av_bprint_init(&buf_script, 0, 1);
1493  for (i = 0; i < nb_output_streams; i++) {
1494  float q = -1;
1495  ost = output_streams[i];
1496  enc = ost->enc_ctx;
1497  if (!ost->stream_copy && enc->coded_frame)
1498  q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
1499  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1500  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1501  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1502  ost->file_index, ost->index, q);
1503  }
1504  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1505  float fps, t = (cur_time-timer_start) / 1000000.0;
1506 
1507  frame_number = ost->frame_number;
1508  fps = t > 1 ? frame_number / t : 0;
1509  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1510  frame_number, fps < 9.95, fps, q);
1511  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1512  av_bprintf(&buf_script, "fps=%.1f\n", fps);
1513  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1514  ost->file_index, ost->index, q);
1515  if (is_last_report)
1516  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1517  if (qp_hist) {
1518  int j;
1519  int qp = lrintf(q);
1520  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1521  qp_histogram[qp]++;
1522  for (j = 0; j < 32; j++)
1523  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
1524  }
1525  if ((enc->flags&CODEC_FLAG_PSNR) && (enc->coded_frame || is_last_report)) {
1526  int j;
1527  double error, error_sum = 0;
1528  double scale, scale_sum = 0;
1529  double p;
1530  char type[3] = { 'Y','U','V' };
1531  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1532  for (j = 0; j < 3; j++) {
1533  if (is_last_report) {
1534  error = enc->error[j];
1535  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1536  } else {
1537  error = enc->coded_frame->error[j];
1538  scale = enc->width * enc->height * 255.0 * 255.0;
1539  }
1540  if (j)
1541  scale /= 4;
1542  error_sum += error;
1543  scale_sum += scale;
1544  p = psnr(error / scale);
1545  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1546  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1547  ost->file_index, ost->index, type[j] | 32, p);
1548  }
1549  p = psnr(error_sum / scale_sum);
1550  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1551  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1552  ost->file_index, ost->index, p);
1553  }
1554  vid = 1;
1555  }
1556  /* compute min output value */
1557  if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1558  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1559  ost->st->time_base, AV_TIME_BASE_Q));
1560  if (is_last_report)
1561  nb_frames_drop += ost->last_droped;
1562  }
1563 
1564  secs = FFABS(pts) / AV_TIME_BASE;
1565  us = FFABS(pts) % AV_TIME_BASE;
1566  mins = secs / 60;
1567  secs %= 60;
1568  hours = mins / 60;
1569  mins %= 60;
1570 
1571  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1572 
1573  if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1574  "size=N/A time=");
1575  else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1576  "size=%8.0fkB time=", total_size / 1024.0);
1577  if (pts < 0)
1578  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1579  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1580  "%02d:%02d:%02d.%02d ", hours, mins, secs,
1581  (100 * us) / AV_TIME_BASE);
1582 
1583  if (bitrate < 0) {
1584  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1585  av_bprintf(&buf_script, "bitrate=N/A\n");
1586  }else{
1587  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1588  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1589  }
1590 
1591  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1592  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1593  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1594  av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1595  hours, mins, secs, us);
1596 
1597  if (nb_frames_dup || nb_frames_drop)
1598  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1599  nb_frames_dup, nb_frames_drop);
1600  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1601  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1602 
1603  if (print_stats || is_last_report) {
1604  const char end = is_last_report ? '\n' : '\r';
1605  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1606  fprintf(stderr, "%s %c", buf, end);
1607  } else
1608  av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1609 
1610  fflush(stderr);
1611  }
1612 
1613  if (progress_avio) {
1614  av_bprintf(&buf_script, "progress=%s\n",
1615  is_last_report ? "end" : "continue");
1616  avio_write(progress_avio, buf_script.str,
1617  FFMIN(buf_script.len, buf_script.size - 1));
1618  avio_flush(progress_avio);
1619  av_bprint_finalize(&buf_script, NULL);
1620  if (is_last_report) {
1621  avio_closep(&progress_avio);
1622  }
1623  }
1624 
1625  if (is_last_report)
1626  print_final_stats(total_size);
1627 }
1628 
1629 static void flush_encoders(void)
1630 {
1631  int i, ret;
1632 
1633  for (i = 0; i < nb_output_streams; i++) {
1634  OutputStream *ost = output_streams[i];
1635  AVCodecContext *enc = ost->enc_ctx;
1636  AVFormatContext *os = output_files[ost->file_index]->ctx;
1637  int stop_encoding = 0;
1638 
1639  if (!ost->encoding_needed)
1640  continue;
1641 
1642  if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1643  continue;
1644  if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1645  continue;
1646 
1647  for (;;) {
1648  int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1649  const char *desc;
1650 
1651  switch (enc->codec_type) {
1652  case AVMEDIA_TYPE_AUDIO:
1653  encode = avcodec_encode_audio2;
1654  desc = "Audio";
1655  break;
1656  case AVMEDIA_TYPE_VIDEO:
1657  encode = avcodec_encode_video2;
1658  desc = "Video";
1659  break;
1660  default:
1661  stop_encoding = 1;
1662  }
1663 
1664  if (encode) {
1665  AVPacket pkt;
1666  int pkt_size;
1667  int got_packet;
1668  av_init_packet(&pkt);
1669  pkt.data = NULL;
1670  pkt.size = 0;
1671 
1673  ret = encode(enc, &pkt, NULL, &got_packet);
1674  update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
1675  if (ret < 0) {
1676  av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
1677  exit_program(1);
1678  }
1679  if (ost->logfile && enc->stats_out) {
1680  fprintf(ost->logfile, "%s", enc->stats_out);
1681  }
1682  if (!got_packet) {
1683  stop_encoding = 1;
1684  break;
1685  }
1686  if (ost->finished & MUXER_FINISHED) {
1687  av_free_packet(&pkt);
1688  continue;
1689  }
1690  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1691  pkt_size = pkt.size;
1692  write_frame(os, &pkt, ost);
1693  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1694  do_video_stats(ost, pkt_size);
1695  }
1696  }
1697 
1698  if (stop_encoding)
1699  break;
1700  }
1701  }
1702 }
1703 
1704 /*
1705  * Check whether a packet from ist should be written into ost at this time
1706  */
1707 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1708 {
1709  OutputFile *of = output_files[ost->file_index];
1710  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1711 
1712  if (ost->source_index != ist_index)
1713  return 0;
1714 
1715  if (ost->finished)
1716  return 0;
1717 
1718  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1719  return 0;
1720 
1721  return 1;
1722 }
1723 
1724 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1725 {
1726  OutputFile *of = output_files[ost->file_index];
1727  InputFile *f = input_files [ist->file_index];
1728  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1729  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1730  int64_t ist_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ist->st->time_base);
1731  AVPicture pict;
1732  AVPacket opkt;
1733 
1734  av_init_packet(&opkt);
1735 
1736  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1737  !ost->copy_initial_nonkeyframes)
1738  return;
1739 
1740  if (pkt->pts == AV_NOPTS_VALUE) {
1741  if (!ost->frame_number && ist->pts < start_time &&
1742  !ost->copy_prior_start)
1743  return;
1744  } else {
1745  if (!ost->frame_number && pkt->pts < ist_tb_start_time &&
1746  !ost->copy_prior_start)
1747  return;
1748  }
1749 
1750  if (of->recording_time != INT64_MAX &&
1751  ist->pts >= of->recording_time + start_time) {
1752  close_output_stream(ost);
1753  return;
1754  }
1755 
1756  if (f->recording_time != INT64_MAX) {
1757  start_time = f->ctx->start_time;
1758  if (f->start_time != AV_NOPTS_VALUE)
1759  start_time += f->start_time;
1760  if (ist->pts >= f->recording_time + start_time) {
1761  close_output_stream(ost);
1762  return;
1763  }
1764  }
1765 
1766  /* force the input stream PTS */
1767  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1768  ost->sync_opts++;
1769 
1770  if (pkt->pts != AV_NOPTS_VALUE)
1771  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1772  else
1773  opkt.pts = AV_NOPTS_VALUE;
1774 
1775  if (pkt->dts == AV_NOPTS_VALUE)
1776  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1777  else
1778  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1779  opkt.dts -= ost_tb_start_time;
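 /* Example with hypothetical numbers: with -ss 10 on the output (start_time = 10 s)
    and a 1/90000 output time base, ost_tb_start_time = 900000; an input packet at
    12 s (pts 1080000 in a 1/90000 input time base) is written with
    opkt.pts = 1080000 - 900000 = 180000, i.e. 2 s into the output. */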
1780 
1781  if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1782  int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1783  if(!duration)
1784  duration = ist->dec_ctx->frame_size;
1785  opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1786  (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1787  ost->st->time_base) - ost_tb_start_time;
1788  }
1789 
1790  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1791  opkt.flags = pkt->flags;
1792 
1793  // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1794  if ( ost->enc_ctx->codec_id != AV_CODEC_ID_H264
1795  && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG1VIDEO
1796  && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG2VIDEO
1797  && ost->enc_ctx->codec_id != AV_CODEC_ID_VC1
1798  ) {
1799  if (av_parser_change(ost->parser, ost->st->codec,
1800  &opkt.data, &opkt.size,
1801  pkt->data, pkt->size,
1802  pkt->flags & AV_PKT_FLAG_KEY)) {
1803  opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1804  if (!opkt.buf)
1805  exit_program(1);
1806  }
1807  } else {
1808  opkt.data = pkt->data;
1809  opkt.size = pkt->size;
1810  }
1811  av_copy_packet_side_data(&opkt, pkt);
1812 
1813  if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1814  /* store AVPicture in AVPacket, as expected by the output format */
1815  avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1816  opkt.data = (uint8_t *)&pict;
1817  opkt.size = sizeof(AVPicture);
1818  opkt.flags |= AV_PKT_FLAG_KEY;
1819  }
1820 
1821  write_frame(of->ctx, &opkt, ost);
1822 }
1823 
1824 int guess_input_channel_layout(InputStream *ist)
1825 {
1826  AVCodecContext *dec = ist->dec_ctx;
1827 
1828  if (!dec->channel_layout) {
1829  char layout_name[256];
1830 
1831  if (dec->channels > ist->guess_layout_max)
1832  return 0;
1833  dec->channel_layout = av_get_default_channel_layout(dec->channels);
1834  if (!dec->channel_layout)
1835  return 0;
1836  av_get_channel_layout_string(layout_name, sizeof(layout_name),
1837  dec->channels, dec->channel_layout);
1838  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1839  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
1840  }
1841  return 1;
1842 }
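/* Sketch of the effect: an input stream declaring 2 channels but no layout gets
   the default layout for 2 channels (stereo) and a warning such as
   "Guessed Channel Layout ... : stereo" is printed; streams with more channels
   than the -guess_layout_max limit are left untouched. */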
1843 
1844 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1845 {
1846  AVFrame *decoded_frame, *f;
1847  AVCodecContext *avctx = ist->dec_ctx;
1848  int i, ret, err = 0, resample_changed;
1849  AVRational decoded_frame_tb;
1850 
1851  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1852  return AVERROR(ENOMEM);
1853  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1854  return AVERROR(ENOMEM);
1855  decoded_frame = ist->decoded_frame;
1856 
1858  ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1859  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
1860 
1861  if (ret >= 0 && avctx->sample_rate <= 0) {
1862  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1863  ret = AVERROR_INVALIDDATA;
1864  }
1865 
1866  if (*got_output || ret<0 || pkt->size)
1867  decode_error_stat[ret<0] ++;
1868 
1869  if (!*got_output || ret < 0) {
1870  if (!pkt->size) {
1871  for (i = 0; i < ist->nb_filters; i++)
1872 #if 1
1873  av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
1874 #else
1876 #endif
1877  }
1878  return ret;
1879  }
1880 
1881  ist->samples_decoded += decoded_frame->nb_samples;
1882  ist->frames_decoded++;
1883 
1884 #if 1
1885  /* increment next_dts to use for the case where the input stream does not
1886  have timestamps or there are multiple frames in the packet */
1887  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1888  avctx->sample_rate;
1889  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1890  avctx->sample_rate;
1891 #endif
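 /* Example: a 1024-sample frame at 48000 Hz advances next_pts/next_dts by
    1024 * AV_TIME_BASE / 48000 = 21333 microseconds (AV_TIME_BASE is 1000000). */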
1892 
1893  resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1894  ist->resample_channels != avctx->channels ||
1895  ist->resample_channel_layout != decoded_frame->channel_layout ||
1896  ist->resample_sample_rate != decoded_frame->sample_rate;
1897  if (resample_changed) {
1898  char layout1[64], layout2[64];
1899 
1900  if (!guess_input_channel_layout(ist)) {
1901  av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1902  "layout for Input Stream #%d.%d\n", ist->file_index,
1903  ist->st->index);
1904  exit_program(1);
1905  }
1906  decoded_frame->channel_layout = avctx->channel_layout;
1907 
1908  av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1909  ist->resample_channel_layout);
1910  av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1911  decoded_frame->channel_layout);
1912 
1913  av_log(NULL, AV_LOG_INFO,
1914  "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1915  ist->file_index, ist->st->index,
1916  ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
1917  ist->resample_channels, layout1,
1918  decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
1919  avctx->channels, layout2);
1920 
1921  ist->resample_sample_fmt = decoded_frame->format;
1922  ist->resample_sample_rate = decoded_frame->sample_rate;
1923  ist->resample_channel_layout = decoded_frame->channel_layout;
1924  ist->resample_channels = avctx->channels;
1925 
1926  for (i = 0; i < nb_filtergraphs; i++)
1927  if (ist_in_filtergraph(filtergraphs[i], ist)) {
1928  FilterGraph *fg = filtergraphs[i];
1929  if (configure_filtergraph(fg) < 0) {
1930  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
1931  exit_program(1);
1932  }
1933  }
1934  }
1935 
1936  /* if the decoder provides a pts, use it instead of the last packet pts.
1937  the decoder could be delaying output by a packet or more. */
1938  if (decoded_frame->pts != AV_NOPTS_VALUE) {
1939  ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
1940  decoded_frame_tb = avctx->time_base;
1941  } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
1942  decoded_frame->pts = decoded_frame->pkt_pts;
1943  decoded_frame_tb = ist->st->time_base;
1944  } else if (pkt->pts != AV_NOPTS_VALUE) {
1945  decoded_frame->pts = pkt->pts;
1946  decoded_frame_tb = ist->st->time_base;
1947  }else {
1948  decoded_frame->pts = ist->dts;
1949  decoded_frame_tb = AV_TIME_BASE_Q;
1950  }
1951  pkt->pts = AV_NOPTS_VALUE;
1952  if (decoded_frame->pts != AV_NOPTS_VALUE)
1953  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
1954  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
1955  (AVRational){1, avctx->sample_rate});
1956  for (i = 0; i < ist->nb_filters; i++) {
1957  if (i < ist->nb_filters - 1) {
1958  f = ist->filter_frame;
1959  err = av_frame_ref(f, decoded_frame);
1960  if (err < 0)
1961  break;
1962  } else
1963  f = decoded_frame;
1964  err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
1965  AV_BUFFERSRC_FLAG_PUSH);
1966  if (err == AVERROR_EOF)
1967  err = 0; /* ignore */
1968  if (err < 0)
1969  break;
1970  }
1971  decoded_frame->pts = AV_NOPTS_VALUE;
1972 
1973  av_frame_unref(ist->filter_frame);
1974  av_frame_unref(decoded_frame);
1975  return err < 0 ? err : ret;
1976 }
1977 
1978 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
1979 {
1980  AVFrame *decoded_frame, *f;
1981  int i, ret = 0, err = 0, resample_changed;
1982  int64_t best_effort_timestamp;
1983  AVRational *frame_sample_aspect;
1984 
1985  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1986  return AVERROR(ENOMEM);
1987  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1988  return AVERROR(ENOMEM);
1989  decoded_frame = ist->decoded_frame;
1990  pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
1991 
1992  update_benchmark(NULL);
1993  ret = avcodec_decode_video2(ist->dec_ctx,
1994  decoded_frame, got_output, pkt);
1995  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
1996 
1997  // The following line may be required in some cases where there is no parser
1998  // or the parser does not set has_b_frames correctly
1999  if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2000  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2001  ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2002  } else
2003  av_log_ask_for_sample(
2004  ist->dec_ctx,
2005  "has_b_frames is larger in decoder than demuxer %d > %d ",
2006  ist->dec_ctx->has_b_frames,
2007  ist->st->codec->has_b_frames
2008  );
2009  }
2010 
2011  if (*got_output || ret<0 || pkt->size)
2012  decode_error_stat[ret<0] ++;
2013 
2014  if (*got_output && ret >= 0) {
2015  if (ist->dec_ctx->width != decoded_frame->width ||
2016  ist->dec_ctx->height != decoded_frame->height ||
2017  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2018  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2019  decoded_frame->width,
2020  decoded_frame->height,
2021  decoded_frame->format,
2022  ist->dec_ctx->width,
2023  ist->dec_ctx->height,
2024  ist->dec_ctx->pix_fmt);
2025  }
2026  }
2027 
2028  if (!*got_output || ret < 0) {
2029  if (!pkt->size) {
2030  for (i = 0; i < ist->nb_filters; i++)
2031 #if 1
2032  av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
2033 #else
2035 #endif
2036  }
2037  return ret;
2038  }
2039 
2040  if(ist->top_field_first>=0)
2041  decoded_frame->top_field_first = ist->top_field_first;
2042 
2043  ist->frames_decoded++;
2044 
2045  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2046  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2047  if (err < 0)
2048  goto fail;
2049  }
2050  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2051 
2052  best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2053  if(best_effort_timestamp != AV_NOPTS_VALUE)
2054  ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2055 
2056  if (debug_ts) {
2057  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2058  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2059  ist->st->index, av_ts2str(decoded_frame->pts),
2060  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2061  best_effort_timestamp,
2062  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2063  decoded_frame->key_frame, decoded_frame->pict_type,
2064  ist->st->time_base.num, ist->st->time_base.den);
2065  }
2066 
2067  pkt->size = 0;
2068 
2069  if (ist->st->sample_aspect_ratio.num)
2070  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2071 
2072  resample_changed = ist->resample_width != decoded_frame->width ||
2073  ist->resample_height != decoded_frame->height ||
2074  ist->resample_pix_fmt != decoded_frame->format;
2075  if (resample_changed) {
2076  av_log(NULL, AV_LOG_INFO,
2077  "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2078  ist->file_index, ist->st->index,
2079  ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2080  decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2081 
2082  ist->resample_width = decoded_frame->width;
2083  ist->resample_height = decoded_frame->height;
2084  ist->resample_pix_fmt = decoded_frame->format;
2085 
2086  for (i = 0; i < nb_filtergraphs; i++) {
2087  if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2088  configure_filtergraph(filtergraphs[i]) < 0) {
2089  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2090  exit_program(1);
2091  }
2092  }
2093  }
2094 
2095  frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
2096  for (i = 0; i < ist->nb_filters; i++) {
2097  if (!frame_sample_aspect->num)
2098  *frame_sample_aspect = ist->st->sample_aspect_ratio;
2099 
2100  if (i < ist->nb_filters - 1) {
2101  f = ist->filter_frame;
2102  err = av_frame_ref(f, decoded_frame);
2103  if (err < 0)
2104  break;
2105  } else
2106  f = decoded_frame;
2107  ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2108  if (ret == AVERROR_EOF) {
2109  ret = 0; /* ignore */
2110  } else if (ret < 0) {
2111  av_log(NULL, AV_LOG_FATAL,
2112  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2113  exit_program(1);
2114  }
2115  }
2116 
2117 fail:
2118  av_frame_unref(ist->filter_frame);
2119  av_frame_unref(decoded_frame);
2120  return err < 0 ? err : ret;
2121 }
2122 
2123 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2124 {
2125  AVSubtitle subtitle;
2126  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2127  &subtitle, got_output, pkt);
2128 
2129  if (*got_output || ret<0 || pkt->size)
2130  decode_error_stat[ret<0] ++;
2131 
2132  if (ret < 0 || !*got_output) {
2133  if (!pkt->size)
2134  sub2video_flush(ist);
2135  return ret;
2136  }
2137 
2138  if (ist->fix_sub_duration) {
2139  int end = 1;
2140  if (ist->prev_sub.got_output) {
2141  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2142  1000, AV_TIME_BASE);
2143  if (end < ist->prev_sub.subtitle.end_display_time) {
2144  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2145  "Subtitle duration reduced from %d to %d%s\n",
2146  ist->prev_sub.subtitle.end_display_time, end,
2147  end <= 0 ? ", dropping it" : "");
2148  ist->prev_sub.subtitle.end_display_time = end;
2149  }
2150  }
2151  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2152  FFSWAP(int, ret, ist->prev_sub.ret);
2153  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2154  if (end <= 0)
2155  goto out;
2156  }
2157 
2158  if (!*got_output)
2159  return ret;
2160 
2161  sub2video_update(ist, &subtitle);
2162 
2163  if (!subtitle.num_rects)
2164  goto out;
2165 
2166  ist->frames_decoded++;
2167 
2168  for (i = 0; i < nb_output_streams; i++) {
2169  OutputStream *ost = output_streams[i];
2170 
2171  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2172  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2173  continue;
2174 
2175  do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2176  }
2177 
2178 out:
2179  avsubtitle_free(&subtitle);
2180  return ret;
2181 }
2182 
2183 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2184 static int process_input_packet(InputStream *ist, const AVPacket *pkt)
2185 {
2186  int ret = 0, i;
2187  int got_output = 0;
2188 
2189  AVPacket avpkt;
2190  if (!ist->saw_first_ts) {
2191  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2192  ist->pts = 0;
2193  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2194  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2195  ist->pts = ist->dts; //unused but better to set it to a value that's not totally wrong
2196  }
2197  ist->saw_first_ts = 1;
2198  }
2199 
2200  if (ist->next_dts == AV_NOPTS_VALUE)
2201  ist->next_dts = ist->dts;
2202  if (ist->next_pts == AV_NOPTS_VALUE)
2203  ist->next_pts = ist->pts;
2204 
2205  if (!pkt) {
2206  /* EOF handling */
2207  av_init_packet(&avpkt);
2208  avpkt.data = NULL;
2209  avpkt.size = 0;
2210  goto handle_eof;
2211  } else {
2212  avpkt = *pkt;
2213  }
2214 
2215  if (pkt->dts != AV_NOPTS_VALUE) {
2216  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2217  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2218  ist->next_pts = ist->pts = ist->dts;
2219  }
2220 
2221  // while we have more to decode or while the decoder did output something on EOF
2222  while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2223  int duration;
2224  handle_eof:
2225 
2226  ist->pts = ist->next_pts;
2227  ist->dts = ist->next_dts;
2228 
2229  if (avpkt.size && avpkt.size != pkt->size &&
2230  !(ist->dec->capabilities & CODEC_CAP_SUBFRAMES)) {
2231  av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2232  "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2233  ist->showed_multi_packet_warning = 1;
2234  }
2235 
2236  switch (ist->dec_ctx->codec_type) {
2237  case AVMEDIA_TYPE_AUDIO:
2238  ret = decode_audio (ist, &avpkt, &got_output);
2239  break;
2240  case AVMEDIA_TYPE_VIDEO:
2241  ret = decode_video (ist, &avpkt, &got_output);
2242  if (avpkt.duration) {
2243  duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2244  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2245  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2246  duration = ((int64_t)AV_TIME_BASE *
2247  ist->dec_ctx->framerate.den * ticks) /
2248  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2249  } else
2250  duration = 0;
2251 
2252  if(ist->dts != AV_NOPTS_VALUE && duration) {
2253  ist->next_dts += duration;
2254  }else
2255  ist->next_dts = AV_NOPTS_VALUE;
2256 
2257  if (got_output)
2258  ist->next_pts += duration; //FIXME the duration is not correct in some cases
2259  break;
2260  case AVMEDIA_TYPE_SUBTITLE:
2261  ret = transcode_subtitles(ist, &avpkt, &got_output);
2262  break;
2263  default:
2264  return -1;
2265  }
2266 
2267  if (ret < 0)
2268  return ret;
2269 
2270  avpkt.dts=
2271  avpkt.pts= AV_NOPTS_VALUE;
2272 
2273  // touch data and size only if not EOF
2274  if (pkt) {
2275  if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2276  ret = avpkt.size;
2277  avpkt.data += ret;
2278  avpkt.size -= ret;
2279  }
2280  if (!got_output) {
2281  continue;
2282  }
2283  if (got_output && !pkt)
2284  break;
2285  }
2286 
2287  /* handle stream copy */
2288  if (!ist->decoding_needed) {
2289  ist->dts = ist->next_dts;
2290  switch (ist->dec_ctx->codec_type) {
2291  case AVMEDIA_TYPE_AUDIO:
2292  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2293  ist->dec_ctx->sample_rate;
2294  break;
2295  case AVMEDIA_TYPE_VIDEO:
2296  if (ist->framerate.num) {
2297  // TODO: Remove work-around for c99-to-c89 issue 7
2298  AVRational time_base_q = AV_TIME_BASE_Q;
2299  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2300  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2301  } else if (pkt->duration) {
2302  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2303  } else if(ist->dec_ctx->framerate.num != 0) {
2304  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2305  ist->next_dts += ((int64_t)AV_TIME_BASE *
2306  ist->dec_ctx->framerate.den * ticks) /
2307  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2308  }
2309  break;
2310  }
2311  ist->pts = ist->dts;
2312  ist->next_pts = ist->next_dts;
2313  }
2314  for (i = 0; pkt && i < nb_output_streams; i++) {
2315  OutputStream *ost = output_streams[i];
2316 
2317  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2318  continue;
2319 
2320  do_streamcopy(ist, ost, pkt);
2321  }
2322 
2323  return got_output;
2324 }
2325 
2326 static void print_sdp(void)
2327 {
2328  char sdp[16384];
2329  int i;
2330  int j;
2331  AVIOContext *sdp_pb;
2332  AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
2333 
2334  if (!avc)
2335  exit_program(1);
2336  for (i = 0, j = 0; i < nb_output_files; i++) {
2337  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2338  avc[j] = output_files[i]->ctx;
2339  j++;
2340  }
2341  }
2342 
2343  av_sdp_create(avc, j, sdp, sizeof(sdp));
2344 
2345  if (!sdp_filename) {
2346  printf("SDP:\n%s\n", sdp);
2347  fflush(stdout);
2348  } else {
2349  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2350  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2351  } else {
2352  avio_printf(sdp_pb, "SDP:\n%s", sdp);
2353  avio_closep(&sdp_pb);
2354  av_freep(&sdp_filename);
2355  }
2356  }
2357 
2358  av_freep(&avc);
2359 }
2360 
2361 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2362 {
2363  int i;
2364  for (i = 0; hwaccels[i].name; i++)
2365  if (hwaccels[i].pix_fmt == pix_fmt)
2366  return &hwaccels[i];
2367  return NULL;
2368 }
2369 
2370 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2371 {
2372  InputStream *ist = s->opaque;
2373  const enum AVPixelFormat *p;
2374  int ret;
2375 
2376  for (p = pix_fmts; *p != -1; p++) {
2377  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2378  const HWAccel *hwaccel;
2379 
2380  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2381  break;
2382 
2383  hwaccel = get_hwaccel(*p);
2384  if (!hwaccel ||
2385  (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2386  (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2387  continue;
2388 
2389  ret = hwaccel->init(s);
2390  if (ret < 0) {
2391  if (ist->hwaccel_id == hwaccel->id) {
2392  av_log(NULL, AV_LOG_FATAL,
2393  "%s hwaccel requested for input stream #%d:%d, "
2394  "but cannot be initialized.\n", hwaccel->name,
2395  ist->file_index, ist->st->index);
2396  return AV_PIX_FMT_NONE;
2397  }
2398  continue;
2399  }
2400  ist->active_hwaccel_id = hwaccel->id;
2401  ist->hwaccel_pix_fmt = *p;
2402  break;
2403  }
2404 
2405  return *p;
2406 }
2407 
2408 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2409 {
2410  InputStream *ist = s->opaque;
2411 
2412  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2413  return ist->hwaccel_get_buffer(s, frame, flags);
2414 
2415  return avcodec_default_get_buffer2(s, frame, flags);
2416 }
2417 
2418 static int init_input_stream(int ist_index, char *error, int error_len)
2419 {
2420  int ret;
2421  InputStream *ist = input_streams[ist_index];
2422 
2423  if (ist->decoding_needed) {
2424  AVCodec *codec = ist->dec;
2425  if (!codec) {
2426  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2427  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2428  return AVERROR(EINVAL);
2429  }
2430 
2431  ist->dec_ctx->opaque = ist;
2432  ist->dec_ctx->get_format = get_format;
2433  ist->dec_ctx->get_buffer2 = get_buffer;
2434  ist->dec_ctx->thread_safe_callbacks = 1;
2435 
2436  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2437  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2438  (ist->decoding_needed & DECODING_FOR_OST)) {
2439  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2440  if (ist->decoding_needed & DECODING_FOR_FILTER)
2441  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2442  }
2443 
2444  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2445  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2446  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2447  if (ret == AVERROR_EXPERIMENTAL)
2448  abort_codec_experimental(codec, 0);
2449 
2450  snprintf(error, error_len,
2451  "Error while opening decoder for input stream "
2452  "#%d:%d : %s",
2453  ist->file_index, ist->st->index, av_err2str(ret));
2454  return ret;
2455  }
2456  assert_avoptions(ist->decoder_opts);
2457  }
2458 
2459  ist->next_pts = AV_NOPTS_VALUE;
2460  ist->next_dts = AV_NOPTS_VALUE;
2461 
2462  return 0;
2463 }
2464 
2465 static InputStream *get_input_stream(OutputStream *ost)
2466 {
2467  if (ost->source_index >= 0)
2468  return input_streams[ost->source_index];
2469  return NULL;
2470 }
2471 
2472 static int compare_int64(const void *a, const void *b)
2473 {
2474  int64_t va = *(int64_t *)a, vb = *(int64_t *)b;
2475  return va < vb ? -1 : va > vb ? +1 : 0;
2476 }
2477 
2478 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2479  AVCodecContext *avctx)
2480 {
2481  char *p;
2482  int n = 1, i, size, index = 0;
2483  int64_t t, *pts;
2484 
2485  for (p = kf; *p; p++)
2486  if (*p == ',')
2487  n++;
2488  size = n;
2489  pts = av_malloc_array(size, sizeof(*pts));
2490  if (!pts) {
2491  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2492  exit_program(1);
2493  }
2494 
2495  p = kf;
2496  for (i = 0; i < n; i++) {
2497  char *next = strchr(p, ',');
2498 
2499  if (next)
2500  *next++ = 0;
2501 
2502  if (!memcmp(p, "chapters", 8)) {
2503 
2504  AVFormatContext *avf = output_files[ost->file_index]->ctx;
2505  int j;
2506 
2507  if (avf->nb_chapters > INT_MAX - size ||
2508  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2509  sizeof(*pts)))) {
2510  av_log(NULL, AV_LOG_FATAL,
2511  "Could not allocate forced key frames array.\n");
2512  exit_program(1);
2513  }
2514  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2515  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2516 
2517  for (j = 0; j < avf->nb_chapters; j++) {
2518  AVChapter *c = avf->chapters[j];
2519  av_assert1(index < size);
2520  pts[index++] = av_rescale_q(c->start, c->time_base,
2521  avctx->time_base) + t;
2522  }
2523 
2524  } else {
2525 
2526  t = parse_time_or_die("force_key_frames", p, 1);
2527  av_assert1(index < size);
2528  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2529 
2530  }
2531 
2532  p = next;
2533  }
2534 
2535  av_assert0(index == size);
2536  qsort(pts, size, sizeof(*pts), compare_int64);
2537  ost->forced_kf_count = size;
2538  ost->forced_kf_pts = pts;
2539 }
2540 
2541 static void report_new_stream(int input_index, AVPacket *pkt)
2542 {
2543  InputFile *file = input_files[input_index];
2544  AVStream *st = file->ctx->streams[pkt->stream_index];
2545 
2546  if (pkt->stream_index < file->nb_streams_warn)
2547  return;
2548  av_log(file->ctx, AV_LOG_WARNING,
2549  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2550  av_get_media_type_string(st->codec->codec_type),
2551  input_index, pkt->stream_index,
2552  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
2553  file->nb_streams_warn = pkt->stream_index + 1;
2554 }
2555 
2556 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2557 {
2558  AVDictionaryEntry *e;
2559 
2560  uint8_t *encoder_string;
2561  int encoder_string_len;
2562  int format_flags = 0;
2563  int codec_flags = 0;
2564 
2565  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
2566  return;
2567 
2568  e = av_dict_get(of->opts, "fflags", NULL, 0);
2569  if (e) {
2570  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2571  if (!o)
2572  return;
2573  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
2574  }
2575  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2576  if (e) {
2577  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2578  if (!o)
2579  return;
2580  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
2581  }
2582 
2583  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2584  encoder_string = av_mallocz(encoder_string_len);
2585  if (!encoder_string)
2586  exit_program(1);
2587 
2588  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & CODEC_FLAG_BITEXACT))
2589  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2590  else
2591  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2592  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
2593  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2594  AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
2595 }
2596 
2597 static int transcode_init(void)
2598 {
2599  int ret = 0, i, j, k;
2600  AVFormatContext *oc;
2601  OutputStream *ost;
2602  InputStream *ist;
2603  char error[1024] = {0};
2604  int want_sdp = 1;
2605 
2606  for (i = 0; i < nb_filtergraphs; i++) {
2607  FilterGraph *fg = filtergraphs[i];
2608  for (j = 0; j < fg->nb_outputs; j++) {
2609  OutputFilter *ofilter = fg->outputs[j];
2610  if (!ofilter->ost || ofilter->ost->source_index >= 0)
2611  continue;
2612  if (fg->nb_inputs != 1)
2613  continue;
2614  for (k = nb_input_streams-1; k >= 0 ; k--)
2615  if (fg->inputs[0]->ist == input_streams[k])
2616  break;
2617  ofilter->ost->source_index = k;
2618  }
2619  }
2620 
2621  /* init framerate emulation */
2622  for (i = 0; i < nb_input_files; i++) {
2623  InputFile *ifile = input_files[i];
2624  if (ifile->rate_emu)
2625  for (j = 0; j < ifile->nb_streams; j++)
2626  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2627  }
2628 
2629  /* output stream init */
2630  for (i = 0; i < nb_output_files; i++) {
2631  oc = output_files[i]->ctx;
2632  if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
2633  av_dump_format(oc, i, oc->filename, 1);
2634  av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
2635  return AVERROR(EINVAL);
2636  }
2637  }
2638 
2639  /* init complex filtergraphs */
2640  for (i = 0; i < nb_filtergraphs; i++)
2641  if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0)
2642  return ret;
2643 
2644  /* for each output stream, we compute the right encoding parameters */
2645  for (i = 0; i < nb_output_streams; i++) {
2646  AVCodecContext *enc_ctx;
2647  AVCodecContext *dec_ctx = NULL;
2648  ost = output_streams[i];
2649  oc = output_files[ost->file_index]->ctx;
2650  ist = get_input_stream(ost);
2651 
2652  if (ost->attachment_filename)
2653  continue;
2654 
2655  enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2656 
2657  if (ist) {
2658  dec_ctx = ist->dec_ctx;
2659 
2660  ost->st->disposition = ist->st->disposition;
2661  enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
2662  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
2663  } else {
2664  for (j=0; j<oc->nb_streams; j++) {
2665  AVStream *st = oc->streams[j];
2666  if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2667  break;
2668  }
2669  if (j == oc->nb_streams)
2670  if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2671  ost->stream_copy = 1;
2672  }
2673 
2674  if (ost->stream_copy) {
2675  AVRational sar;
2676  uint64_t extra_size;
2677 
2678  av_assert0(ist && !ost->filter);
2679 
2680  extra_size = (uint64_t)dec_ctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
2681 
2682  if (extra_size > INT_MAX) {
2683  return AVERROR(EINVAL);
2684  }
2685 
2686  /* if stream_copy is selected, no need to decode or encode */
2687  enc_ctx->codec_id = dec_ctx->codec_id;
2688  enc_ctx->codec_type = dec_ctx->codec_type;
2689 
2690  if (!enc_ctx->codec_tag) {
2691  unsigned int codec_tag;
2692  if (!oc->oformat->codec_tag ||
2693  av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2694  !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2695  enc_ctx->codec_tag = dec_ctx->codec_tag;
2696  }
2697 
2698  enc_ctx->bit_rate = dec_ctx->bit_rate;
2699  enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
2700  enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2701  enc_ctx->field_order = dec_ctx->field_order;
2702  if (dec_ctx->extradata_size) {
2703  enc_ctx->extradata = av_mallocz(extra_size);
2704  if (!enc_ctx->extradata) {
2705  return AVERROR(ENOMEM);
2706  }
2707  memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2708  }
2709  enc_ctx->extradata_size= dec_ctx->extradata_size;
2710  enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
2711 
2712  enc_ctx->time_base = ist->st->time_base;
2713  /*
2714  * Avi is a special case here because it supports variable fps but
2715  * having the fps and timebase differ significantly adds quite some
2716  * overhead
2717  */
2718  if(!strcmp(oc->oformat->name, "avi")) {
2719  if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2720  && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2721  && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2722  && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2723  || copy_tb==2){
2724  enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2725  enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2726  enc_ctx->ticks_per_frame = 2;
2727  } else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2728  && av_q2d(ist->st->time_base) < 1.0/500
2729  || copy_tb==0){
2730  enc_ctx->time_base = dec_ctx->time_base;
2731  enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2732  enc_ctx->time_base.den *= 2;
2733  enc_ctx->ticks_per_frame = 2;
2734  }
2735  } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2736  && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2737  && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2738  && strcmp(oc->oformat->name, "f4v")
2739  ) {
2740  if( copy_tb<0 && dec_ctx->time_base.den
2741  && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2742  && av_q2d(ist->st->time_base) < 1.0/500
2743  || copy_tb==0){
2744  enc_ctx->time_base = dec_ctx->time_base;
2745  enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2746  }
2747  }
2748  if ( enc_ctx->codec_tag == AV_RL32("tmcd")
2749  && dec_ctx->time_base.num < dec_ctx->time_base.den
2750  && dec_ctx->time_base.num > 0
2751  && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2752  enc_ctx->time_base = dec_ctx->time_base;
2753  }
2754 
2755  if (ist && !ost->frame_rate.num)
2756  ost->frame_rate = ist->framerate;
2757  if(ost->frame_rate.num)
2758  enc_ctx->time_base = av_inv_q(ost->frame_rate);
2759 
2760  av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2761  enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
2762 
2763  if (ist->st->nb_side_data) {
2764  ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2765  sizeof(*ist->st->side_data));
2766  if (!ost->st->side_data)
2767  return AVERROR(ENOMEM);
2768 
2769  ost->st->nb_side_data = 0;
2770  for (j = 0; j < ist->st->nb_side_data; j++) {
2771  const AVPacketSideData *sd_src = &ist->st->side_data[j];
2772  AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2773 
2774  if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2775  continue;
2776 
2777  sd_dst->data = av_malloc(sd_src->size);
2778  if (!sd_dst->data)
2779  return AVERROR(ENOMEM);
2780  memcpy(sd_dst->data, sd_src->data, sd_src->size);
2781  sd_dst->size = sd_src->size;
2782  sd_dst->type = sd_src->type;
2783  ost->st->nb_side_data++;
2784  }
2785  }
2786 
2787  ost->parser = av_parser_init(enc_ctx->codec_id);
2788 
2789  switch (enc_ctx->codec_type) {
2790  case AVMEDIA_TYPE_AUDIO:
2791  if (audio_volume != 256) {
2792  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2793  exit_program(1);
2794  }
2795  enc_ctx->channel_layout = dec_ctx->channel_layout;
2796  enc_ctx->sample_rate = dec_ctx->sample_rate;
2797  enc_ctx->channels = dec_ctx->channels;
2798  enc_ctx->frame_size = dec_ctx->frame_size;
2799  enc_ctx->audio_service_type = dec_ctx->audio_service_type;
2800  enc_ctx->block_align = dec_ctx->block_align;
2801  enc_ctx->initial_padding = dec_ctx->delay;
2802 #if FF_API_AUDIOENC_DELAY
2803  enc_ctx->delay = dec_ctx->delay;
2804 #endif
2805  if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
2806  enc_ctx->block_align= 0;
2807  if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
2808  enc_ctx->block_align= 0;
2809  break;
2810  case AVMEDIA_TYPE_VIDEO:
2811  enc_ctx->pix_fmt = dec_ctx->pix_fmt;
2812  enc_ctx->width = dec_ctx->width;
2813  enc_ctx->height = dec_ctx->height;
2814  enc_ctx->has_b_frames = dec_ctx->has_b_frames;
2815  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2816  sar =
2817  av_mul_q(ost->frame_aspect_ratio,
2818  (AVRational){ enc_ctx->height, enc_ctx->width });
2819  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2820  "with stream copy may produce invalid files\n");
2821  }
2822  else if (ist->st->sample_aspect_ratio.num)
2823  sar = ist->st->sample_aspect_ratio;
2824  else
2825  sar = dec_ctx->sample_aspect_ratio;
2826  ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
2827  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2828  ost->st->r_frame_rate = ist->st->r_frame_rate;
2829  break;
2830  case AVMEDIA_TYPE_SUBTITLE:
2831  enc_ctx->width = dec_ctx->width;
2832  enc_ctx->height = dec_ctx->height;
2833  break;
2834  case AVMEDIA_TYPE_UNKNOWN:
2835  case AVMEDIA_TYPE_DATA:
2836  case AVMEDIA_TYPE_ATTACHMENT:
2837  break;
2838  default:
2839  abort();
2840  }
2841  } else {
2842  if (!ost->enc)
2843  ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
2844  if (!ost->enc) {
2845  /* should only happen when a default codec is not present. */
2846  snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
2847  avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
2848  ret = AVERROR(EINVAL);
2849  goto dump_format;
2850  }
2851 
2852  if (ist)
2853  ist->decoding_needed |= DECODING_FOR_OST;
2854  ost->encoding_needed = 1;
2855 
2856  set_encoder_id(output_files[ost->file_index], ost);
2857 
2858  if (!ost->filter &&
2859  (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
2860  enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
2861  FilterGraph *fg;
2862  fg = init_simple_filtergraph(ist, ost);
2863  if (configure_filtergraph(fg)) {
2864  av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
2865  exit_program(1);
2866  }
2867  }
2868 
2869  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
2870  if (!ost->frame_rate.num)
2871  ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
2872  if (ist && !ost->frame_rate.num)
2873  ost->frame_rate = ist->framerate;
2874  if (ist && !ost->frame_rate.num)
2875  ost->frame_rate = ist->st->r_frame_rate;
2876  if (ist && !ost->frame_rate.num) {
2877  ost->frame_rate = (AVRational){25, 1};
2878  av_log(NULL, AV_LOG_WARNING,
2879  "No information "
2880  "about the input framerate is available. Falling "
2881  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
2882  "if you want a different framerate.\n",
2883  ost->file_index, ost->index);
2884  }
2885 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
2886  if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
2887  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
2888  ost->frame_rate = ost->enc->supported_framerates[idx];
2889  }
2890  // reduce frame rate for mpeg4 to be within the spec limits
2891  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
2892  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
2893  ost->frame_rate.num, ost->frame_rate.den, 65535);
2894  }
2895  }
2896 
2897  switch (enc_ctx->codec_type) {
2898  case AVMEDIA_TYPE_AUDIO:
2899  enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
2900  enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
2901  enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
2902  enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
2903  enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
2904  break;
2905  case AVMEDIA_TYPE_VIDEO:
2906  enc_ctx->time_base = av_inv_q(ost->frame_rate);
2907  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
2908  enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
2909  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
2910  && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
2911  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
2912  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
2913  }
2914  for (j = 0; j < ost->forced_kf_count; j++)
2915  ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
2916  AV_TIME_BASE_Q,
2917  enc_ctx->time_base);
2918 
2919  enc_ctx->width = ost->filter->filter->inputs[0]->w;
2920  enc_ctx->height = ost->filter->filter->inputs[0]->h;
2921  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
2922  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
2923  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
2924  ost->filter->filter->inputs[0]->sample_aspect_ratio;
2925  if (!strncmp(ost->enc->name, "libx264", 7) &&
2926  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
2927  ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
2928  av_log(NULL, AV_LOG_WARNING,
2929  "No pixel format specified, %s for H.264 encoding chosen.\n"
2930  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
2931  av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
2932  if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
2933  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
2934  ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
2935  av_log(NULL, AV_LOG_WARNING,
2936  "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
2937  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
2938  av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
2939  enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
2940 
2941  ost->st->avg_frame_rate = ost->frame_rate;
2942 
2943  if (!dec_ctx ||
2944  enc_ctx->width != dec_ctx->width ||
2945  enc_ctx->height != dec_ctx->height ||
2946  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
2947  enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
2948  }
2949 
2950  if (ost->forced_keyframes) {
2951  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
2952  ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
2953  forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
2954  if (ret < 0) {
2955  av_log(NULL, AV_LOG_ERROR,
2956  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
2957  return ret;
2958  }
2959  ost->forced_keyframes_expr_const_values[FKF_N] = 0;
2960  ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
2961  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
2962  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
2963 
2964  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
2965  // parse it only for static kf timings
2966  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
2967  parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
2968  }
2969  }
2970  break;
2971  case AVMEDIA_TYPE_SUBTITLE:
2972  enc_ctx->time_base = (AVRational){1, 1000};
2973  if (!enc_ctx->width) {
2974  enc_ctx->width = input_streams[ost->source_index]->st->codec->width;
2975  enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
2976  }
2977  break;
2978  case AVMEDIA_TYPE_DATA:
2979  break;
2980  default:
2981  abort();
2982  break;
2983  }
2984  /* two pass mode */
2985  if (enc_ctx->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2)) {
2986  char logfilename[1024];
2987  FILE *f;
2988 
2989  snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
2990  ost->logfile_prefix ? ost->logfile_prefix :
2991  DEFAULT_PASS_LOGFILENAME_PREFIX,
2992  i);
2993  if (!strcmp(ost->enc->name, "libx264")) {
2994  av_dict_set(&ost->encoder_opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
2995  } else {
2996  if (enc_ctx->flags & CODEC_FLAG_PASS2) {
2997  char *logbuffer;
2998  size_t logbuffer_size;
2999  if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
3000  av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
3001  logfilename);
3002  exit_program(1);
3003  }
3004  enc_ctx->stats_in = logbuffer;
3005  }
3006  if (enc_ctx->flags & CODEC_FLAG_PASS1) {
3007  f = av_fopen_utf8(logfilename, "wb");
3008  if (!f) {
3009  av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
3010  logfilename, strerror(errno));
3011  exit_program(1);
3012  }
3013  ost->logfile = f;
3014  }
3015  }
3016  }
3017  }
3018 
3019  if (ost->disposition) {
3020  static const AVOption opts[] = {
3021  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3022  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3023  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3024  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3025  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3026  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3027  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3028  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3029  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3030  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3031  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3032  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3033  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3034  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3035  { NULL },
3036  };
3037  static const AVClass class = {
3038  .class_name = "",
3039  .item_name = av_default_item_name,
3040  .option = opts,
3041  .version = LIBAVUTIL_VERSION_INT,
3042  };
3043  const AVClass *pclass = &class;
3044 
3045  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3046  if (ret < 0)
3047  goto dump_format;
3048  }
3049  }
3050 
3051  /* open each encoder */
3052  for (i = 0; i < nb_output_streams; i++) {
3053  ost = output_streams[i];
3054  if (ost->encoding_needed) {
3055  AVCodec *codec = ost->enc;
3056  AVCodecContext *dec = NULL;
3057 
3058  if ((ist = get_input_stream(ost)))
3059  dec = ist->dec_ctx;
3060  if (dec && dec->subtitle_header) {
3061  /* ASS code assumes this buffer is null terminated so add extra byte. */
3062  ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3063  if (!ost->enc_ctx->subtitle_header) {
3064  ret = AVERROR(ENOMEM);
3065  goto dump_format;
3066  }
3067  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3068  ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3069  }
3070  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3071  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3072  av_dict_set(&ost->encoder_opts, "side_data_only_packets", "1", 0);
3073 
3074  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3075  if (ret == AVERROR_EXPERIMENTAL)
3076  abort_codec_experimental(codec, 1);
3077  snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
3078  ost->file_index, ost->index);
3079  goto dump_format;
3080  }
3081  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3082  !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
3083  av_buffersink_set_frame_size(ost->filter->filter,
3084  ost->enc_ctx->frame_size);
3085  assert_avoptions(ost->encoder_opts);
3086  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3087  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3088  " It takes bits/s as argument, not kbits/s\n");
3089 
3090  ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3091  if (ret < 0) {
3092  av_log(NULL, AV_LOG_FATAL,
3093  "Error initializing the output stream codec context.\n");
3094  exit_program(1);
3095  }
3096 
3097  // copy timebase while removing common factors
3098  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3099  ost->st->codec->codec= ost->enc_ctx->codec;
3100  } else {
3101  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3102  if (ret < 0) {
3103  av_log(NULL, AV_LOG_FATAL,
3104  "Error setting up codec context options.\n");
3105  return ret;
3106  }
3107  // copy timebase while removing common factors
3108  ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
3109  }
3110  }
3111 
3112  /* init input streams */
3113  for (i = 0; i < nb_input_streams; i++)
3114  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3115  for (i = 0; i < nb_output_streams; i++) {
3116  ost = output_streams[i];
3117  avcodec_close(ost->enc_ctx);
3118  }
3119  goto dump_format;
3120  }
3121 
3122  /* discard unused programs */
3123  for (i = 0; i < nb_input_files; i++) {
3124  InputFile *ifile = input_files[i];
3125  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3126  AVProgram *p = ifile->ctx->programs[j];
3127  int discard = AVDISCARD_ALL;
3128 
3129  for (k = 0; k < p->nb_stream_indexes; k++)
3130  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3131  discard = AVDISCARD_DEFAULT;
3132  break;
3133  }
3134  p->discard = discard;
3135  }
3136  }
3137 
3138  /* open files and write file headers */
3139  for (i = 0; i < nb_output_files; i++) {
3140  oc = output_files[i]->ctx;
3141  oc->interrupt_callback = int_cb;
3142  if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3143  snprintf(error, sizeof(error),
3144  "Could not write header for output file #%d "
3145  "(incorrect codec parameters ?): %s",
3146  i, av_err2str(ret));
3147  ret = AVERROR(EINVAL);
3148  goto dump_format;
3149  }
3150 // assert_avoptions(output_files[i]->opts);
3151  if (strcmp(oc->oformat->name, "rtp")) {
3152  want_sdp = 0;
3153  }
3154  }
3155 
3156  dump_format:
3157  /* dump the file output parameters - cannot be done before in case
3158  of stream copy */
3159  for (i = 0; i < nb_output_files; i++) {
3160  av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3161  }
3162 
3163  /* dump the stream mapping */
3164  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3165  for (i = 0; i < nb_input_streams; i++) {
3166  ist = input_streams[i];
3167 
3168  for (j = 0; j < ist->nb_filters; j++) {
3169  if (ist->filters[j]->graph->graph_desc) {
3170  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3171  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3172  ist->filters[j]->name);
3173  if (nb_filtergraphs > 1)
3174  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3175  av_log(NULL, AV_LOG_INFO, "\n");
3176  }
3177  }
3178  }
3179 
3180  for (i = 0; i < nb_output_streams; i++) {
3181  ost = output_streams[i];
3182 
3183  if (ost->attachment_filename) {
3184  /* an attached file */
3185  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3186  ost->attachment_filename, ost->file_index, ost->index);
3187  continue;
3188  }
3189 
3190  if (ost->filter && ost->filter->graph->graph_desc) {
3191  /* output from a complex graph */
3192  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3193  if (nb_filtergraphs > 1)
3194  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3195 
3196  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3197  ost->index, ost->enc ? ost->enc->name : "?");
3198  continue;
3199  }
3200 
3201  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3202  input_streams[ost->source_index]->file_index,
3203  input_streams[ost->source_index]->st->index,
3204  ost->file_index,
3205  ost->index);
3206  if (ost->sync_ist != input_streams[ost->source_index])
3207  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3208  ost->sync_ist->file_index,
3209  ost->sync_ist->st->index);
3210  if (ost->stream_copy)
3211  av_log(NULL, AV_LOG_INFO, " (copy)");
3212  else {
3213  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3214  const AVCodec *out_codec = ost->enc;
3215  const char *decoder_name = "?";
3216  const char *in_codec_name = "?";
3217  const char *encoder_name = "?";
3218  const char *out_codec_name = "?";
3219  const AVCodecDescriptor *desc;
3220 
3221  if (in_codec) {
3222  decoder_name = in_codec->name;
3223  desc = avcodec_descriptor_get(in_codec->id);
3224  if (desc)
3225  in_codec_name = desc->name;
3226  if (!strcmp(decoder_name, in_codec_name))
3227  decoder_name = "native";
3228  }
3229 
3230  if (out_codec) {
3231  encoder_name = out_codec->name;
3232  desc = avcodec_descriptor_get(out_codec->id);
3233  if (desc)
3234  out_codec_name = desc->name;
3235  if (!strcmp(encoder_name, out_codec_name))
3236  encoder_name = "native";
3237  }
3238 
3239  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3240  in_codec_name, decoder_name,
3241  out_codec_name, encoder_name);
3242  }
3243  av_log(NULL, AV_LOG_INFO, "\n");
3244  }
3245 
3246  if (ret) {
3247  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3248  return ret;
3249  }
3250 
3251  if (sdp_filename || want_sdp) {
3252  print_sdp();
3253  }
3254 
3255  transcode_init_done = 1;
3256 
3257  return 0;
3258 }
3259 
3260 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3261 static int need_output(void)
3262 {
3263  int i;
3264 
3265  for (i = 0; i < nb_output_streams; i++) {
3266  OutputStream *ost = output_streams[i];
3267  OutputFile *of = output_files[ost->file_index];
3268  AVFormatContext *os = output_files[ost->file_index]->ctx;
3269 
3270  if (ost->finished ||
3271  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3272  continue;
3273  if (ost->frame_number >= ost->max_frames) {
3274  int j;
3275  for (j = 0; j < of->ctx->nb_streams; j++)
3276  close_output_stream(output_streams[of->ost_index + j]);
3277  continue;
3278  }
3279 
3280  return 1;
3281  }
3282 
3283  return 0;
3284 }
3285 
3286 /**
3287  * Select the output stream to process.
3288  *
3289  * @return selected output stream, or NULL if none available
3290  */
3291 static OutputStream *choose_output(void)
3292 {
3293  int i;
3294  int64_t opts_min = INT64_MAX;
3295  OutputStream *ost_min = NULL;
3296 
3297  for (i = 0; i < nb_output_streams; i++) {
3298  OutputStream *ost = output_streams[i];
3299  int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3300  AV_TIME_BASE_Q);
3301  if (!ost->finished && opts < opts_min) {
3302  opts_min = opts;
3303  ost_min = ost->unavailable ? NULL : ost;
3304  }
3305  }
3306  return ost_min;
3307 }
3308 
3309 static int check_keyboard_interaction(int64_t cur_time)
3310 {
3311  int i, ret, key;
3312  static int64_t last_time;
3313  if (received_nb_signals)
3314  return AVERROR_EXIT;
3315  /* read_key() returns 0 on EOF */
3316  if(cur_time - last_time >= 100000 && !run_as_daemon){
3317  key = read_key();
3318  last_time = cur_time;
3319  }else
3320  key = -1;
3321  if (key == 'q')
3322  return AVERROR_EXIT;
3323  if (key == '+') av_log_set_level(av_log_get_level()+10);
3324  if (key == '-') av_log_set_level(av_log_get_level()-10);
3325  if (key == 's') qp_hist ^= 1;
3326  if (key == 'h'){
3327  if (do_hex_dump){
3328  do_hex_dump = do_pkt_dump = 0;
3329  } else if(do_pkt_dump){
3330  do_hex_dump = 1;
3331  } else
3332  do_pkt_dump = 1;
3333  av_log_set_level(AV_LOG_DEBUG);
3334  }
3335  if (key == 'c' || key == 'C'){
3336  char buf[4096], target[64], command[256], arg[256] = {0};
3337  double time;
3338  int k, n = 0;
3339  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3340  i = 0;
3341  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3342  if (k > 0)
3343  buf[i++] = k;
3344  buf[i] = 0;
3345  if (k > 0 &&
3346  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3347  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3348  target, time, command, arg);
3349  for (i = 0; i < nb_filtergraphs; i++) {
3350  FilterGraph *fg = filtergraphs[i];
3351  if (fg->graph) {
3352  if (time < 0) {
3353  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3354  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3355  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3356  } else if (key == 'c') {
3357  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3358  ret = AVERROR_PATCHWELCOME;
3359  } else {
3360  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3361  if (ret < 0)
3362  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3363  }
3364  }
3365  }
3366  } else {
3367  av_log(NULL, AV_LOG_ERROR,
3368  "Parse error, at least 3 arguments were expected, "
3369  "only %d given in string '%s'\n", n, buf);
3370  }
3371  }
3372  if (key == 'd' || key == 'D'){
3373  int debug=0;
3374  if(key == 'D') {
3375  debug = input_streams[0]->st->codec->debug<<1;
3376  if(!debug) debug = 1;
3377  while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3378  debug += debug;
3379  }else
3380  if(scanf("%d", &debug)!=1)
3381  fprintf(stderr,"error parsing debug value\n");
3382  for(i=0;i<nb_input_streams;i++) {
3383  input_streams[i]->st->codec->debug = debug;
3384  }
3385  for(i=0;i<nb_output_streams;i++) {
3386  OutputStream *ost = output_streams[i];
3387  ost->enc_ctx->debug = debug;
3388  }
3389  if(debug) av_log_set_level(AV_LOG_DEBUG);
3390  fprintf(stderr,"debug=%d\n", debug);
3391  }
3392  if (key == '?'){
3393  fprintf(stderr, "key function\n"
3394  "? show this help\n"
3395  "+ increase verbosity\n"
3396  "- decrease verbosity\n"
3397  "c Send command to first matching filter supporting it\n"
3398  "C Send/Queue command to all matching filters\n"
3399  "D cycle through available debug modes\n"
3400  "h dump packets/hex press to cycle through the 3 states\n"
3401  "q quit\n"
3402  "s Show QP histogram\n"
3403  );
3404  }
3405  return 0;
3406 }
3407 
3408 #if HAVE_PTHREADS
3409 static void *input_thread(void *arg)
3410 {
3411  InputFile *f = arg;
3412  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3413  int ret = 0;
3414 
3415  while (1) {
3416  AVPacket pkt;
3417  ret = av_read_frame(f->ctx, &pkt);
3418 
3419  if (ret == AVERROR(EAGAIN)) {
3420  av_usleep(10000);
3421  continue;
3422  }
3423  if (ret < 0) {
3424  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3425  break;
3426  }
3427  av_dup_packet(&pkt);
3428  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3429  if (flags && ret == AVERROR(EAGAIN)) {
3430  flags = 0;
3431  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3432  av_log(f->ctx, AV_LOG_WARNING,
3433  "Thread message queue blocking; consider raising the "
3434  "thread_queue_size option (current value: %d)\n",
3435  f->thread_queue_size);
3436  }
3437  if (ret < 0) {
3438  if (ret != AVERROR_EOF)
3439  av_log(f->ctx, AV_LOG_ERROR,
3440  "Unable to send packet to main thread: %s\n",
3441  av_err2str(ret));
3442  av_free_packet(&pkt);
3443  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3444  break;
3445  }
3446  }
3447 
3448  return NULL;
3449 }
3450 
3451 static void free_input_threads(void)
3452 {
3453  int i;
3454 
3455  for (i = 0; i < nb_input_files; i++) {
3456  InputFile *f = input_files[i];
3457  AVPacket pkt;
3458 
3459  if (!f->in_thread_queue)
3460  continue;
3461  av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3462  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3463  av_free_packet(&pkt);
3464 
3465  pthread_join(f->thread, NULL);
3466  f->joined = 1;
3467  av_thread_message_queue_free(&f->in_thread_queue);
3468  }
3469 }
3470 
3471 static int init_input_threads(void)
3472 {
3473  int i, ret;
3474 
3475  if (nb_input_files == 1)
3476  return 0;
3477 
3478  for (i = 0; i < nb_input_files; i++) {
3479  InputFile *f = input_files[i];
3480 
3481  if (f->ctx->pb ? !f->ctx->pb->seekable :
3482  strcmp(f->ctx->iformat->name, "lavfi"))
3483  f->non_blocking = 1;
3484  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3485  f->thread_queue_size, sizeof(AVPacket));
3486  if (ret < 0)
3487  return ret;
3488 
3489  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3490  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3491  av_thread_message_queue_free(&f->in_thread_queue);
3492  return AVERROR(ret);
3493  }
3494  }
3495  return 0;
3496 }
3497 
3498 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3499 {
3500  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3501  f->non_blocking ?
3502  AV_THREAD_MESSAGE_NONBLOCK : 0);
3503 }
3504 #endif
3505 
3506 static int get_input_packet(InputFile *f, AVPacket *pkt)
3507 {
3508  if (f->rate_emu) {
3509  int i;
3510  for (i = 0; i < f->nb_streams; i++) {
3511  InputStream *ist = input_streams[f->ist_index + i];
3512  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3513  int64_t now = av_gettime_relative() - ist->start;
3514  if (pts > now)
3515  return AVERROR(EAGAIN);
3516  }
3517  }
3518 
3519 #if HAVE_PTHREADS
3520  if (nb_input_files > 1)
3521  return get_input_packet_mt(f, pkt);
3522 #endif
3523  return av_read_frame(f->ctx, pkt);
3524 }
3525 
3526 static int got_eagain(void)
3527 {
3528  int i;
3529  for (i = 0; i < nb_output_streams; i++)
3530  if (output_streams[i]->unavailable)
3531  return 1;
3532  return 0;
3533 }
3534 
3535 static void reset_eagain(void)
3536 {
3537  int i;
3538  for (i = 0; i < nb_input_files; i++)
3539  input_files[i]->eagain = 0;
3540  for (i = 0; i < nb_output_streams; i++)
3541  output_streams[i]->unavailable = 0;
3542 }
3543 
3544 /*
3545  * Return
3546  * - 0 -- one packet was read and processed
3547  * - AVERROR(EAGAIN) -- no packets were available for selected file,
3548  * this function should be called again
3549  * - AVERROR_EOF -- this function should not be called again
3550  */
3551 static int process_input(int file_index)
3552 {
3553  InputFile *ifile = input_files[file_index];
3554  AVFormatContext *is;
3555  InputStream *ist;
3556  AVPacket pkt;
3557  int ret, i, j;
3558 
3559  is = ifile->ctx;
3560  ret = get_input_packet(ifile, &pkt);
3561 
3562  if (ret == AVERROR(EAGAIN)) {
3563  ifile->eagain = 1;
3564  return ret;
3565  }
3566  if (ret < 0) {
3567  if (ret != AVERROR_EOF) {
3568  print_error(is->filename, ret);
3569  if (exit_on_error)
3570  exit_program(1);
3571  }
3572 
3573  for (i = 0; i < ifile->nb_streams; i++) {
3574  ist = input_streams[ifile->ist_index + i];
3575  if (ist->decoding_needed) {
3576  ret = process_input_packet(ist, NULL);
3577  if (ret>0)
3578  return 0;
3579  }
3580 
3581  /* mark all outputs that don't go through lavfi as finished */
3582  for (j = 0; j < nb_output_streams; j++) {
3583  OutputStream *ost = output_streams[j];
3584 
3585  if (ost->source_index == ifile->ist_index + i &&
3586  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3587  finish_output_stream(ost);
3588  }
3589  }
3590 
3591  ifile->eof_reached = 1;
3592  return AVERROR(EAGAIN);
3593  }
3594 
3595  reset_eagain();
3596 
3597  if (do_pkt_dump) {
3598  av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
3599  is->streams[pkt.stream_index]);
3600  }
3601  /* the following test is needed in case new streams appear
3602  dynamically in the stream: we ignore them */
3603  if (pkt.stream_index >= ifile->nb_streams) {
3604  report_new_stream(file_index, &pkt);
3605  goto discard_packet;
3606  }
3607 
3608  ist = input_streams[ifile->ist_index + pkt.stream_index];
3609 
3610  ist->data_size += pkt.size;
3611  ist->nb_packets++;
3612 
3613  if (ist->discard)
3614  goto discard_packet;
3615 
3616  if (debug_ts) {
3617  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3618  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3619         ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3620         av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3621         av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3622         av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3623  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3624  av_ts2str(input_files[ist->file_index]->ts_offset),
3625  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
3626  }
3627 
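    /* Timestamp wrap correction: for containers whose timestamps use fewer
     * than 64 bits, a packet whose dts/pts lies more than half a wrap period
     * above the start time is assumed to have wrapped and is shifted down by
     * one full wrap period (1 << pts_wrap_bits). */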
3628  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3629  int64_t stime, stime2;
3630         // Correcting the start time based on the enabled streams.
3631         // FIXME: this ideally should be done before the first use of the start time, but we do not know which streams are enabled at that point,
3632         // so we do it here instead, as part of discontinuity handling.
3633  if ( ist->next_dts == AV_NOPTS_VALUE
3634  && ifile->ts_offset == -is->start_time
3635  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3636  int64_t new_start_time = INT64_MAX;
3637  for (i=0; i<is->nb_streams; i++) {
3638  AVStream *st = is->streams[i];
3639  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3640  continue;
3641  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3642  }
3643  if (new_start_time > is->start_time) {
3644  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3645  ifile->ts_offset = -new_start_time;
3646  }
3647  }
3648 
3649  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3650  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3651  ist->wrap_correction_done = 1;
3652 
3653  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3654  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3655  ist->wrap_correction_done = 0;
3656  }
3657  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3658  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3659  ist->wrap_correction_done = 0;
3660  }
3661  }
3662 
3663  /* add the stream-global side data to the first packet */
3664  if (ist->nb_packets == 1) {
3665  if (ist->st->nb_side_data)
3666             av_packet_split_side_data(&pkt);
3667         for (i = 0; i < ist->st->nb_side_data; i++) {
3668  AVPacketSideData *src_sd = &ist->st->side_data[i];
3669  uint8_t *dst_data;
3670 
3671  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
3672  continue;
3673  if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3674  continue;
3675 
3676  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3677  if (!dst_data)
3678  exit_program(1);
3679 
3680  memcpy(dst_data, src_sd->data, src_sd->size);
3681  }
3682  }
3683 
3684  if (pkt.dts != AV_NOPTS_VALUE)
3685  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3686  if (pkt.pts != AV_NOPTS_VALUE)
3687  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3688 
3689  if (pkt.pts != AV_NOPTS_VALUE)
3690  pkt.pts *= ist->ts_scale;
3691  if (pkt.dts != AV_NOPTS_VALUE)
3692  pkt.dts *= ist->ts_scale;
3693 
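    /* Inter-stream discontinuity check: when a video/audio stream produces a
     * timestamped packet before its own next_dts is known, and the format
     * allows timestamp discontinuities, compare the packet's dts with the
     * last dts seen anywhere in this file and fold any jump larger than
     * dts_delta_threshold seconds into the file's ts_offset. */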
3694  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3695          ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3696         pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3697  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3698  int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3699  int64_t delta = pkt_dts - ifile->last_ts;
3700  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3701  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3702  ifile->ts_offset -= delta;
3704  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3705  delta, ifile->ts_offset);
3706  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3707  if (pkt.pts != AV_NOPTS_VALUE)
3708  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3709  }
3710  }
3711 
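    /* Per-stream discontinuity check: compare this packet's dts with the dts
     * predicted from the previous packet (ist->next_dts). For AVFMT_TS_DISCONT
     * formats, large jumps are folded into ts_offset so downstream timestamps
     * stay continuous; for strict formats, dts/pts values further than
     * dts_error_threshold seconds from the prediction are discarded instead. */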
3712  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3713          ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3714         pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3715  !copy_ts) {
3716  int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3717  int64_t delta = pkt_dts - ist->next_dts;
3718  if (is->iformat->flags & AVFMT_TS_DISCONT) {
3719  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3720  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3721  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3722  ifile->ts_offset -= delta;
3724  "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3725  delta, ifile->ts_offset);
3726  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3727  if (pkt.pts != AV_NOPTS_VALUE)
3728  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3729  }
3730  } else {
3731  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3732  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3733  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3734  pkt.dts = AV_NOPTS_VALUE;
3735  }
3736  if (pkt.pts != AV_NOPTS_VALUE){
3737  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3738  delta = pkt_pts - ist->next_dts;
3739  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3740  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3741  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3742  pkt.pts = AV_NOPTS_VALUE;
3743  }
3744  }
3745  }
3746  }
3747 
3748  if (pkt.dts != AV_NOPTS_VALUE)
3749  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3750 
3751  if (debug_ts) {
3752  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3753          ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3754          av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3755  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3756  av_ts2str(input_files[ist->file_index]->ts_offset),
3757  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
3758  }
3759 
3760  sub2video_heartbeat(ist, pkt.pts);
3761 
3762  ret = process_input_packet(ist, &pkt);
3763  if (ret < 0) {
3764  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
3765  ist->file_index, ist->st->index, av_err2str(ret));
3766  if (exit_on_error)
3767  exit_program(1);
3768  }
3769 
3770 discard_packet:
3771  av_free_packet(&pkt);
3772 
3773  return 0;
3774 }
3775 
3776 /**
3777  * Perform a step of transcoding for the specified filter graph.
3778  *
3779  * @param[in] graph filter graph to consider
3780  * @param[out] best_ist  input stream from which a frame would allow processing to continue
3781  * @return 0 for success, <0 for error
3782  */
3783 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
3784 {
3785  int i, ret;
3786  int nb_requests, nb_requests_max = 0;
3787  InputFilter *ifilter;
3788  InputStream *ist;
3789 
3790  *best_ist = NULL;
3791  ret = avfilter_graph_request_oldest(graph->graph);
3792  if (ret >= 0)
3793  return reap_filters(0);
3794 
3795  if (ret == AVERROR_EOF) {
3796  ret = reap_filters(1);
3797  for (i = 0; i < graph->nb_outputs; i++)
3798  close_output_stream(graph->outputs[i]->ost);
3799  return ret;
3800  }
3801  if (ret != AVERROR(EAGAIN))
3802  return ret;
3803 
3804  for (i = 0; i < graph->nb_inputs; i++) {
3805  ifilter = graph->inputs[i];
3806  ist = ifilter->ist;
3807  if (input_files[ist->file_index]->eagain ||
3808  input_files[ist->file_index]->eof_reached)
3809  continue;
3810  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
3811  if (nb_requests > nb_requests_max) {
3812  nb_requests_max = nb_requests;
3813  *best_ist = ist;
3814  }
3815  }
3816 
3817  if (!*best_ist)
3818  for (i = 0; i < graph->nb_outputs; i++)
3819  graph->outputs[i]->ost->unavailable = 1;
3820 
3821  return 0;
3822 }
3823 
3824 /**
3825  * Run a single step of transcoding.
3826  *
3827  * @return 0 for success, <0 for error
3828  */
3829 static int transcode_step(void)
3830 {
3831  OutputStream *ost;
3832  InputStream *ist;
3833  int ret;
3834 
3835  ost = choose_output();
3836  if (!ost) {
3837  if (got_eagain()) {
3838  reset_eagain();
3839  av_usleep(10000);
3840  return 0;
3841  }
3842  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
3843  return AVERROR_EOF;
3844  }
3845 
3846  if (ost->filter) {
3847  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
3848  return ret;
3849  if (!ist)
3850  return 0;
3851  } else {
3852  av_assert0(ost->source_index >= 0);
3853  ist = input_streams[ost->source_index];
3854  }
3855 
3856  ret = process_input(ist->file_index);
3857  if (ret == AVERROR(EAGAIN)) {
3858  if (input_files[ist->file_index]->eagain)
3859  ost->unavailable = 1;
3860  return 0;
3861  }
3862 
3863  if (ret < 0)
3864  return ret == AVERROR_EOF ? 0 : ret;
3865 
3866  return reap_filters(0);
3867 }
3868 
3869 /*
3870  * The following code is the main loop of the file converter
3871  */
3872 static int transcode(void)
3873 {
3874  int ret, i;
3875  AVFormatContext *os;
3876  OutputStream *ost;
3877  InputStream *ist;
3878  int64_t timer_start;
3879 
3880  ret = transcode_init();
3881  if (ret < 0)
3882  goto fail;
3883 
3884  if (stdin_interaction) {
3885  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
3886  }
3887 
3888  timer_start = av_gettime_relative();
3889 
3890 #if HAVE_PTHREADS
3891  if ((ret = init_input_threads()) < 0)
3892  goto fail;
3893 #endif
3894 
3895  while (!received_sigterm) {
3896  int64_t cur_time= av_gettime_relative();
3897 
3898  /* if 'q' was pressed, exit */
3899  if (stdin_interaction)
3900  if (check_keyboard_interaction(cur_time) < 0)
3901  break;
3902 
3903  /* check if there's any stream where output is still needed */
3904  if (!need_output()) {
3905  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
3906  break;
3907  }
3908 
3909  ret = transcode_step();
3910  if (ret < 0) {
3911  if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
3912  continue;
3913  } else {
3914  char errbuf[128];
3915  av_strerror(ret, errbuf, sizeof(errbuf));
3916 
3917  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
3918  break;
3919  }
3920  }
3921 
3922  /* dump report using the first output video and audio streams */
3923  print_report(0, timer_start, cur_time);
3924  }
3925 #if HAVE_PTHREADS
3926  free_input_threads();
3927 #endif
3928 
3929  /* at the end of stream, we must flush the decoder buffers */
3930  for (i = 0; i < nb_input_streams; i++) {
3931  ist = input_streams[i];
3932  if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
3933  process_input_packet(ist, NULL);
3934  }
3935  }
3936  flush_encoders();
3937 
3938  term_exit();
3939 
3940  /* write the trailer if needed and close file */
3941  for (i = 0; i < nb_output_files; i++) {
3942  os = output_files[i]->ctx;
3943  av_write_trailer(os);
3944  }
3945 
3946  /* dump report by using the first video and audio streams */
3947  print_report(1, timer_start, av_gettime_relative());
3948 
3949  /* close each encoder */
3950  for (i = 0; i < nb_output_streams; i++) {
3951  ost = output_streams[i];
3952  if (ost->encoding_needed) {
3953  av_freep(&ost->enc_ctx->stats_in);
3954  }
3955  }
3956 
3957  /* close each decoder */
3958  for (i = 0; i < nb_input_streams; i++) {
3959  ist = input_streams[i];
3960  if (ist->decoding_needed) {
3961  avcodec_close(ist->dec_ctx);
3962  if (ist->hwaccel_uninit)
3963  ist->hwaccel_uninit(ist->dec_ctx);
3964  }
3965  }
3966 
3967  /* finished ! */
3968  ret = 0;
3969 
3970  fail:
3971 #if HAVE_PTHREADS
3972  free_input_threads();
3973 #endif
3974 
3975  if (output_streams) {
3976  for (i = 0; i < nb_output_streams; i++) {
3977  ost = output_streams[i];
3978  if (ost) {
3979  if (ost->logfile) {
3980  fclose(ost->logfile);
3981  ost->logfile = NULL;
3982  }
3983  av_freep(&ost->forced_kf_pts);
3984  av_freep(&ost->apad);
3985  av_freep(&ost->disposition);
3986  av_dict_free(&ost->encoder_opts);
3987  av_dict_free(&ost->swr_opts);
3988  av_dict_free(&ost->resample_opts);
3989  av_dict_free(&ost->bsf_args);
3990  }
3991  }
3992  }
3993  return ret;
3994 }
3995 
3996 
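/* Return the user CPU time consumed by the process, in microseconds; falls
 * back to wall-clock time when neither getrusage() nor GetProcessTimes() is
 * available. */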
3997 static int64_t getutime(void)
3998 {
3999 #if HAVE_GETRUSAGE
4000  struct rusage rusage;
4001 
4002  getrusage(RUSAGE_SELF, &rusage);
4003  return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4004 #elif HAVE_GETPROCESSTIMES
4005  HANDLE proc;
4006  FILETIME c, e, k, u;
4007  proc = GetCurrentProcess();
4008  GetProcessTimes(proc, &c, &e, &k, &u);
4009  return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4010 #else
4011  return av_gettime_relative();
4012 #endif
4013 }
4014 
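/* Return the peak memory use of the process in bytes (ru_maxrss * 1024 on
 * POSIX, PeakPagefileUsage on Windows), or 0 if it cannot be determined. */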
4015 static int64_t getmaxrss(void)
4016 {
4017 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4018  struct rusage rusage;
4019  getrusage(RUSAGE_SELF, &rusage);
4020  return (int64_t)rusage.ru_maxrss * 1024;
4021 #elif HAVE_GETPROCESSMEMORYINFO
4022  HANDLE proc;
4023  PROCESS_MEMORY_COUNTERS memcounters;
4024  proc = GetCurrentProcess();
4025  memcounters.cb = sizeof(memcounters);
4026  GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4027  return memcounters.PeakPagefileUsage;
4028 #else
4029  return 0;
4030 #endif
4031 }
4032 
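/* No-op log callback, installed in place of the default logger when ffmpeg
 * is run in daemon mode (-d). */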
4033 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
4034 {
4035 }
4036 
4037 int main(int argc, char **argv)
4038 {
4039  int ret;
4040  int64_t ti;
4041 
4042     register_exit(ffmpeg_cleanup);
4043 
4044  setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4045 
4046     av_log_set_flags(AV_LOG_SKIP_REPEATED);
4047     parse_loglevel(argc, argv, options);
4048 
4049  if(argc>1 && !strcmp(argv[1], "-d")){
4050  run_as_daemon=1;
4051         av_log_set_callback(log_callback_null);
4052         argc--;
4053  argv++;
4054  }
4055 
4056     avcodec_register_all();
4057 #if CONFIG_AVDEVICE
4058     avdevice_register_all();
4059 #endif
4060     avfilter_register_all();
4061     av_register_all();
4062     avformat_network_init();
4063 
4064  show_banner(argc, argv, options);
4065 
4066  term_init();
4067 
4068  /* parse options and open all input/output files */
4069  ret = ffmpeg_parse_options(argc, argv);
4070  if (ret < 0)
4071  exit_program(1);
4072 
4073  if (nb_output_files <= 0 && nb_input_files == 0) {
4074  show_usage();
4075  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4076  exit_program(1);
4077  }
4078 
4079  /* file converter / grab */
4080  if (nb_output_files <= 0) {
4081  av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4082  exit_program(1);
4083  }
4084 
4085 // if (nb_input_files == 0) {
4086 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4087 // exit_program(1);
4088 // }
4089 
4090  current_time = ti = getutime();
4091  if (transcode() < 0)
4092  exit_program(1);
4093  ti = getutime() - ti;
4094  if (do_benchmark) {
4095  printf("bench: utime=%0.3fs\n", ti / 1000000.0);
4096  }
4097  av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4098            decode_error_stat[0], decode_error_stat[1]);
4099     if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4100         exit_program(69);
4101 
4102     exit_program(received_nb_signals ? 255 : main_return_code);
4103     return main_return_code;
4104 }
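For readers tracing the demuxing path above (get_input_packet() feeding process_input()), the sketch below reduces the same read loop to the public libavformat API of this release. It is illustrative only, not part of ffmpeg.c, and all error handling beyond the bare minimum is omitted.

#include <stdio.h>
#include <libavformat/avformat.h>

int main(int argc, char **argv)
{
    AVFormatContext *ic = NULL;
    AVPacket pkt;
    int ret;

    if (argc < 2) {
        fprintf(stderr, "usage: %s <input>\n", argv[0]);
        return 1;
    }

    av_register_all();

    /* open the input and read the stream headers, as ffmpeg_opt.c does per -i */
    if ((ret = avformat_open_input(&ic, argv[1], NULL, NULL)) < 0)
        return 1;
    if ((ret = avformat_find_stream_info(ic, NULL)) < 0)
        return 1;

    /* the core of get_input_packet()/process_input(): pull packets until EOF */
    while ((ret = av_read_frame(ic, &pkt)) >= 0) {
        /* a real converter would decode, filter and re-encode here */
        av_free_packet(&pkt);
    }

    avformat_close_input(&ic);
    return 0;
}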
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1471
int64_t pts
current pts of the decoded frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:268
#define extra_bits(eb)
Definition: intrax8.c:152
int got_output
Definition: ffmpeg.h:293
#define AV_DISPOSITION_METADATA
Definition: avformat.h:826
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:2577
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
Definition: ffmpeg.c:1724
int64_t first_dts
Timestamp corresponding to the last dts sync point.
Definition: avformat.h:1018
int guess_input_channel_layout(InputStream *ist)
Definition: ffmpeg.c:1824
#define NULL
Definition: coverity.c:32
int frame_number
Definition: ffmpeg.h:385
const struct AVCodec * codec
Definition: avcodec.h:1250
Definition: ffmpeg.h:364
AVRational framerate
Definition: avcodec.h:3023
enum HWAccelID active_hwaccel_id
Definition: ffmpeg.h:319
const char * s
Definition: avisynth_c.h:631
Bytestream IO Context.
Definition: avio.h:111
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static enum AVPixelFormat pix_fmt
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:464
void term_init(void)
Definition: ffmpeg.c:328
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:281
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
int64_t av_frame_get_pkt_duration(const AVFrame *frame)
uint8_t * name
Definition: ffmpeg.h:228
int nb_outputs
Definition: ffmpeg.h:244
int linesize[AV_NUM_DATA_POINTERS]
number of bytes per line
Definition: avcodec.h:3454
void av_free_packet(AVPacket *pkt)
Free a packet.
Definition: avpacket.c:280
AVDictionary * swr_opts
Definition: ffmpeg.h:434
#define DECODING_FOR_FILTER
Definition: ffmpeg.h:254
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2090
int resample_channels
Definition: ffmpeg.h:288
#define av_realloc_f(p, o, n)
This structure describes decoded (raw) audio or video data.
Definition: frame.h:171
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
void term_exit(void)
Definition: ffmpeg.c:307
AVCodec * avcodec_find_encoder(enum AVCodecID id)
Find a registered encoder with a matching codec ID.
Definition: utils.c:2932
int stream_copy
Definition: ffmpeg.h:440
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
Write a packet to an output media file ensuring correct interleaving.
Definition: mux.c:913
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3482
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1520
#define FF_DEBUG_VIS_QP
only access through AVOptions from outside libavcodec
Definition: avcodec.h:2587
AVOption.
Definition: opt.h:255
AVRational frame_rate
Definition: ffmpeg.h:405
int64_t * forced_kf_pts
Definition: ffmpeg.h:413
void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq, int err)
Set the receiving error code.
static void sub2video_flush(InputStream *ist)
Definition: ffmpeg.c:287
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:2663
char * filters
filtergraph associated to the -filter option
Definition: ffmpeg.h:429
#define CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:737
static int process_input(int file_index)
Definition: ffmpeg.c:3551
#define CODEC_CAP_VARIABLE_FRAME_SIZE
Audio encoder supports receiving a different number of samples in each call.
Definition: avcodec.h:882
int avformat_write_header(AVFormatContext *s, AVDictionary **options)
Allocate the stream private data and write the stream header to an output media file.
Definition: mux.c:428
int exit_on_error
Definition: ffmpeg_opt.c:99
const char * fmt
Definition: avisynth_c.h:632
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
#define LIBAVUTIL_VERSION_INT
Definition: version.h:62
Main libavfilter public API header.
int64_t pos
byte position in stream, -1 if unknown
Definition: avcodec.h:1187
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
Definition: dict.h:81
#define CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:736
static int run_as_daemon
Definition: ffmpeg.c:125
Memory buffer source API.
AVFrame * coded_frame
the picture in the bitstream
Definition: avcodec.h:2745
void av_log_set_level(int level)
Set the log level.
Definition: log.c:382
AVRational framerate
Definition: ffmpeg.h:277
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
AVCodecParserContext * parser
Definition: ffmpeg.h:448
static int64_t cur_time
Definition: ffserver.c:253
FILE * av_fopen_utf8(const char *path, const char *mode)
Open a file using a UTF-8 filename.
Definition: file_open.c:92
int decoding_needed
Definition: ffmpeg.h:252
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown)
Definition: avformat.h:914
int num
numerator
Definition: rational.h:44
FilterGraph * init_simple_filtergraph(InputStream *ist, OutputStream *ost)
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
Definition: ffmpeg.c:1455
#define vsnprintf
Definition: snprintf.h:36
int rotate_overridden
Definition: ffmpeg.h:408
int index
stream index in AVFormatContext
Definition: avformat.h:843
int size
Definition: avcodec.h:1163
static int64_t getmaxrss(void)
Definition: ffmpeg.c:4015
const char * b
Definition: vf_curves.c:109
static int nb_frames_dup
Definition: ffmpeg.c:126
static InputStream * get_input_stream(OutputStream *ost)
Definition: ffmpeg.c:2465
#define AVIO_FLAG_WRITE
write-only
Definition: avio.h:461
#define AV_DISPOSITION_DUB
Definition: avformat.h:798
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:1623
int eagain
Definition: ffmpeg.h:340
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: cmdutils.c:1130
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1444
AVBitStreamFilterContext * bitstream_filters
Definition: ffmpeg.h:395
static void abort_codec_experimental(AVCodec *c, int encoder)
Definition: ffmpeg.c:556
#define AV_DISPOSITION_HEARING_IMPAIRED
stream for hearing impaired audiences
Definition: avformat.h:810
unsigned num_rects
Definition: avcodec.h:3511
AVFrame * filter_frame
Definition: ffmpeg.h:259
static int transcode_init(void)
Definition: ffmpeg.c:2597
static int compare_int64(const void *a, const void *b)
Definition: ffmpeg.c:2472
char * stats_in
pass2 encoding statistics input buffer Concatenated stuff from stats_out of pass1 should be placed he...
Definition: avcodec.h:2502
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:40
int do_benchmark_all
Definition: ffmpeg_opt.c:92
enum AVMediaType type
Definition: avcodec.h:3194
#define AV_DISPOSITION_CLEAN_EFFECTS
stream without voice
Definition: avformat.h:812
#define FF_ARRAY_ELEMS(a)
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:76
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:652
discard all
Definition: avcodec.h:669
AVPacketSideData * side_data
An array of side data that applies to the whole stream (i.e.
Definition: avformat.h:954
int(* hwaccel_get_buffer)(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.h:322
int64_t input_ts_offset
Definition: ffmpeg.h:342
int do_hex_dump
Definition: ffmpeg_opt.c:93
static AVPacket pkt
int avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
Encode a frame of audio.
Definition: utils.c:1837
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:2727
#define FF_DEBUG_VIS_MB_TYPE
only access through AVOptions from outside libavcodec
Definition: avcodec.h:2588
int nb_input_streams
Definition: ffmpeg.c:138
void avcodec_register_all(void)
Register all the codecs, parsers and bitstream filters which were enabled at configuration time...
Definition: allcodecs.c:68
const char * name
Definition: ffmpeg.h:69
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: utils.c:2730
int av_dup_packet(AVPacket *pkt)
Definition: avpacket.c:248
static void report_new_stream(int input_index, AVPacket *pkt)
Definition: ffmpeg.c:2541
Picture data structure.
Definition: avcodec.h:3452
uint64_t packets_written
Definition: ffmpeg.h:454
AVCodec.
Definition: avcodec.h:3181
#define VSYNC_VFR
Definition: ffmpeg.h:54
int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src)
Copy the settings of the source AVCodecContext into the destination AVCodecContext.
Definition: options.c:180
int block_align
number of bytes per packet if constant and known or 0 Used by some WAV based audio codecs...
Definition: avcodec.h:2022
#define AVFMT_TS_NONSTRICT
Format does not require strictly increasing timestamps, but they must still be monotonic.
Definition: avformat.h:483
int avpicture_fill(AVPicture *picture, const uint8_t *ptr, enum AVPixelFormat pix_fmt, int width, int height)
Setup the picture fields based on the specified image parameters and the provided image data buffer...
Definition: avpicture.c:34
int print_stats
Definition: ffmpeg_opt.c:100
float dts_error_threshold
Definition: ffmpeg_opt.c:84
int64_t start_time
start time in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:465
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
uint64_t data_size
Definition: ffmpeg.h:452
static int decode_interrupt_cb(void *ctx)
Definition: ffmpeg.c:419
static int process_input_packet(InputStream *ist, const AVPacket *pkt)
Definition: ffmpeg.c:2184
#define log2(x)
Definition: libm.h:122
#define AV_DISPOSITION_KARAOKE
Definition: avformat.h:802
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1369
AVSubtitleRect ** rects
Definition: avcodec.h:3512
enum AVAudioServiceType audio_service_type
Type of service that the audio stream conveys.
Definition: avcodec.h:2060
int encoding_needed
Definition: ffmpeg.h:384
static void update_benchmark(const char *fmt,...)
Definition: ffmpeg.c:561
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
Definition: ffmpeg.c:4033
static int check_keyboard_interaction(int64_t cur_time)
Definition: ffmpeg.c:3309
Format I/O context.
Definition: avformat.h:1272
uint64_t samples_decoded
Definition: ffmpeg.h:334
memory buffer sink API for audio and video
struct InputStream * ist
Definition: ffmpeg.h:219
unsigned int nb_stream_indexes
Definition: avformat.h:1210
#define AV_LOG_QUIET
Print no output.
Definition: log.h:158
enum HWAccelID id
Definition: ffmpeg.h:71
#define CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:746
int64_t cur_dts
Definition: avformat.h:1019
int w
width of pict, undefined when pict is not set
Definition: avcodec.h:3484
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:641
uint64_t frames_decoded
Definition: ffmpeg.h:333
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AVFilterGraph * graph
Definition: ffmpeg.h:238
Public dictionary API.
static void do_video_out(AVFormatContext *s, OutputStream *ost, AVFrame *next_picture, double sync_ipts)
Definition: ffmpeg.c:883
char * logfile_prefix
Definition: ffmpeg.h:424
static uint8_t * subtitle_out
Definition: ffmpeg.c:133
#define DEFAULT_PASS_LOGFILENAME_PREFIX
Definition: ffmpeg.c:135
static int main_return_code
Definition: ffmpeg.c:316
static int64_t start_time
Definition: ffplay.c:320
int copy_initial_nonkeyframes
Definition: ffmpeg.h:442
void register_exit(void(*cb)(int ret))
Register a program-specific cleanup routine.
Definition: cmdutils.c:118
if()
Definition: avfilter.c:975
int flags
can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_RAWPICTURE, AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS, AVFMT_VARIABLE_FPS, AVFMT_NODIMENSIONS, AVFMT_NOSTREAMS, AVFMT_ALLOW_FLUSH, AVFMT_TS_NONSTRICT
Definition: avformat.h:532
uint8_t
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:135
Opaque data information usually continuous.
Definition: avutil.h:196
int av_thread_message_queue_recv(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Receive a message from the queue.
int av_parser_change(AVCodecParserContext *s, AVCodecContext *avctx, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int keyframe)
Definition: parser.c:186
float delta
static void sub2video_push_ref(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:210
AVOptions.
int subtitle_header_size
Definition: avcodec.h:2958
int flags
Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS.
Definition: avformat.h:642
enum AVCodecID av_codec_get_id(const struct AVCodecTag *const *tags, unsigned int tag)
Get the AVCodecID for the given codec tag tag.
miscellaneous OS support macros and functions.
timestamp utils, mostly useful for debugging/logging purposes
attribute_deprecated void(* destruct)(struct AVPacket *)
Definition: avcodec.h:1183
uint8_t * data[AV_NUM_DATA_POINTERS]
pointers to the image data planes
Definition: avcodec.h:3453
int stdin_interaction
Definition: ffmpeg_opt.c:102
FILE * logfile
Definition: ffmpeg.h:425
AVDictionary * opts
Definition: ffmpeg.h:462
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:67
#define media_type_string
Definition: cmdutils.h:577
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: avcodec.h:1032
int av_thread_message_queue_send(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Send a message on the queue.
libavcodec/libavfilter gluing utilities
#define ECHO(name, type, min, max)
Definition: af_aecho.c:185
static const HWAccel * get_hwaccel(enum AVPixelFormat pix_fmt)
Definition: ffmpeg.c:2361
static int need_output(void)
Definition: ffmpeg.c:3261
int last_droped
Definition: ffmpeg.h:401
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:363
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:257
int nb_side_data
The number of elements in the AVStream.side_data array.
Definition: avformat.h:958
static double psnr(double d)
Definition: ffmpeg.c:1183
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1355
int do_benchmark
Definition: ffmpeg_opt.c:91
int audio_sync_method
Definition: ffmpeg_opt.c:87
int shortest
Definition: ffmpeg.h:468
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1340
int avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
Encode a frame of video.
Definition: utils.c:2101
int av_codec_get_tag2(const struct AVCodecTag *const *tags, enum AVCodecID id, unsigned int *tag)
Get the codec tag for the given codec id.
void avfilter_register_all(void)
Initialize the filter system.
Definition: allfilters.c:40
static int64_t getutime(void)
Definition: ffmpeg.c:3997
static AVFrame * frame
void av_pkt_dump_log2(void *avcl, int level, const AVPacket *pkt, int dump_payload, const AVStream *st)
Send a nice dump of a packet to the log.
Definition: dump.c:111
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:94
const char * name
Definition: avcodec.h:5082
static double av_q2d(AVRational a)
Convert rational to double.
Definition: rational.h:80
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:39
int nb_streams
Definition: ffmpeg.h:348
uint8_t * data
Definition: avcodec.h:1162
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
AVDictionary * resample_opts
Definition: ffmpeg.h:435
static void parse_forced_key_frames(char *kf, OutputStream *ost, AVCodecContext *avctx)
Definition: ffmpeg.c:2478
list ifile
Definition: normalize.py:6
#define FFMIN3(a, b, c)
Definition: common.h:67
AVFilterContext * filter
Definition: ffmpeg.h:225
int avformat_network_init(void)
Do global initialization of network components.
Definition: utils.c:4123
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:84
int nb_input_files
Definition: ffmpeg.c:140
#define AVERROR_EOF
End of file.
Definition: error.h:55
static int read_key(void)
Definition: ffmpeg.c:364
static int reap_filters(int flush)
Get and encode new output from any of the filtergraphs, without causing activity. ...
Definition: ffmpeg.c:1243
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
#define lrintf(x)
Definition: libm_mips.h:70
int av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size)
Generate an SDP for an RTP session.
Definition: sdp.c:819
static void do_video_stats(OutputStream *ost, int frame_size)
Definition: ffmpeg.c:1188
int resample_sample_rate
Definition: ffmpeg.h:287
uint8_t * data
Definition: avcodec.h:1112
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:367
#define CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:759
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:494
const AVClass * avcodec_get_frame_class(void)
Get the AVClass for AVFrame.
Definition: options.c:283
int64_t av_frame_get_best_effort_timestamp(const AVFrame *frame)
Accessors for some AVFrame fields.
ptrdiff_t size
Definition: opengl_enc.c:101
int h
height of pict, undefined when pict is not set
Definition: avcodec.h:3485
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:365
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:2720
AVCodec * dec
Definition: ffmpeg.h:257
static int64_t duration
Definition: ffplay.c:321
enum AVDiscard discard
selects which program to discard and which to feed to the caller
Definition: avformat.h:1208
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:2494
void avio_write(AVIOContext *s, const unsigned char *buf, int size)
Definition: aviobuf.c:177
int top_field_first
Definition: ffmpeg.h:278
#define AVFMT_FLAG_BITEXACT
When muxing, try to avoid writing any random/volatile data to the output.
Definition: avformat.h:1400
int nb_output_streams
Definition: ffmpeg.c:143
int file_index
Definition: ffmpeg.h:248
int duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: avcodec.h:1180
const OptionDef options[]
Definition: ffserver.c:3798
struct AVBitStreamFilterContext * next
Definition: avcodec.h:5077
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1967
unsigned int * stream_index
Definition: avformat.h:1209
struct InputStream::sub2video sub2video
int resample_pix_fmt
Definition: ffmpeg.h:284
int resample_height
Definition: ffmpeg.h:282
int wrap_correction_done
Definition: ffmpeg.h:269
int64_t filter_in_rescale_delta_last
Definition: ffmpeg.h:271
#define av_log(a,...)
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:258
int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
#define AV_DISPOSITION_CAPTIONS
To specify text track kind (different from subtitles default).
Definition: avformat.h:824
unsigned m
Definition: audioconvert.c:187
int av_buffersrc_add_ref(AVFilterContext *buffer_src, AVFilterBufferRef *picref, int flags)
Add buffer data in picref to buffer_src.
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:117
struct AVOutputFormat * oformat
The output container format.
Definition: avformat.h:1291
int64_t next_dts
Definition: ffmpeg.h:264
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: avcodec.h:1208
AVRational av_add_q(AVRational b, AVRational c)
Add two rationals.
Definition: rational.c:93
void av_buffer_default_free(void *opaque, uint8_t *data)
Default free callback, which calls av_free() on the buffer data.
Definition: buffer.c:61
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:140
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate, streams, container, programs, metadata, side data, codec and time base.
Definition: dump.c:477
Main libavdevice API header.
Callback for checking whether to abort blocking functions.
Definition: avio.h:50
int avcodec_close(AVCodecContext *avctx)
Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext its...
Definition: utils.c:2843
static volatile int transcode_init_done
Definition: ffmpeg.c:315
libswresample public header
enum AVCodecID id
Definition: avcodec.h:3195
int rate_emu
Definition: ffmpeg.h:351
int width
width and height of the video frame
Definition: frame.h:220
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:71
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1533
static void finish_output_stream(OutputStream *ost)
Definition: ffmpeg.c:1224
static void reset_eagain(void)
Definition: ffmpeg.c:3535
static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
Definition: ffmpeg.c:587
int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, const AVPacket *avpkt)
Decode the video frame of size avpkt->size from avpkt->data into picture.
Definition: utils.c:2352
enum AVPixelFormat hwaccel_pix_fmt
Definition: ffmpeg.h:324
void * av_opt_ptr(const AVClass *class, void *obj, const char *name)
Gets a pointer to the requested field in a struct.
Definition: opt.c:1549
void av_packet_rescale_ts(AVPacket *pkt, AVRational tb_src, AVRational tb_dst)
Convert valid timing fields (timestamps / durations) in a packet from one timebase to another...
Definition: avpacket.c:594
int ffmpeg_parse_options(int argc, char **argv)
Definition: ffmpeg_opt.c:2811
FilterGraph ** filtergraphs
Definition: ffmpeg.c:147
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:424
AVFilterContext * filter
Definition: ffmpeg.h:218
#define CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
Definition: avcodec.h:874
#define CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:763
void(* hwaccel_uninit)(AVCodecContext *s)
Definition: ffmpeg.h:321
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:474
int64_t start
Definition: ffmpeg.h:261
#define CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:824
int y
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3483
av_default_item_name
uint64_t nb_packets
Definition: ffmpeg.h:331
#define AVERROR(e)
Definition: error.h:43
int64_t last_mux_dts
Definition: ffmpeg.h:394
int video_sync_method
Definition: ffmpeg_opt.c:88
int av_opt_eval_flags(void *obj, const AVOption *o, const char *val, int *flags_out)
static int64_t decode_error_stat[2]
Definition: ffmpeg.c:128
#define VSYNC_VSCFR
Definition: ffmpeg.h:55
int avfilter_link_get_channels(AVFilterLink *link)
Get the number of channels of a link.
Definition: avfilter.c:175
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:148
char * sdp_filename
Definition: ffmpeg_opt.c:80
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:76
int last_nb0_frames[3]
Definition: ffmpeg.h:402
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
Definition: ffmpeg.c:2123
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
const char * r
Definition: vf_curves.c:107
const char *const forced_keyframes_const_names[]
Definition: ffmpeg.c:112
int capabilities
Codec capabilities.
Definition: avcodec.h:3200
int initial_padding
Audio only.
Definition: avcodec.h:3015
struct AVCodecParserContext * av_stream_get_parser(const AVStream *s)
Definition: utils.c:122
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
void av_bitstream_filter_close(AVBitStreamFilterContext *bsf)
Release bitstream filter context.
unsigned int nb_programs
Definition: avformat.h:1421
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:199
preferred ID for decoding MPEG audio layer 1, 2 or 3
Definition: avcodec.h:421
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
Definition: avcodec.h:1145
const char * arg
Definition: jacosubdec.c:66
int flags
CODEC_FLAG_*.
Definition: avcodec.h:1335
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:491
AVChapter ** chapters
Definition: avformat.h:1472
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:342
int rc_max_rate
maximum bitrate
Definition: avcodec.h:2327
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:123
simple assert() macros that are a bit more flexible than ISO C assert().
enum AVPacketSideDataType type
Definition: avcodec.h:1114
int av_log_get_level(void)
Get the current log level.
Definition: log.c:377
const char * name
Name of the codec implementation.
Definition: avcodec.h:3188
static int check_recording_time(OutputStream *ost)
Definition: ffmpeg.c:737
int(* init)(AVCodecContext *s)
Definition: ffmpeg.h:70
int side_data_elems
Definition: avcodec.h:1174
AVBufferRef * av_buffer_create(uint8_t *data, int size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:28
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:47
void remove_avoptions(AVDictionary **a, AVDictionary *b)
Definition: ffmpeg.c:538
int force_fps
Definition: ffmpeg.h:406
AVRational avg_frame_rate
Average framerate.
Definition: avformat.h:925
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1205
#define FFMAX(a, b)
Definition: common.h:64
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:126
int qp_hist
Definition: ffmpeg_opt.c:101
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:83
float frame_drop_threshold
Definition: ffmpeg_opt.c:89
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:1168
const AVCodecDescriptor * avcodec_descriptor_get(enum AVCodecID id)
Definition: codec_desc.c:2891
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:2046
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare 2 timestamps each in its own timebases.
Definition: mathematics.c:145
uint32_t end_display_time
Definition: avcodec.h:3510
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:3513
AVCodecContext * codec
Codec context associated with this stream.
Definition: avformat.h:861
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:2304
OutputFilter * filter
Definition: ffmpeg.h:427
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:427
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
AVRational frame_aspect_ratio
Definition: ffmpeg.h:410
#define AV_DISPOSITION_LYRICS
Definition: avformat.h:801
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1484
#define AV_DISPOSITION_FORCED
Track should be used during playback by default.
Definition: avformat.h:809
#define FF_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:630
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1328
static int nb_frames_drop
Definition: ffmpeg.c:127
A bitmap, pict will be set.
Definition: avcodec.h:3464
int nb_output_files
Definition: ffmpeg.c:145
int seekable
A combination of AVIO_SEEKABLE_ flags or 0 when the stream is not seekable.
Definition: avio.h:160
int bit_rate
the average bitrate
Definition: avcodec.h:1305
int void avio_flush(AVIOContext *s)
Force flushing of buffered data.
Definition: aviobuf.c:197
audio channel layout utility functions
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:242
static int transcode(void)
Definition: ffmpeg.c:3872
char filename[1024]
input or output filename
Definition: avformat.h:1348
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
AVPicture pict
data+linesize for the bitmap of this subtitle.
Definition: avcodec.h:3492
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:127
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:247
#define FFMIN(a, b)
Definition: common.h:66
float y
#define VSYNC_AUTO
Definition: ffmpeg.h:51
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:397
int saw_first_ts
Definition: ffmpeg.h:274
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
Definition: ffmpeg.c:1844
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that's been allocated with av_malloc() or another memory allocation functio...
Definition: dict.h:78
#define FFSIGN(a)
Definition: common.h:62
struct OutputStream * ost
Definition: ffmpeg.h:226
ret
Definition: avfilter.c:974
int width
picture width / height.
Definition: avcodec.h:1414
PVOID HANDLE
char * apad
Definition: ffmpeg.h:437
double forced_keyframes_expr_const_values[FKF_NB]
Definition: ffmpeg.h:418
const char * name
Definition: avformat.h:513
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:119
void av_parser_close(AVCodecParserContext *s)
Definition: parser.c:221
int nb_filtergraphs
Definition: ffmpeg.c:148
static av_always_inline int pthread_join(pthread_t thread, void **value_ptr)
Definition: os2threads.h:80
int64_t last_ts
Definition: ffmpeg.h:344
#define FFABS(a)
Definition: common.h:61
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:283
Keep a reference to the frame.
Definition: buffersrc.h:62
int do_pkt_dump
Definition: ffmpeg_opt.c:94
int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, const AVPacket *avpkt)
Decode the audio frame of size avpkt->size from avpkt->data into frame.
Definition: utils.c:2498
int64_t max_frames
Definition: ffmpeg.h:398
#define CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:756
int(* hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame)
Definition: ffmpeg.h:323
static void term_exit_sigsafe(void)
Definition: ffmpeg.c:299
float u
int audio_channels_mapped
Definition: ffmpeg.h:422
int n
Definition: avisynth_c.h:547
AVDictionary * metadata
Definition: avformat.h:916
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:1378
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:963
Usually treated as AVMEDIA_TYPE_DATA.
Definition: avutil.h:193
Opaque data information usually sparse.
Definition: avutil.h:198
static uint32_t codec_flags(enum AVCodecID codec_id)
Definition: cafenc.c:37
const char program_name[]
program name, defined by the program for show_version().
Definition: ffmpeg.c:107
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
Definition: error.h:56
static int got_eagain(void)
Definition: ffmpeg.c:3526
preferred ID for MPEG-1/2 video decoding
Definition: avcodec.h:107
static void sub2video_update(InputStream *ist, AVSubtitle *sub)
Definition: ffmpeg.c:223
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the nearest value in q_list to q.
Definition: rational.c:141
#define AVERROR_EXPERIMENTAL
Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it...
Definition: error.h:72
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:2970
void exit_program(int ret)
Wraps exit with a program-specific cleanup routine.
Definition: cmdutils.c:123
static void flush(AVCodecContext *avctx)
Definition: aacdec.c:514
int av_packet_split_side_data(AVPacket *pkt)
Definition: avpacket.c:404
#define AV_DISPOSITION_VISUAL_IMPAIRED
stream for visual impaired audiences
Definition: avformat.h:811
int ret
Definition: ffmpeg.h:294
int audio_volume
Definition: ffmpeg_opt.c:86
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
Stream structure.
Definition: avformat.h:842
#define av_dlog(pctx,...)
av_dlog macros
Definition: log.h:330
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
#define AVFMT_NOTIMESTAMPS
Format does not need / have any timestamps.
Definition: avformat.h:472
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:232
InputFilter ** filters
Definition: ffmpeg.h:309
int fix_sub_duration
Definition: ffmpeg.h:291
#define VSYNC_DROP
Definition: ffmpeg.h:56
int64_t recording_time
Definition: ffmpeg.h:347
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:4136
Definition: ffmpeg.h:68
int frame_size
Number of samples per channel in an audio frame.
Definition: avcodec.h:2005
static av_always_inline int pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
Definition: os2threads.h:64
#define AV_DISPOSITION_DEFAULT
Definition: avformat.h:797
static int sub2video_get_blank_frame(InputStream *ist)
Definition: ffmpeg.c:166
static void set_encoder_id(OutputFile *of, OutputStream *ost)
Definition: ffmpeg.c:2556
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
int av_bitstream_filter_filter(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx, const char *args, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int keyframe)
Filter bitstream.
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: frame.h:351
int frame_size
Definition: mxfenc.c:1803
attribute_deprecated void av_log_ask_for_sample(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message asking for a sample.
AVCodecParserContext * av_parser_init(int codec_id)
Definition: parser.c:50
#define AV_DISPOSITION_DESCRIPTIONS
Definition: avformat.h:825
int ost_index
Definition: ffmpeg.h:463
AVS_Value src
Definition: avisynth_c.h:482
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: utils.c:719
enum AVMediaType codec_type
Definition: avcodec.h:1249
double ts_scale
Definition: ffmpeg.h:273
int unavailable
Definition: ffmpeg.h:439
void av_thread_message_queue_set_err_send(AVThreadMessageQueue *mq, int err)
Set the sending error code.
const AVRational * supported_framerates
array of supported framerates, or NULL if any, array is terminated by {0,0}
Definition: avcodec.h:3201
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer...
Definition: options.c:162
Immediately push the frame to the output.
Definition: buffersrc.h:55
static int init_input_stream(int ist_index, char *error, int error_len)
Definition: ffmpeg.c:2418
enum AVCodecID codec_id
Definition: avcodec.h:1258
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:312
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:253
int av_opt_set_dict(void *obj, AVDictionary **options)
Set all the options from a given dictionary on an object.
Definition: opt.c:1479
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg.c:181
float max_error_rate
Definition: ffmpeg_opt.c:104
int sample_rate
samples per second
Definition: avcodec.h:1985
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:199
uint64_t frames_encoded
Definition: ffmpeg.h:456
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
Definition: ffmpeg.c:1978
AVIOContext * pb
I/O context.
Definition: avformat.h:1314
int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb)
Rescale a timestamp while preserving known durations.
Definition: mathematics.c:166
int ist_index
Definition: ffmpeg.h:341
const struct AVCodecTag *const * codec_tag
List of supported codec_id-codec_tag pairs, ordered by "better choice first".
Definition: avformat.h:538
uint8_t flags
Definition: pixdesc.h:90
int debug
debug
Definition: avcodec.h:2565
static void print_sdp(void)
Definition: ffmpeg.c:2326
const char * graph_desc
Definition: ffmpeg.h:236
int guess_layout_max
Definition: ffmpeg.h:279
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
int64_t start_time
Definition: ffmpeg.h:345
#define AVFMT_RAWPICTURE
Format wants AVPicture structure for raw picture data.
Definition: avformat.h:468
main external API structure.
Definition: avcodec.h:1241
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:341
static void ffmpeg_cleanup(int ret)
Definition: ffmpeg.c:426
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:765
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:2824
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:252
const char * attachment_filename
Definition: ffmpeg.h:441
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:1273
static int check_output_constraints(InputStream *ist, OutputStream *ost)
Definition: ffmpeg.c:1707
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
a very simple circular buffer FIFO implementation
void assert_avoptions(AVDictionary *m)
Definition: ffmpeg.c:547
AVCodecContext * enc_ctx
Definition: ffmpeg.h:396
void * buf
Definition: avisynth_c.h:553
AVFrame * decoded_frame
Definition: ffmpeg.h:258
GLint GLenum type
Definition: opengl_enc.c:105
int extradata_size
Definition: avcodec.h:1356
Perform non-blocking operation.
Definition: threadmessage.h:31
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:69
Replacements for frequently missing libm functions.
struct AVBitStreamFilter * filter
Definition: avcodec.h:5075
AVCodecContext * dec_ctx
Definition: ffmpeg.h:256
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
Perform a step of transcoding for the specified filter graph.
Definition: ffmpeg.c:3783
AVStream * st
Definition: ffmpeg.h:249
int * audio_channels_map
Definition: ffmpeg.h:421
#define VSYNC_PASSTHROUGH
Definition: ffmpeg.h:52
Describe the class of an AVClass context structure.
Definition: log.h:67
int sample_rate
Sample rate of the audio data.
Definition: frame.h:422
int configure_filtergraph(FilterGraph *fg)
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
callback to negotiate the pixel format
Definition: avcodec.h:1495
int av_frame_get_channels(const AVFrame *frame)
OutputStream ** output_streams
Definition: ffmpeg.c:142
int index
Definition: gxfenc.c:89
rational number numerator/denominator
Definition: rational.h:43
int file_index
Definition: ffmpeg.h:380
int av_thread_message_queue_alloc(AVThreadMessageQueue **mq, unsigned nelem, unsigned elsize)
Allocate a new message queue.
Definition: threadmessage.c:48
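A small sketch of the producer/consumer use these queues are meant for (ffmpeg.c uses them to pass AVPackets out of per-input demuxer threads); the queue size and the single-function layout are illustrative:

#include <libavcodec/avcodec.h>
#include <libavutil/threadmessage.h>

static int queue_demo(void)
{
    AVThreadMessageQueue *q = NULL;
    AVPacket pkt;
    int ret = av_thread_message_queue_alloc(&q, 8, sizeof(AVPacket));
    if (ret < 0)
        return ret;

    /* producer side: hand over a (possibly empty) packet */
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;
    av_thread_message_queue_send(q, &pkt, 0);   /* blocks while the queue is full */

    /* consumer side: non-blocking receive */
    if (av_thread_message_queue_recv(q, &pkt, AV_THREAD_MESSAGE_NONBLOCK) >= 0)
        av_free_packet(&pkt);

    av_thread_message_queue_free(&q);
    return 0;
}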
static int current_time
Definition: ffmpeg.c:130
int64_t sync_opts
Definition: ffmpeg.h:389
char * vstats_filename
Definition: ffmpeg_opt.c:79
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds...
Definition: cmdutils.c:152
char * disposition
Definition: ffmpeg.h:444
#define mid_pred
Definition: mathops.h:96
AVMediaType
Definition: avutil.h:192
discard useless packets, such as 0-sized packets in AVI
Definition: avcodec.h:664
static av_always_inline av_const long int lrint(double x)
Definition: libm.h:148
int avio_open2(AVIOContext **s, const char *url, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: aviobuf.c:914
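A sketch of how a muxing context without AVFMT_NOFILE gets its I/O context opened; the helper name is illustrative and error reporting is reduced to a log line:

#include <libavformat/avformat.h>

static int open_output_io(AVFormatContext *oc, const char *url)
{
    int ret = 0;
    if (!(oc->oformat->flags & AVFMT_NOFILE)) {
        ret = avio_open2(&oc->pb, url, AVIO_FLAG_WRITE,
                         &oc->interrupt_callback, NULL);
        if (ret < 0)
            av_log(NULL, AV_LOG_ERROR, "Could not open '%s'\n", url);
    }
    return ret;
}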
int nb_streams_warn
Definition: ffmpeg.h:350
int(* get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags)
This callback is called at the beginning of each frame to get data buffer(s) for it.
Definition: avcodec.h:2246
AVDictionary * decoder_opts
Definition: ffmpeg.h:276
int autorotate
Definition: ffmpeg.h:281
const char * name
Name of the codec described by this descriptor.
Definition: avcodec.h:568
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:1330
int showed_multi_packet_warning
Definition: ffmpeg.h:275
#define snprintf
Definition: snprintf.h:34
int frame_bits_per_raw_sample
Definition: ffmpeg_opt.c:103
void avformat_free_context(AVFormatContext *s)
Free an AVFormatContext and all its streams.
Definition: utils.c:3609
int64_t ts_offset
Definition: ffmpeg.h:343
uint32_t DWORD
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:328
static void do_subtitle_out(AVFormatContext *s, OutputStream *ost, InputStream *ist, AVSubtitle *sub)
Definition: ffmpeg.c:799
static int transcode_step(void)
Run a single step of transcoding.
Definition: ffmpeg.c:3829
char * filters_script
filtergraph script associated to the -filter_script option
Definition: ffmpeg.h:430
int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes)
Return audio frame duration.
Definition: utils.c:3336
misc parsing utilities
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:86
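A hedged sketch of the push/pull cycle around a filtergraph: feed one decoded frame into the buffer source, then drain whatever the buffer sink has ready (the two filter contexts are assumed to come from an already configured graph):

#include <libavfilter/buffersrc.h>
#include <libavfilter/buffersink.h>
#include <libavutil/frame.h>

static int filter_one_frame(AVFilterContext *buffersrc_ctx,
                            AVFilterContext *buffersink_ctx,
                            AVFrame *in, AVFrame *out)
{
    int ret = av_buffersrc_add_frame(buffersrc_ctx, in);
    if (ret < 0)
        return ret;
    while ((ret = av_buffersink_get_frame(buffersink_ctx, out)) >= 0) {
        /* hand "out" to the encoder or display here */
        av_frame_unref(out);
    }
    /* EAGAIN/EOF only mean the graph wants more input or has drained */
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}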
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: utils.c:1475
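A minimal demuxing loop built around av_read_frame(); the helper name and packet handling are placeholders:

#include <libavformat/avformat.h>

static int dump_packets(const char *filename)
{
    AVFormatContext *ic = NULL;
    AVPacket pkt;
    int ret;

    av_register_all();   /* once per process is enough */
    if ((ret = avformat_open_input(&ic, filename, NULL, NULL)) < 0)
        return ret;
    if ((ret = avformat_find_stream_info(ic, NULL)) < 0)
        goto end;
    while (av_read_frame(ic, &pkt) >= 0) {
        /* pkt.stream_index says which stream this packet belongs to */
        av_free_packet(&pkt);
    }
    ret = 0;
end:
    avformat_close_input(&ic);   /* also frees ic and sets it to NULL */
    return ret;
}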
size_t av_strlcat(char *dst, const char *src, size_t size)
Append the string src to the string dst, but to a total length of no more than size - 1 bytes...
Definition: avstring.c:93
AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx)
Get the frame rate of the input.
Definition: buffersink.c:358
This struct describes the properties of a single codec described by an AVCodecID. ...
Definition: avcodec.h:560
int64_t pkt_pts
PTS copied from the AVPacket that was decoded to produce this frame.
Definition: frame.h:262
AVFrame * filtered_frame
Definition: ffmpeg.h:399
int source_index
Definition: ffmpeg.h:382
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:265
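A sketch of allocating a writable video frame: width, height and format must be filled in before av_frame_get_buffer() is called (the 32-byte alignment is a common, not mandatory, choice):

#include <libavutil/frame.h>

static AVFrame *alloc_video_frame(int w, int h, enum AVPixelFormat fmt)
{
    AVFrame *f = av_frame_alloc();
    if (!f)
        return NULL;
    f->width  = w;
    f->height = h;
    f->format = fmt;
    if (av_frame_get_buffer(f, 32) < 0) {
        av_frame_free(&f);
        return NULL;
    }
    return f;
}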
static volatile int received_nb_signals
Definition: ffmpeg.c:314
int copy_prior_start
Definition: ffmpeg.h:443
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:462
int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
Read the file with name filename, and put its content in a newly allocated 0-terminated buffer...
Definition: cmdutils.c:1878
int global_quality
Global quality for codecs which cannot change it per frame.
Definition: avcodec.h:1321
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
Definition: ffmpeg.c:578
static int64_t pts
Global timestamp for the audio frames.
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:72
void av_thread_message_queue_free(AVThreadMessageQueue **mq)
Free a message queue.
Definition: threadmessage.c:84
int nb_filters
Definition: ffmpeg.h:310
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:133
static int flags
Definition: cpu.c:47
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
Definition: ffmpeg.c:2370
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds. ...
Definition: avformat.h:1357
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:182
uint8_t level
Definition: svq3.c:150
AVExpr * forced_keyframes_pexpr
Definition: ffmpeg.h:417
int64_t dts
dts of the last packet read for this stream (in AV_TIME_BASE units)
Definition: ffmpeg.h:265
int av_strerror(int errnum, char *errbuf, size_t errbuf_size)
Put a description of the AVERROR code errnum in errbuf.
Definition: error.c:68
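A sketch of the error-reporting idiom this enables, similar in spirit to print_error() from cmdutils.c; the helper below is only illustrative:

#include <stdio.h>
#include <libavutil/error.h>
#include <libavutil/log.h>

static void report_error(const char *filename, int err)
{
    char errbuf[AV_ERROR_MAX_STRING_SIZE];
    if (av_strerror(err, errbuf, sizeof(errbuf)) < 0)
        snprintf(errbuf, sizeof(errbuf), "error code %d", err);
    av_log(NULL, AV_LOG_ERROR, "%s: %s\n", filename, errbuf);
}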
int resample_sample_fmt
Definition: ffmpeg.h:286
int forced_kf_count
Definition: ffmpeg.h:414
int64_t start
Definition: avformat.h:1238
OSTFinished finished
Definition: ffmpeg.h:438
char * forced_keyframes
Definition: ffmpeg.h:416
uint64_t data_size
Definition: ffmpeg.h:329
#define CODEC_CAP_SUBFRAMES
Codec can output multiple frames per AVPacket. Normally demuxers return one frame at a time...
Definition: avcodec.h:847
int resample_width
Definition: ffmpeg.h:283
int64_t next_pts
synthetic pts for the next decode frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:267
Main libavformat public API header.
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:1032
AVPacketSideData * side_data
Additional packet data that can be provided by the container.
Definition: avcodec.h:1173
struct FilterGraph * graph
Definition: ffmpeg.h:227
static void filter(MpegAudioContext *s, int ch, const short *samples, int incr)
uint64_t limit_filesize
Definition: ffmpeg.h:466
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:63
static void print_final_stats(int64_t total_size)
Definition: ffmpeg.c:1338
AVIOContext * progress_avio
Definition: ffmpeg.c:131
int main(int argc, char **argv)
Definition: ffmpeg.c:4037
int reinit_filters
Definition: ffmpeg.h:312
#define AVFMT_NOFILE
Demuxer will use avio_open, no opened file should be provided by the caller.
Definition: avformat.h:465
#define VSYNC_CFR
Definition: ffmpeg.h:53
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
Set the frame size for an audio buffer sink.
Definition: buffersink.c:261
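A sketch of the typical reason to call it: audio encoders that need fixed-size frames should receive exactly that size from the sink (buffersink_ctx and enc are assumed to be already configured):

#include <libavcodec/avcodec.h>
#include <libavfilter/buffersink.h>

static void match_sink_to_encoder(AVFilterContext *buffersink_ctx,
                                  AVCodecContext *enc)
{
    if (enc->frame_size &&
        !(enc->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
        av_buffersink_set_frame_size(buffersink_ctx, enc->frame_size);
}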
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base...
Definition: avformat.h:894
static double c[64]
#define AVFMT_NOSTREAMS
Format does not require any streams.
Definition: avformat.h:477
AVStream * st
Definition: muxing.c:54
int disposition
AV_DISPOSITION_* bit field.
Definition: avformat.h:905
static AVCodecContext * dec_ctx
uint32_t start_display_time
Definition: avcodec.h:3509
int pts_wrap_bits
number of bits in pts (used for wrapping control)
Definition: avformat.h:1008
uint64_t samples_encoded
Definition: ffmpeg.h:457
AVRational time_base
time base in which the start/end timestamps are specified
Definition: avformat.h:1237
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
Definition: mem.c:206
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:49
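A sketch of the common encode setup: a stack packet is initialized and its payload left for the encoder to allocate (avcodec_encode_video2() belongs to the same API generation as this listing; the helper name is illustrative):

#include <libavcodec/avcodec.h>

static int encode_one_frame(AVCodecContext *enc, AVFrame *frame, AVPacket *pkt)
{
    int got_packet = 0, ret;

    av_init_packet(pkt);   /* reset the optional fields */
    pkt->data = NULL;      /* let the encoder allocate the payload */
    pkt->size = 0;

    ret = avcodec_encode_video2(enc, pkt, frame, &got_packet);
    if (ret < 0)
        return ret;
    return got_packet;     /* 1 if pkt now holds a compressed frame */
}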
int thread_safe_callbacks
Set by the client if its custom get_buffer() callback can be called synchronously from another thread...
Definition: avcodec.h:2782
int64_t nb_frames
number of frames in this stream if known or 0
Definition: avformat.h:903
char * key
Definition: dict.h:87
static FILE * vstats_file
Definition: ffmpeg.c:110
int den
denominator
Definition: rational.h:45
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents...
Definition: cmdutils.c:82
AVFrame * last_frame
Definition: ffmpeg.h:400
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
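A sketch of sending a runtime command to a whole class of filter instances, as ffmpeg's keyboard/sendcmd handling does; the target, command and argument strings here are only examples (drawtext's command() handler appears elsewhere in this listing):

#include <libavfilter/avfilter.h>

static int update_drawtext(AVFilterGraph *graph, const char *args)
{
    char res[4096] = { 0 };
    /* "drawtext" addresses every drawtext instance in the graph */
    return avfilter_graph_send_command(graph, "drawtext", "reinit", args,
                                       res, sizeof(res), 0);
}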
int copy_ts
Definition: ffmpeg_opt.c:95
struct AVInputFormat * iformat
The input container format.
Definition: avformat.h:1284
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: utils.c:3644
AVFormatContext * ctx
Definition: ffmpeg.h:338
AVCodec * enc
Definition: ffmpeg.h:397
AVSubtitle subtitle
Definition: ffmpeg.h:295
int eof_reached
Definition: ffmpeg.h:339
int forced_kf_index
Definition: ffmpeg.h:415
static void do_audio_out(AVFormatContext *s, OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:750
#define AVFMT_VARIABLE_FPS
Format allows variable fps.
Definition: avformat.h:475
char * avfilter
Definition: ffmpeg.h:428
uint8_t * name
Definition: ffmpeg.h:221
char * value
Definition: dict.h:88
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:372
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
#define NAN
Definition: math.h:28
float dts_delta_threshold
Definition: ffmpeg_opt.c:83
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:707
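A sketch of the parse/evaluate/free cycle used for expressions such as -force_key_frames "expr:..."; the constant names below are a reduced, illustrative subset of forced_keyframes_const_names:

#include <math.h>
#include <libavutil/eval.h>

static const char *const kf_const_names[] = { "n", "t", NULL };

static double eval_kf_expr(const char *expr_str, double n, double t)
{
    AVExpr *e = NULL;
    double const_values[] = { n, t };
    double res = NAN;

    if (av_expr_parse(&e, expr_str, kf_const_names,
                      NULL, NULL, NULL, NULL, 0, NULL) >= 0)
        res = av_expr_eval(e, const_values, NULL);
    av_expr_free(e);   /* a no-op while e is still NULL */
    return res;
}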
int channels
number of audio channels
Definition: avcodec.h:1986
#define av_log2
Definition: intmath.h:105
int top_field_first
Definition: ffmpeg.h:407
OutputFilter ** outputs
Definition: ffmpeg.h:243
InputFile ** input_files
Definition: ffmpeg.c:139
static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.c:2408
void av_log_set_flags(int arg)
Definition: log.c:387
int key_frame
1 -> keyframe, 0 -> not
Definition: frame.h:237
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:220
AVDictionary * bsf_args
Definition: ffmpeg.h:436
AVFormatContext * ctx
Definition: ffmpeg.h:461
#define AV_DISPOSITION_COMMENT
Definition: avformat.h:800
void show_usage(void)
Definition: ffmpeg_opt.c:2761
uint8_t pi
Definition: audio_convert.c
An instance of a filter.
Definition: avfilter.h:633
#define LIBAVCODEC_IDENT
Definition: version.h:43
char * hwaccel_device
Definition: ffmpeg.h:316
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1161
AVDictionary * encoder_opts
Definition: ffmpeg.h:433
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
int av_write_trailer(AVFormatContext *s)
Write the stream trailer to an output media file and free the file private data.
Definition: mux.c:967
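A sketch of finishing an output file once all packets have been written; whether the AVIOContext must be closed depends on AVFMT_NOFILE, exactly as when it was opened:

#include <libavformat/avformat.h>

static int finish_output(AVFormatContext *oc)
{
    int ret = av_write_trailer(oc);          /* flushes and frees muxer private data */
    if (!(oc->oformat->flags & AVFMT_NOFILE))
        avio_closep(&oc->pb);                /* closes the file and NULLs the pointer */
    avformat_free_context(oc);
    return ret;
}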
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffmpeg.c:108
int repeat_pict
This field is used for proper frame duration computation in lavf.
Definition: avcodec.h:4265
int height
Definition: frame.h:220
InputFilter ** inputs
Definition: ffmpeg.h:241
#define av_freep(p)
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:1982
enum AVPixelFormat hwaccel_retrieved_pix_fmt
Definition: ffmpeg.h:325
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:628
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key, ignoring the suffix of the found key string.
Definition: dict.h:72
uint8_t * av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:324
OutputFile ** output_files
Definition: ffmpeg.c:144
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
#define av_malloc_array(a, b)
static void flush_encoders(void)
Definition: ffmpeg.c:1629
int copy_tb
Definition: ffmpeg_opt.c:97
static volatile int received_sigterm
Definition: ffmpeg.c:313
#define FFSWAP(type, a, b)
Definition: common.h:69
int discard
Definition: ffmpeg.h:250
static int get_input_packet(InputFile *f, AVPacket *pkt)
Definition: ffmpeg.c:3506
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2011
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Allocate new information of a packet.
Definition: avpacket.c:299
int stream_index
Definition: avcodec.h:1164
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avformat.h:884
enum HWAccelID hwaccel_id
Definition: ffmpeg.h:315
enum AVSubtitleType type
Definition: avcodec.h:3493
int64_t first_pts
Definition: ffmpeg.h:392
int nb_inputs
Definition: ffmpeg.h:242
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:907
int av_copy_packet_side_data(AVPacket *dst, const AVPacket *src)
Copy packet side data.
Definition: avpacket.c:223
#define DECODING_FOR_OST
Definition: ffmpeg.h:253
int index
Definition: ffmpeg.h:381
AVRational r_frame_rate
Real base framerate of the stream.
Definition: avformat.h:1062
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:85
AVPixelFormat
Pixel format.
Definition: pixfmt.h:61
uint64_t resample_channel_layout
Definition: ffmpeg.h:289
OSTFinished
Definition: ffmpeg.h:374
This structure stores compressed data.
Definition: avcodec.h:1139
void av_register_all(void)
Initialize libavformat and register all the muxers, demuxers and protocols.
Definition: allformats.c:51
int avio_closep(AVIOContext **s)
Close the resource accessed by the AVIOContext *s, free it and set the pointer pointing to it to NULL...
Definition: aviobuf.c:955
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVSubtitle *sub)
Definition: utils.c:2187
int delay
Codec delay.
Definition: avcodec.h:1402
int debug_ts
Definition: ffmpeg_opt.c:98
static OutputStream * choose_output(void)
Select the output stream to process.
Definition: ffmpeg.c:3291
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:225
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:250
static void sigterm_handler(int sig)
Definition: ffmpeg.c:319
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1155
int64_t av_stream_get_end_pts(const AVStream *st)
Returns the pts of the last muxed packet + its duration.
Definition: utils.c:117
void * opaque
Private data of the user; can be used to carry app-specific data.
Definition: avcodec.h:1298
for(j=16;j >0;--j)
#define FFMAX3(a, b, c)
Definition: common.h:65
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:241
#define tb
Definition: regdef.h:68
AVProgram ** programs
Definition: avformat.h:1422
#define AV_DISPOSITION_ORIGINAL
Definition: avformat.h:799
int avio_printf(AVIOContext *s, const char *fmt,...) av_printf_format(2
InputStream ** input_streams
Definition: ffmpeg.c:137
const HWAccel hwaccels[]
Definition: ffmpeg_opt.c:66
Definition: ffmpeg.h:368
static void close_output_stream(OutputStream *ost)
Definition: ffmpeg.c:726
struct InputStream::@25 prev_sub
uint8_t * subtitle_header
Header containing style information for text subtitles.
Definition: avcodec.h:2957