ffmpeg.c
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #if HAVE_ISATTY
34 #if HAVE_IO_H
35 #include <io.h>
36 #endif
37 #if HAVE_UNISTD_H
38 #include <unistd.h>
39 #endif
40 #endif
41 #include "libavformat/avformat.h"
42 #include "libavdevice/avdevice.h"
43 #include "libswscale/swscale.h"
45 #include "libavutil/opt.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/colorspace.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavformat/os_support.h"
62 
63 #include "libavformat/ffm.h" // not public API
64 
65 # include "libavfilter/avcodec.h"
66 # include "libavfilter/avfilter.h"
68 # include "libavfilter/buffersrc.h"
69 # include "libavfilter/buffersink.h"
70 
71 #if HAVE_SYS_RESOURCE_H
72 #include <sys/time.h>
73 #include <sys/types.h>
74 #include <sys/resource.h>
75 #elif HAVE_GETPROCESSTIMES
76 #include <windows.h>
77 #endif
78 #if HAVE_GETPROCESSMEMORYINFO
79 #include <windows.h>
80 #include <psapi.h>
81 #endif
82 
83 #if HAVE_SYS_SELECT_H
84 #include <sys/select.h>
85 #endif
86 
87 #if HAVE_TERMIOS_H
88 #include <fcntl.h>
89 #include <sys/ioctl.h>
90 #include <sys/time.h>
91 #include <termios.h>
92 #elif HAVE_KBHIT
93 #include <conio.h>
94 #endif
95 
96 #if HAVE_PTHREADS
97 #include <pthread.h>
98 #endif
99 
100 #include <time.h>
101 
102 #include "ffmpeg.h"
103 #include "cmdutils.h"
104 
105 #include "libavutil/avassert.h"
106 
107 const char program_name[] = "ffmpeg";
108 const int program_birth_year = 2000;
109 
110 static FILE *vstats_file;
111 
112 static void do_video_stats(OutputStream *ost, int frame_size);
113 static int64_t getutime(void);
114 
115 static int run_as_daemon = 0;
116 static int64_t video_size = 0;
117 static int64_t audio_size = 0;
118 static int64_t subtitle_size = 0;
119 static int64_t extra_size = 0;
120 static int nb_frames_dup = 0;
121 static int nb_frames_drop = 0;
122 
123 static int current_time;
124 AVIOContext *progress_avio = NULL;
125 
126 static uint8_t *subtitle_out;
127 
128 #if HAVE_PTHREADS
129 /* signal to input threads that they should exit; set by the main thread */
130 static int transcoding_finished;
131 #endif
132 
133 #define DEFAULT_PASS_LOGFILENAME_PREFIX "ffmpeg2pass"
134 
135 InputStream **input_streams = NULL;
136 int        nb_input_streams = 0;
137 InputFile   **input_files   = NULL;
138 int        nb_input_files   = 0;
139 
140 OutputStream **output_streams = NULL;
141 int         nb_output_streams = 0;
142 OutputFile   **output_files   = NULL;
143 int         nb_output_files   = 0;
144 
145 FilterGraph **filtergraphs;
146 int        nb_filtergraphs;
147 
148 #if HAVE_TERMIOS_H
149 
150 /* init terminal so that we can grab keys */
151 static struct termios oldtty;
152 static int restore_tty;
153 #endif
154 
155 static void free_input_threads(void);
156 
157 
158 /* sub2video hack:
159  Convert subtitles to video with alpha to insert them in filter graphs.
160  This is a temporary solution until libavfilter gets real subtitles support.
161  */
162 
163 
164 
165 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
166  AVSubtitleRect *r)
167 {
168  uint32_t *pal, *dst2;
169  uint8_t *src, *src2;
170  int x, y;
171 
172  if (r->type != SUBTITLE_BITMAP) {
173  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
174  return;
175  }
176  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
177  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle overflowing\n");
178  return;
179  }
180 
181  dst += r->y * dst_linesize + r->x * 4;
182  src = r->pict.data[0];
183  pal = (uint32_t *)r->pict.data[1];
184  for (y = 0; y < r->h; y++) {
185  dst2 = (uint32_t *)dst;
186  src2 = src;
187  for (x = 0; x < r->w; x++)
188  *(dst2++) = pal[*(src2++)];
189  dst += dst_linesize;
190  src += r->pict.linesize[0];
191  }
192 }
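/* Illustrative example (hypothetical palette values): the copy loop above
 * expands PAL8 subtitle pixels into 32-bit pixels with alpha. Assuming
 * pal[0] = 0x00000000 (fully transparent) and pal[1] = 0xFF00FF00 (opaque
 * green), a source row of indices {1, 0, 1} becomes the destination row
 * {0xFF00FF00, 0x00000000, 0xFF00FF00}: each index is replaced by its
 * palette entry, alpha included. */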
193 
194 static void sub2video_push_ref(InputStream *ist, int64_t pts)
195 {
196  AVFilterBufferRef *ref = ist->sub2video.ref;
197  int i;
198 
199  ist->sub2video.last_pts = ref->pts = pts;
200  for (i = 0; i < ist->nb_filters; i++)
201  av_buffersrc_add_ref(ist->filters[i]->filter,
202  avfilter_ref_buffer(ref, ~0),
203  AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT |
204  AV_BUFFERSRC_FLAG_NO_COPY |
205  AV_BUFFERSRC_FLAG_PUSH);
206 }
207 
208 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
209 {
210  int w = ist->sub2video.w, h = ist->sub2video.h;
211  AVFilterBufferRef *ref = ist->sub2video.ref;
212  int8_t *dst;
213  int dst_linesize;
214  int num_rects, i;
215  int64_t pts, end_pts;
216 
217  if (!ref)
218  return;
219  if (sub) {
220  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000,
221  AV_TIME_BASE_Q, ist->st->time_base);
222  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000,
223  AV_TIME_BASE_Q, ist->st->time_base);
224  num_rects = sub->num_rects;
225  } else {
226  pts = ist->sub2video.end_pts;
227  end_pts = INT64_MAX;
228  num_rects = 0;
229  }
230  dst = ref->data [0];
231  dst_linesize = ref->linesize[0];
232  memset(dst, 0, h * dst_linesize);
233  for (i = 0; i < num_rects; i++)
234  sub2video_copy_rect(dst, dst_linesize, w, h, sub->rects[i]);
235  sub2video_push_ref(ist, pts);
236  ist->sub2video.end_pts = end_pts;
237 }
238 
239 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
240 {
241  InputFile *infile = input_files[ist->file_index];
242  int i, j, nb_reqs;
243  int64_t pts2;
244 
245  /* When a frame is read from a file, examine all sub2video streams in
246  the same file and send the sub2video frame again. Otherwise, decoded
247  video frames could be accumulating in the filter graph while a filter
248  (possibly overlay) is desperately waiting for a subtitle frame. */
249  for (i = 0; i < infile->nb_streams; i++) {
250  InputStream *ist2 = input_streams[infile->ist_index + i];
251  if (!ist2->sub2video.ref)
252  continue;
253  /* subtitles are usually muxed ahead of other streams;
254  if not, subtracting a larger time here is necessary */
255  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
256  /* do not send the heartbeat frame if the subtitle is already ahead */
257  if (pts2 <= ist2->sub2video.last_pts)
258  continue;
259  if (pts2 >= ist2->sub2video.end_pts)
260  sub2video_update(ist2, NULL);
261  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
262  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
263  if (nb_reqs)
264  sub2video_push_ref(ist2, pts2);
265  }
266 }
267 
268 static void sub2video_flush(InputStream *ist)
269 {
270  int i;
271 
272  for (i = 0; i < ist->nb_filters; i++)
273  av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
274 }
275 
276 /* end of sub2video hack */
277 
278 void term_exit(void)
279 {
280  av_log(NULL, AV_LOG_QUIET, "%s", "");
281 #if HAVE_TERMIOS_H
282  if(restore_tty)
283  tcsetattr (0, TCSANOW, &oldtty);
284 #endif
285 }
286 
287 static volatile int received_sigterm = 0;
288 static volatile int received_nb_signals = 0;
289 
290 static void
291 sigterm_handler(int sig)
292 {
293  received_sigterm = sig;
294  received_nb_signals++;
295  term_exit();
296  if(received_nb_signals > 3)
297  exit(123);
298 }
299 
300 void term_init(void)
301 {
302 #if HAVE_TERMIOS_H
303  if(!run_as_daemon){
304  struct termios tty;
305  int istty = 1;
306 #if HAVE_ISATTY
307  istty = isatty(0) && isatty(2);
308 #endif
309  if (istty && tcgetattr (0, &tty) == 0) {
310  oldtty = tty;
311  restore_tty = 1;
312  atexit(term_exit);
313 
314  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
315  |INLCR|IGNCR|ICRNL|IXON);
316  tty.c_oflag |= OPOST;
317  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
318  tty.c_cflag &= ~(CSIZE|PARENB);
319  tty.c_cflag |= CS8;
320  tty.c_cc[VMIN] = 1;
321  tty.c_cc[VTIME] = 0;
322 
323  tcsetattr (0, TCSANOW, &tty);
324  }
325  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
326  }
327 #endif
329 
330  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
331  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
332 #ifdef SIGXCPU
333  signal(SIGXCPU, sigterm_handler);
334 #endif
335 }
336 
337 /* read a key without blocking */
338 static int read_key(void)
339 {
340  unsigned char ch;
341 #if HAVE_TERMIOS_H
342  int n = 1;
343  struct timeval tv;
344  fd_set rfds;
345 
346  FD_ZERO(&rfds);
347  FD_SET(0, &rfds);
348  tv.tv_sec = 0;
349  tv.tv_usec = 0;
350  n = select(1, &rfds, NULL, NULL, &tv);
351  if (n > 0) {
352  n = read(0, &ch, 1);
353  if (n == 1)
354  return ch;
355 
356  return n;
357  }
358 #elif HAVE_KBHIT
359 # if HAVE_PEEKNAMEDPIPE
360  static int is_pipe;
361  static HANDLE input_handle;
362  DWORD dw, nchars;
363  if(!input_handle){
364  input_handle = GetStdHandle(STD_INPUT_HANDLE);
365  is_pipe = !GetConsoleMode(input_handle, &dw);
366  }
367 
368  if (stdin->_cnt > 0) {
369  read(0, &ch, 1);
370  return ch;
371  }
372  if (is_pipe) {
373  /* When running under a GUI, you will end here. */
374  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
375  // input pipe may have been closed by the program that ran ffmpeg
376  return -1;
377  }
378  //Read it
379  if(nchars != 0) {
380  read(0, &ch, 1);
381  return ch;
382  }else{
383  return -1;
384  }
385  }
386 # endif
387  if(kbhit())
388  return(getch());
389 #endif
390  return -1;
391 }
392 
393 static int decode_interrupt_cb(void *ctx)
394 {
395  return received_nb_signals > 1;
396 }
397 
398 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
399 
400 static void exit_program(void)
401 {
402  int i, j;
403 
404  for (i = 0; i < nb_filtergraphs; i++) {
405  avfilter_graph_free(&filtergraphs[i]->graph);
406  for (j = 0; j < filtergraphs[i]->nb_inputs; j++) {
407  av_freep(&filtergraphs[i]->inputs[j]->name);
408  av_freep(&filtergraphs[i]->inputs[j]);
409  }
410  av_freep(&filtergraphs[i]->inputs);
411  for (j = 0; j < filtergraphs[i]->nb_outputs; j++) {
412  av_freep(&filtergraphs[i]->outputs[j]->name);
413  av_freep(&filtergraphs[i]->outputs[j]);
414  }
415  av_freep(&filtergraphs[i]->outputs);
416  av_freep(&filtergraphs[i]);
417  }
418  av_freep(&filtergraphs);
419 
420  av_freep(&subtitle_out);
421 
422  /* close files */
423  for (i = 0; i < nb_output_files; i++) {
424  AVFormatContext *s = output_files[i]->ctx;
425  if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
426  avio_close(s->pb);
427  avformat_free_context(s);
428  av_dict_free(&output_files[i]->opts);
429  av_freep(&output_files[i]);
430  }
431  for (i = 0; i < nb_output_streams; i++) {
432  AVBitStreamFilterContext *bsfc = output_streams[i]->bitstream_filters;
433  while (bsfc) {
434  AVBitStreamFilterContext *next = bsfc->next;
435  av_bitstream_filter_close(bsfc);
436  bsfc = next;
437  }
438  output_streams[i]->bitstream_filters = NULL;
439  avcodec_free_frame(&output_streams[i]->filtered_frame);
440 
441  av_freep(&output_streams[i]->forced_keyframes);
442  av_freep(&output_streams[i]->avfilter);
443  av_freep(&output_streams[i]->logfile_prefix);
444  av_freep(&output_streams[i]);
445  }
446 #if HAVE_PTHREADS
447  free_input_threads();
448 #endif
449  for (i = 0; i < nb_input_files; i++) {
450  avformat_close_input(&input_files[i]->ctx);
451  av_freep(&input_files[i]);
452  }
453  for (i = 0; i < nb_input_streams; i++) {
454  avcodec_free_frame(&input_streams[i]->decoded_frame);
455  av_dict_free(&input_streams[i]->opts);
456  free_buffer_pool(&input_streams[i]->buffer_pool);
457  avfilter_unref_bufferp(&input_streams[i]->sub2video.ref);
458  av_freep(&input_streams[i]->filters);
459  av_freep(&input_streams[i]);
460  }
461 
462  if (vstats_file)
463  fclose(vstats_file);
464  av_free(vstats_filename);
465 
466  av_freep(&input_streams);
467  av_freep(&input_files);
468  av_freep(&output_streams);
469  av_freep(&output_files);
470 
471  uninit_opts();
472 
473  avfilter_uninit();
474  avformat_network_deinit();
475 
476  if (received_sigterm) {
477  av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
478  (int) received_sigterm);
479  }
480 }
481 
482 void assert_avoptions(AVDictionary *m)
483 {
484  AVDictionaryEntry *t;
485  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
486  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
487  exit(1);
488  }
489 }
490 
491 static void abort_codec_experimental(AVCodec *c, int encoder)
492 {
493  exit(1);
494 }
495 
496 static void update_benchmark(const char *fmt, ...)
497 {
498  if (do_benchmark_all) {
499  int64_t t = getutime();
500  va_list va;
501  char buf[1024];
502 
503  if (fmt) {
504  va_start(va, fmt);
505  vsnprintf(buf, sizeof(buf), fmt, va);
506  va_end(va);
507  printf("bench: %8"PRIu64" %s \n", t - current_time, buf);
508  }
509  current_time = t;
510  }
511 }
512 
513 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
514 {
515  AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
516  AVCodecContext *avctx = ost->st->codec;
517  int ret;
518 
519  if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
520  (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
521  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
522 
523  if ((avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) && pkt->dts != AV_NOPTS_VALUE) {
524  int64_t max = ost->st->cur_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
525  if (ost->st->cur_dts && ost->st->cur_dts != AV_NOPTS_VALUE && max > pkt->dts) {
526  av_log(s, max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG,
527  "st:%d PTS: %"PRId64" DTS: %"PRId64" < %"PRId64" invalid, clipping\n", pkt->stream_index, pkt->pts, pkt->dts, max);
528  if(pkt->pts >= pkt->dts)
529  pkt->pts = FFMAX(pkt->pts, max);
530  pkt->dts = max;
531  }
532  }
533 
534  /*
535  * Audio encoders may split the packets -- #frames in != #packets out.
536  * But there is no reordering, so we can limit the number of output packets
537  * by simply dropping them here.
538  * Counting encoded video frames needs to be done separately because of
539  * reordering, see do_video_out()
540  */
541  if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
542  if (ost->frame_number >= ost->max_frames) {
543  av_free_packet(pkt);
544  return;
545  }
546  ost->frame_number++;
547  }
548 
549  while (bsfc) {
550  AVPacket new_pkt = *pkt;
551  int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
552  &new_pkt.data, &new_pkt.size,
553  pkt->data, pkt->size,
554  pkt->flags & AV_PKT_FLAG_KEY);
555  if(a == 0 && new_pkt.data != pkt->data && new_pkt.destruct) {
556  uint8_t *t = av_malloc(new_pkt.size + FF_INPUT_BUFFER_PADDING_SIZE); //the new should be a subset of the old so cannot overflow
557  if(t) {
558  memcpy(t, new_pkt.data, new_pkt.size);
559  memset(t + new_pkt.size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
560  new_pkt.data = t;
561  a = 1;
562  } else
563  a = AVERROR(ENOMEM);
564  }
565  if (a > 0) {
566  av_free_packet(pkt);
567  new_pkt.destruct = av_destruct_packet;
568  } else if (a < 0) {
569  av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
570  bsfc->filter->name, pkt->stream_index,
571  avctx->codec ? avctx->codec->name : "copy");
572  print_error("", a);
573  if (exit_on_error)
574  exit(1);
575  }
576  *pkt = new_pkt;
577 
578  bsfc = bsfc->next;
579  }
580 
581  pkt->stream_index = ost->index;
582 
583  if (debug_ts) {
584  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
585  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
586  av_get_media_type_string(ost->st->codec->codec_type),
587  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
588  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
589  pkt->size
590  );
591  }
592 
593  ret = av_interleaved_write_frame(s, pkt);
594  if (ret < 0) {
595  print_error("av_interleaved_write_frame()", ret);
596  exit(1);
597  }
598 }
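/* Illustrative example (hypothetical numbers): with a muxer that does not set
 * AVFMT_TS_NONSTRICT, max = cur_dts + 1. If cur_dts is 1000 and a packet
 * arrives with dts = 998, pts = 1002, the clamp above logs the
 * "invalid, clipping" message, rewrites dts to 1001 and leaves pts at 1002,
 * keeping the stream's DTS strictly increasing before interleaving. */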
599 
600 static void close_output_stream(OutputStream *ost)
601 {
602  OutputFile *of = output_files[ost->file_index];
603 
604  ost->finished = 1;
605  if (of->shortest) {
606  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->st->codec->time_base, AV_TIME_BASE_Q);
607  of->recording_time = FFMIN(of->recording_time, end);
608  }
609 }
610 
611 static int check_recording_time(OutputStream *ost)
612 {
613  OutputFile *of = output_files[ost->file_index];
614 
615  if (of->recording_time != INT64_MAX &&
616  av_compare_ts(ost->sync_opts - ost->first_pts, ost->st->codec->time_base, of->recording_time,
617  AV_TIME_BASE_Q) >= 0) {
618  close_output_stream(ost);
619  return 0;
620  }
621  return 1;
622 }
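/* Illustrative example (hypothetical values): with -t 10 the output file's
 * recording_time is 10 * AV_TIME_BASE. For an encoder running at a 1/25 time
 * base, av_compare_ts() above compares sync_opts - first_pts (in 1/25 units)
 * against 10 seconds; once 250 frames have been emitted the comparison turns
 * >= 0, the stream is closed and further frames for it are discarded. */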
623 
624 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
625  AVFrame *frame)
626 {
627  AVCodecContext *enc = ost->st->codec;
628  AVPacket pkt;
629  int got_packet = 0;
630 
631  av_init_packet(&pkt);
632  pkt.data = NULL;
633  pkt.size = 0;
634 
635  if (!check_recording_time(ost))
636  return;
637 
638  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
639  frame->pts = ost->sync_opts;
640  ost->sync_opts = frame->pts + frame->nb_samples;
641 
642  av_assert0(pkt.size || !pkt.data);
643  update_benchmark(NULL);
644  if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
645  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
646  exit(1);
647  }
648  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
649 
650  if (got_packet) {
651  if (pkt.pts != AV_NOPTS_VALUE)
652  pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
653  if (pkt.dts != AV_NOPTS_VALUE)
654  pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
655  if (pkt.duration > 0)
656  pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
657 
658  if (debug_ts) {
659  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
660  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
661  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
662  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
663  }
664 
665  audio_size += pkt.size;
666  write_frame(s, &pkt, ost);
667 
668  av_free_packet(&pkt);
669  }
670 }
671 
672 static void pre_process_video_frame(InputStream *ist, AVPicture *picture, void **bufp)
673 {
674  AVCodecContext *dec;
675  AVPicture *picture2;
676  AVPicture picture_tmp;
677  uint8_t *buf = 0;
678 
679  dec = ist->st->codec;
680 
681  /* deinterlace : must be done before any resize */
682  if (do_deinterlace) {
683  int size;
684 
685  /* create temporary picture */
686  size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
687  if (size < 0)
688  return;
689  buf = av_malloc(size);
690  if (!buf)
691  return;
692 
693  picture2 = &picture_tmp;
694  avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);
695 
696  if (avpicture_deinterlace(picture2, picture,
697  dec->pix_fmt, dec->width, dec->height) < 0) {
698  /* if error, do not deinterlace */
699  av_log(NULL, AV_LOG_WARNING, "Deinterlacing failed\n");
700  av_free(buf);
701  buf = NULL;
702  picture2 = picture;
703  }
704  } else {
705  picture2 = picture;
706  }
707 
708  if (picture != picture2)
709  *picture = *picture2;
710  *bufp = buf;
711 }
712 
713 static void do_subtitle_out(AVFormatContext *s,
714  OutputStream *ost,
715  InputStream *ist,
716  AVSubtitle *sub)
717 {
718  int subtitle_out_max_size = 1024 * 1024;
719  int subtitle_out_size, nb, i;
720  AVCodecContext *enc;
721  AVPacket pkt;
722  int64_t pts;
723 
724  if (sub->pts == AV_NOPTS_VALUE) {
725  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
726  if (exit_on_error)
727  exit(1);
728  return;
729  }
730 
731  enc = ost->st->codec;
732 
733  if (!subtitle_out) {
734  subtitle_out = av_malloc(subtitle_out_max_size);
735  }
736 
737  /* Note: DVB subtitles need one packet to draw them and another
738  packet to clear them */
739  /* XXX: signal it in the codec context ? */
740  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
741  nb = 2;
742  else
743  nb = 1;
744 
745  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
746  pts = sub->pts - output_files[ost->file_index]->start_time;
747  for (i = 0; i < nb; i++) {
748  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
749  if (!check_recording_time(ost))
750  return;
751 
752  sub->pts = pts;
753  // start_display_time is required to be 0
754  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
755  sub->end_display_time -= sub->start_display_time;
756  sub->start_display_time = 0;
757  if (i == 1)
758  sub->num_rects = 0;
759  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
760  subtitle_out_max_size, sub);
761  if (subtitle_out_size < 0) {
762  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
763  exit(1);
764  }
765 
766  av_init_packet(&pkt);
767  pkt.data = subtitle_out;
768  pkt.size = subtitle_out_size;
769  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
770  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
771  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
772  /* XXX: the pts correction is handled here. Maybe handling
773  it in the codec would be better */
774  if (i == 0)
775  pkt.pts += 90 * sub->start_display_time;
776  else
777  pkt.pts += 90 * sub->end_display_time;
778  }
779  subtitle_size += pkt.size;
780  write_frame(s, &pkt, ost);
781  }
782 }
783 
784 static void do_video_out(AVFormatContext *s,
785  OutputStream *ost,
786  AVFrame *in_picture)
787 {
788  int ret, format_video_sync;
789  AVPacket pkt;
790  AVCodecContext *enc = ost->st->codec;
791  int nb_frames, i;
792  double sync_ipts, delta;
793  double duration = 0;
794  int frame_size = 0;
795  InputStream *ist = NULL;
796 
797  if (ost->source_index >= 0)
798  ist = input_streams[ost->source_index];
799 
800  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
801  duration = 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base));
802 
803  sync_ipts = in_picture->pts;
804  delta = sync_ipts - ost->sync_opts + duration;
805 
806  /* by default, we output a single frame */
807  nb_frames = 1;
808 
809  format_video_sync = video_sync_method;
810  if (format_video_sync == VSYNC_AUTO)
811  format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
812 
813  switch (format_video_sync) {
814  case VSYNC_CFR:
815  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
816  if (delta < -1.1)
817  nb_frames = 0;
818  else if (delta > 1.1)
819  nb_frames = lrintf(delta);
820  break;
821  case VSYNC_VFR:
822  if (delta <= -0.6)
823  nb_frames = 0;
824  else if (delta > 0.6)
825  ost->sync_opts = lrint(sync_ipts);
826  break;
827  case VSYNC_DROP:
828  case VSYNC_PASSTHROUGH:
829  ost->sync_opts = lrint(sync_ipts);
830  break;
831  default:
832  av_assert0(0);
833  }
834 
835  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
836  if (nb_frames == 0) {
837  nb_frames_drop++;
838  av_log(NULL, AV_LOG_VERBOSE, "*** drop!\n");
839  return;
840  } else if (nb_frames > 1) {
841  if (nb_frames > dts_error_threshold * 30) {
842  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
843  nb_frames_drop++;
844  return;
845  }
846  nb_frames_dup += nb_frames - 1;
847  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
848  }
849 
850  /* duplicate frames if needed */
851  for (i = 0; i < nb_frames; i++) {
852  av_init_packet(&pkt);
853  pkt.data = NULL;
854  pkt.size = 0;
855 
856  in_picture->pts = ost->sync_opts;
857 
858  if (!check_recording_time(ost))
859  return;
860 
861  if (s->oformat->flags & AVFMT_RAWPICTURE &&
862  enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
863  /* raw pictures are written as an AVPicture structure to
864  avoid any copies. We temporarily support the older
865  method. */
866  enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
867  enc->coded_frame->top_field_first = in_picture->top_field_first;
868  if (enc->coded_frame->interlaced_frame)
869  enc->field_order = enc->coded_frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
870  else
871  enc->field_order = AV_FIELD_PROGRESSIVE;
872  pkt.data = (uint8_t *)in_picture;
873  pkt.size = sizeof(AVPicture);
874  pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
875  pkt.flags |= AV_PKT_FLAG_KEY;
876 
877  video_size += pkt.size;
878  write_frame(s, &pkt, ost);
879  } else {
880  int got_packet;
881  AVFrame big_picture;
882 
883  big_picture = *in_picture;
884  /* better than nothing: use input picture interlaced
885  settings */
886  big_picture.interlaced_frame = in_picture->interlaced_frame;
887  if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)) {
888  if (ost->top_field_first == -1)
889  big_picture.top_field_first = in_picture->top_field_first;
890  else
891  big_picture.top_field_first = !!ost->top_field_first;
892  }
893 
894  if (big_picture.interlaced_frame) {
895  if (enc->codec->id == AV_CODEC_ID_MJPEG)
896  enc->field_order = big_picture.top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
897  else
898  enc->field_order = big_picture.top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
899  } else
900  enc->field_order = AV_FIELD_PROGRESSIVE;
901 
902  big_picture.quality = ost->st->codec->global_quality;
903  if (!enc->me_threshold)
904  big_picture.pict_type = 0;
905  if (ost->forced_kf_index < ost->forced_kf_count &&
906  big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
907  big_picture.pict_type = AV_PICTURE_TYPE_I;
908  ost->forced_kf_index++;
909  }
910  update_benchmark(NULL);
911  ret = avcodec_encode_video2(enc, &pkt, &big_picture, &got_packet);
912  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
913  if (ret < 0) {
914  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
915  exit(1);
916  }
917 
918  if (got_packet) {
919  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & CODEC_CAP_DELAY))
920  pkt.pts = ost->sync_opts;
921 
922  if (pkt.pts != AV_NOPTS_VALUE)
923  pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
924  if (pkt.dts != AV_NOPTS_VALUE)
925  pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
926 
927  if (debug_ts) {
928  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
929  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
930  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
931  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
932  }
933 
934  frame_size = pkt.size;
935  video_size += pkt.size;
936  write_frame(s, &pkt, ost);
937  av_free_packet(&pkt);
938 
939  /* if two pass, output log */
940  if (ost->logfile && enc->stats_out) {
941  fprintf(ost->logfile, "%s", enc->stats_out);
942  }
943  }
944  }
945  ost->sync_opts++;
946  /*
947  * For video, number of frames in == number of packets out.
948  * But there may be reordering, so we can't throw away frames on encoder
949  * flush, we need to limit them here, before they go into encoder.
950  */
951  ost->frame_number++;
952  }
953 
954  if (vstats_filename && frame_size)
955  do_video_stats(ost, frame_size);
956 }
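/* Illustrative example (hypothetical frame rates): encoding a 30 fps source
 * to a 25 fps CFR output makes sync_ipts fall behind sync_opts by roughly
 * 1/6 frame per picture; whenever the accumulated delta drops below -1.1 the
 * frame is dropped (nb_frames_drop). Going from 25 fps to 30 fps, delta grows
 * by about 0.2 per picture and, once it exceeds 1.1, lrintf(delta) frames are
 * emitted, duplicating the picture (nb_frames_dup). */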
957 
958 static double psnr(double d)
959 {
960  return -10.0 * log(d) / log(10.0);
961 }
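/* Illustrative note: psnr() expects the mean squared error already normalized
 * by 255^2 (see the callers, which divide coded_frame->error[] by
 * width * height * 255.0 * 255.0). A normalized error of 1e-4 therefore maps
 * to -10 * log10(1e-4) = 40 dB. */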
962 
963 static void do_video_stats(OutputStream *ost, int frame_size)
964 {
965  AVCodecContext *enc;
966  int frame_number;
967  double ti1, bitrate, avg_bitrate;
968 
969  /* this is executed just the first time do_video_stats is called */
970  if (!vstats_file) {
971  vstats_file = fopen(vstats_filename, "w");
972  if (!vstats_file) {
973  perror("fopen");
974  exit(1);
975  }
976  }
977 
978  enc = ost->st->codec;
979  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
980  frame_number = ost->st->nb_frames;
981  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
982  if (enc->flags&CODEC_FLAG_PSNR)
983  fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
984 
985  fprintf(vstats_file,"f_size= %6d ", frame_size);
986  /* compute pts value */
987  ti1 = ost->st->pts.val * av_q2d(enc->time_base);
988  if (ti1 < 0.01)
989  ti1 = 0.01;
990 
991  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
992  avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
993  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
994  (double)video_size / 1024, ti1, bitrate, avg_bitrate);
995  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
996  }
997 }
998 
999 /**
1000  * Get and encode new output from any of the filtergraphs, without causing
1001  * activity.
1002  *
1003  * @return 0 for success, <0 for severe errors
1004  */
1005 static int reap_filters(void)
1006 {
1007  AVFilterBufferRef *picref;
1008  AVFrame *filtered_frame = NULL;
1009  int i;
1010  int64_t frame_pts;
1011 
1012  /* Reap all buffers present in the buffer sinks */
1013  for (i = 0; i < nb_output_streams; i++) {
1014  OutputStream *ost = output_streams[i];
1015  OutputFile *of = output_files[ost->file_index];
1016  int ret = 0;
1017 
1018  if (!ost->filter)
1019  continue;
1020 
1021  if (!ost->filtered_frame && !(ost->filtered_frame = avcodec_alloc_frame())) {
1022  return AVERROR(ENOMEM);
1023  } else
1024  avcodec_get_frame_defaults(ost->filtered_frame);
1025  filtered_frame = ost->filtered_frame;
1026 
1027  while (1) {
1028  ret = av_buffersink_get_buffer_ref(ost->filter->filter, &picref,
1029  AV_BUFFERSINK_FLAG_NO_REQUEST);
1030  if (ret < 0) {
1031  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1032  char buf[256];
1033  av_strerror(ret, buf, sizeof(buf));
1034  av_log(NULL, AV_LOG_WARNING,
1035  "Error in av_buffersink_get_buffer_ref(): %s\n", buf);
1036  }
1037  break;
1038  }
1039  frame_pts = AV_NOPTS_VALUE;
1040  if (picref->pts != AV_NOPTS_VALUE) {
1041  filtered_frame->pts = frame_pts = av_rescale_q(picref->pts,
1042  ost->filter->filter->inputs[0]->time_base,
1043  ost->st->codec->time_base) -
1044  av_rescale_q(of->start_time,
1045  AV_TIME_BASE_Q,
1046  ost->st->codec->time_base);
1047 
1048  if (of->start_time && filtered_frame->pts < 0) {
1049  avfilter_unref_buffer(picref);
1050  continue;
1051  }
1052  }
1053  //if (ost->source_index >= 0)
1054  // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1055 
1056 
1057  switch (ost->filter->filter->inputs[0]->type) {
1058  case AVMEDIA_TYPE_VIDEO:
1059  avfilter_copy_buf_props(filtered_frame, picref);
1060  filtered_frame->pts = frame_pts;
1061  if (!ost->frame_aspect_ratio)
1062  ost->st->codec->sample_aspect_ratio = picref->video->sample_aspect_ratio;
1063 
1064  do_video_out(of->ctx, ost, filtered_frame);
1065  break;
1066  case AVMEDIA_TYPE_AUDIO:
1067  avfilter_copy_buf_props(filtered_frame, picref);
1068  filtered_frame->pts = frame_pts;
1069  do_audio_out(of->ctx, ost, filtered_frame);
1070  break;
1071  default:
1072  // TODO support subtitle filters
1073  av_assert0(0);
1074  }
1075 
1076  avfilter_unref_buffer(picref);
1077  }
1078  }
1079 
1080  return 0;
1081 }
1082 
1083 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1084 {
1085  char buf[1024];
1086  AVBPrint buf_script;
1087  OutputStream *ost;
1088  AVFormatContext *oc;
1089  int64_t total_size;
1090  AVCodecContext *enc;
1091  int frame_number, vid, i;
1092  double bitrate;
1093  int64_t pts = INT64_MIN;
1094  static int64_t last_time = -1;
1095  static int qp_histogram[52];
1096  int hours, mins, secs, us;
1097 
1098  if (!print_stats && !is_last_report && !progress_avio)
1099  return;
1100 
1101  if (!is_last_report) {
1102  if (last_time == -1) {
1103  last_time = cur_time;
1104  return;
1105  }
1106  if ((cur_time - last_time) < 500000)
1107  return;
1108  last_time = cur_time;
1109  }
1110 
1111 
1112  oc = output_files[0]->ctx;
1113 
1114  total_size = avio_size(oc->pb);
1115  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1116  total_size = avio_tell(oc->pb);
1117 
1118  buf[0] = '\0';
1119  vid = 0;
1120  av_bprint_init(&buf_script, 0, 1);
1121  for (i = 0; i < nb_output_streams; i++) {
1122  float q = -1;
1123  ost = output_streams[i];
1124  enc = ost->st->codec;
1125  if (!ost->stream_copy && enc->coded_frame)
1126  q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
1127  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1128  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1129  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1130  ost->file_index, ost->index, q);
1131  }
1132  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1133  float fps, t = (cur_time-timer_start) / 1000000.0;
1134 
1135  frame_number = ost->frame_number;
1136  fps = t > 1 ? frame_number / t : 0;
1137  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1138  frame_number, fps < 9.95, fps, q);
1139  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1140  av_bprintf(&buf_script, "fps=%.1f\n", fps);
1141  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1142  ost->file_index, ost->index, q);
1143  if (is_last_report)
1144  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1145  if (qp_hist) {
1146  int j;
1147  int qp = lrintf(q);
1148  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1149  qp_histogram[qp]++;
1150  for (j = 0; j < 32; j++)
1151  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
1152  }
1153  if ((enc->flags&CODEC_FLAG_PSNR) && (enc->coded_frame || is_last_report)) {
1154  int j;
1155  double error, error_sum = 0;
1156  double scale, scale_sum = 0;
1157  double p;
1158  char type[3] = { 'Y','U','V' };
1159  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1160  for (j = 0; j < 3; j++) {
1161  if (is_last_report) {
1162  error = enc->error[j];
1163  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1164  } else {
1165  error = enc->coded_frame->error[j];
1166  scale = enc->width * enc->height * 255.0 * 255.0;
1167  }
1168  if (j)
1169  scale /= 4;
1170  error_sum += error;
1171  scale_sum += scale;
1172  p = psnr(error / scale);
1173  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1174  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1175  ost->file_index, ost->index, type[j] | 32, p);
1176  }
1177  p = psnr(error_sum / scale_sum);
1178  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1179  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1180  ost->file_index, ost->index, p);
1181  }
1182  vid = 1;
1183  }
1184  /* compute min output value */
1185  if ((is_last_report || !ost->finished) && ost->st->pts.val != AV_NOPTS_VALUE)
1186  pts = FFMAX(pts, av_rescale_q(ost->st->pts.val,
1187  ost->st->time_base, AV_TIME_BASE_Q));
1188  }
1189 
1190  secs = pts / AV_TIME_BASE;
1191  us = pts % AV_TIME_BASE;
1192  mins = secs / 60;
1193  secs %= 60;
1194  hours = mins / 60;
1195  mins %= 60;
1196 
1197  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1198 
1199  if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1200  "size=N/A time=");
1201  else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1202  "size=%8.0fkB time=", total_size / 1024.0);
1203  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1204  "%02d:%02d:%02d.%02d ", hours, mins, secs,
1205  (100 * us) / AV_TIME_BASE);
1206  if (bitrate < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1207  "bitrate=N/A");
1208  else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1209  "bitrate=%6.1fkbits/s", bitrate);
1210  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1211  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1212  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1213  av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1214  hours, mins, secs, us);
1215 
1216  if (nb_frames_dup || nb_frames_drop)
1217  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1218  nb_frames_dup, nb_frames_drop);
1219  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1220  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1221 
1222  if (print_stats || is_last_report) {
1223  av_log(NULL, AV_LOG_INFO, "%s \r", buf);
1224 
1225  fflush(stderr);
1226  }
1227 
1228  if (progress_avio) {
1229  av_bprintf(&buf_script, "progress=%s\n",
1230  is_last_report ? "end" : "continue");
1231  avio_write(progress_avio, buf_script.str,
1232  FFMIN(buf_script.len, buf_script.size - 1));
1233  avio_flush(progress_avio);
1234  av_bprint_finalize(&buf_script, NULL);
1235  if (is_last_report) {
1236  avio_close(progress_avio);
1237  progress_avio = NULL;
1238  }
1239  }
1240 
1241  if (is_last_report) {
1242  int64_t raw= audio_size + video_size + subtitle_size + extra_size;
1243  av_log(NULL, AV_LOG_INFO, "\n");
1244  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0f global headers:%1.0fkB muxing overhead %f%%\n",
1245  video_size / 1024.0,
1246  audio_size / 1024.0,
1247  subtitle_size / 1024.0,
1248  extra_size / 1024.0,
1249  100.0 * (total_size - raw) / raw
1250  );
1251  if(video_size + audio_size + subtitle_size + extra_size == 0){
1252  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded (check -ss / -t / -frames parameters if used)\n");
1253  }
1254  }
1255 }
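/* Illustrative example: pts here is the largest output timestamp in
 * AV_TIME_BASE (microsecond) units, so bitrate = total_size * 8 / (pts / 1000.0)
 * yields kbit/s. For instance, 1500000 bytes written after 60 seconds
 * (pts = 60000000) reports 1500000 * 8 / 60000 = 200.0 kbit/s. */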
1256 
1257 static void flush_encoders(void)
1258 {
1259  int i, ret;
1260 
1261  for (i = 0; i < nb_output_streams; i++) {
1262  OutputStream *ost = output_streams[i];
1263  AVCodecContext *enc = ost->st->codec;
1264  AVFormatContext *os = output_files[ost->file_index]->ctx;
1265  int stop_encoding = 0;
1266 
1267  if (!ost->encoding_needed)
1268  continue;
1269 
1270  if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1271  continue;
1272  if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1273  continue;
1274 
1275  for (;;) {
1276  int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1277  const char *desc;
1278  int64_t *size;
1279 
1280  switch (ost->st->codec->codec_type) {
1281  case AVMEDIA_TYPE_AUDIO:
1282  encode = avcodec_encode_audio2;
1283  desc = "Audio";
1284  size = &audio_size;
1285  break;
1286  case AVMEDIA_TYPE_VIDEO:
1287  encode = avcodec_encode_video2;
1288  desc = "Video";
1289  size = &video_size;
1290  break;
1291  default:
1292  stop_encoding = 1;
1293  }
1294 
1295  if (encode) {
1296  AVPacket pkt;
1297  int got_packet;
1298  av_init_packet(&pkt);
1299  pkt.data = NULL;
1300  pkt.size = 0;
1301 
1302  update_benchmark(NULL);
1303  ret = encode(enc, &pkt, NULL, &got_packet);
1304  update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
1305  if (ret < 0) {
1306  av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
1307  exit(1);
1308  }
1309  *size += pkt.size;
1310  if (ost->logfile && enc->stats_out) {
1311  fprintf(ost->logfile, "%s", enc->stats_out);
1312  }
1313  if (!got_packet) {
1314  stop_encoding = 1;
1315  break;
1316  }
1317  if (pkt.pts != AV_NOPTS_VALUE)
1318  pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
1319  if (pkt.dts != AV_NOPTS_VALUE)
1320  pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
1321  if (pkt.duration > 0)
1322  pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
1323  write_frame(os, &pkt, ost);
1324  if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1325  do_video_stats(ost, pkt.size);
1326  }
1327  }
1328 
1329  if (stop_encoding)
1330  break;
1331  }
1332  }
1333 }
1334 
1335 /*
1336  * Check whether a packet from ist should be written into ost at this time
1337  */
1338 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1339 {
1340  OutputFile *of = output_files[ost->file_index];
1341  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1342 
1343  if (ost->source_index != ist_index)
1344  return 0;
1345 
1346  if (of->start_time && ist->pts < of->start_time)
1347  return 0;
1348 
1349  return 1;
1350 }
1351 
1352 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1353 {
1354  OutputFile *of = output_files[ost->file_index];
1355  int64_t ost_tb_start_time = av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);
1356  AVPicture pict;
1357  AVPacket opkt;
1358 
1359  av_init_packet(&opkt);
1360 
1361  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1362  !ost->copy_initial_nonkeyframes)
1363  return;
1364 
1365  if (!ost->frame_number && ist->pts < of->start_time &&
1366  !ost->copy_prior_start)
1367  return;
1368 
1369  if (of->recording_time != INT64_MAX &&
1370  ist->pts >= of->recording_time + of->start_time) {
1371  close_output_stream(ost);
1372  return;
1373  }
1374 
1375  /* force the input stream PTS */
1376  if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1377  audio_size += pkt->size;
1378  else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
1379  video_size += pkt->size;
1380  ost->sync_opts++;
1381  } else if (ost->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
1382  subtitle_size += pkt->size;
1383  }
1384 
1385  if (pkt->pts != AV_NOPTS_VALUE)
1386  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1387  else
1388  opkt.pts = AV_NOPTS_VALUE;
1389 
1390  if (pkt->dts == AV_NOPTS_VALUE)
1391  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1392  else
1393  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1394  opkt.dts -= ost_tb_start_time;
1395 
1396  if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1397  int duration = av_get_audio_frame_duration(ist->st->codec, pkt->size);
1398  if(!duration)
1399  duration = ist->st->codec->frame_size;
1400  opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1401  (AVRational){1, ist->st->codec->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1402  ost->st->time_base) - ost_tb_start_time;
1403  }
1404 
1405  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1406  opkt.flags = pkt->flags;
1407 
1408  // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1409  if ( ost->st->codec->codec_id != AV_CODEC_ID_H264
1410  && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
1411  && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
1412  && ost->st->codec->codec_id != AV_CODEC_ID_VC1
1413  ) {
1414  if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY))
1415  opkt.destruct = av_destruct_packet;
1416  } else {
1417  opkt.data = pkt->data;
1418  opkt.size = pkt->size;
1419  }
1420 
1421  if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1422  /* store AVPicture in AVPacket, as expected by the output format */
1423  avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1424  opkt.data = (uint8_t *)&pict;
1425  opkt.size = sizeof(AVPicture);
1426  opkt.flags |= AV_PKT_FLAG_KEY;
1427  }
1428 
1429  write_frame(of->ctx, &opkt, ost);
1430  ost->st->codec->frame_number++;
1431 }
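/* Illustrative example (hypothetical stream): copying a video stream whose
 * input time base is 1/90000 into an output track with a 1/15360 time base,
 * with -ss 2 on the output (ost_tb_start_time = 2 s rescaled to 30720), an
 * input packet with pts = 270000 (3.0 s) leaves the muxer with
 * opkt.pts = 270000 * 15360 / 90000 - 30720 = 46080 - 30720 = 15360,
 * i.e. 1.0 s into the output file. */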
1432 
1433 static void rate_emu_sleep(InputStream *ist)
1434 {
1435  if (input_files[ist->file_index]->rate_emu) {
1436  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
1437  int64_t now = av_gettime() - ist->start;
1438  if (pts > now)
1439  av_usleep(pts - now);
1440  }
1441 }
1442 
1443 int guess_input_channel_layout(InputStream *ist)
1444 {
1445  AVCodecContext *dec = ist->st->codec;
1446 
1447  if (!dec->channel_layout) {
1448  char layout_name[256];
1449 
1450  dec->channel_layout = av_get_default_channel_layout(dec->channels);
1451  if (!dec->channel_layout)
1452  return 0;
1453  av_get_channel_layout_string(layout_name, sizeof(layout_name),
1454  dec->channels, dec->channel_layout);
1455  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1456  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
1457  }
1458  return 1;
1459 }
1460 
1461 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1462 {
1463  AVFrame *decoded_frame;
1464  AVCodecContext *avctx = ist->st->codec;
1465  int i, ret, resample_changed;
1466  AVRational decoded_frame_tb;
1467 
1468  if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
1469  return AVERROR(ENOMEM);
1470  decoded_frame = ist->decoded_frame;
1471 
1472  update_benchmark(NULL);
1473  ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1474  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
1475 
1476  if (ret >= 0 && avctx->sample_rate <= 0) {
1477  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1478  ret = AVERROR_INVALIDDATA;
1479  }
1480 
1481  if (!*got_output || ret < 0) {
1482  if (!pkt->size) {
1483  for (i = 0; i < ist->nb_filters; i++)
1484  av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
1485  }
1486  return ret;
1487  }
1488 
1489 #if 1
1490  /* increment next_dts to use for the case where the input stream does not
1491  have timestamps or there are multiple frames in the packet */
1492  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1493  avctx->sample_rate;
1494  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1495  avctx->sample_rate;
1496 #endif
1497 
1498  rate_emu_sleep(ist);
1499 
1500  resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1501  ist->resample_channels != avctx->channels ||
1502  ist->resample_channel_layout != decoded_frame->channel_layout ||
1503  ist->resample_sample_rate != decoded_frame->sample_rate;
1504  if (resample_changed) {
1505  char layout1[64], layout2[64];
1506 
1507  if (!guess_input_channel_layout(ist)) {
1508  av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1509  "layout for Input Stream #%d.%d\n", ist->file_index,
1510  ist->st->index);
1511  exit(1);
1512  }
1513  decoded_frame->channel_layout = avctx->channel_layout;
1514 
1515  av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1516  ist->resample_channel_layout);
1517  av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1518  decoded_frame->channel_layout);
1519 
1520  av_log(NULL, AV_LOG_INFO,
1521  "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1522  ist->file_index, ist->st->index,
1523  ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
1524  ist->resample_channels, layout1,
1525  decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
1526  avctx->channels, layout2);
1527 
1528  ist->resample_sample_fmt = decoded_frame->format;
1529  ist->resample_sample_rate = decoded_frame->sample_rate;
1530  ist->resample_channel_layout = decoded_frame->channel_layout;
1531  ist->resample_channels = avctx->channels;
1532 
1533  for (i = 0; i < nb_filtergraphs; i++)
1534  if (ist_in_filtergraph(filtergraphs[i], ist)) {
1535  FilterGraph *fg = filtergraphs[i];
1536  int j;
1537  if (configure_filtergraph(fg) < 0) {
1538  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
1539  exit(1);
1540  }
1541  for (j = 0; j < fg->nb_outputs; j++) {
1542  OutputStream *ost = fg->outputs[j]->ost;
1543  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
1544  !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
1545  av_buffersink_set_frame_size(ost->filter->filter,
1546  ost->st->codec->frame_size);
1547  }
1548  }
1549  }
1550 
1551  /* if the decoder provides a pts, use it instead of the last packet pts.
1552  the decoder could be delaying output by a packet or more. */
1553  if (decoded_frame->pts != AV_NOPTS_VALUE) {
1554  ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
1555  decoded_frame_tb = avctx->time_base;
1556  } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
1557  decoded_frame->pts = decoded_frame->pkt_pts;
1558  pkt->pts = AV_NOPTS_VALUE;
1559  decoded_frame_tb = ist->st->time_base;
1560  } else if (pkt->pts != AV_NOPTS_VALUE) {
1561  decoded_frame->pts = pkt->pts;
1562  pkt->pts = AV_NOPTS_VALUE;
1563  decoded_frame_tb = ist->st->time_base;
1564  }else {
1565  decoded_frame->pts = ist->dts;
1566  decoded_frame_tb = AV_TIME_BASE_Q;
1567  }
1568  if (decoded_frame->pts != AV_NOPTS_VALUE)
1569  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
1570  (AVRational){1, ist->st->codec->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
1571  (AVRational){1, ist->st->codec->sample_rate});
1572  for (i = 0; i < ist->nb_filters; i++)
1573  av_buffersrc_add_frame(ist->filters[i]->filter, decoded_frame,
1574  AV_BUFFERSRC_FLAG_PUSH);
1575 
1576  decoded_frame->pts = AV_NOPTS_VALUE;
1577 
1578  return ret;
1579 }
1580 
1581 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
1582 {
1583  AVFrame *decoded_frame;
1584  void *buffer_to_free = NULL;
1585  int i, ret = 0, resample_changed;
1586  int64_t best_effort_timestamp;
1587  AVRational *frame_sample_aspect;
1588 
1589  if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
1590  return AVERROR(ENOMEM);
1591  decoded_frame = ist->decoded_frame;
1592  pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
1593 
1594  update_benchmark(NULL);
1595  ret = avcodec_decode_video2(ist->st->codec,
1596  decoded_frame, got_output, pkt);
1597  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
1598  if (!*got_output || ret < 0) {
1599  if (!pkt->size) {
1600  for (i = 0; i < ist->nb_filters; i++)
1601  av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
1602  }
1603  return ret;
1604  }
1605 
1606  if(ist->top_field_first>=0)
1607  decoded_frame->top_field_first = ist->top_field_first;
1608 
1609  best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
1610  if(best_effort_timestamp != AV_NOPTS_VALUE)
1611  ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
1612 
1613  if (debug_ts) {
1614  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
1615  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d \n",
1616  ist->st->index, av_ts2str(decoded_frame->pts),
1617  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
1618  best_effort_timestamp,
1619  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
1620  decoded_frame->key_frame, decoded_frame->pict_type);
1621  }
1622 
1623  pkt->size = 0;
1624  pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
1625 
1626  rate_emu_sleep(ist);
1627 
1628  if (ist->st->sample_aspect_ratio.num)
1629  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
1630 
1631  resample_changed = ist->resample_width != decoded_frame->width ||
1632  ist->resample_height != decoded_frame->height ||
1633  ist->resample_pix_fmt != decoded_frame->format;
1634  if (resample_changed) {
1635  av_log(NULL, AV_LOG_INFO,
1636  "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
1637  ist->file_index, ist->st->index,
1638  ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
1639  decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
1640 
1641  ist->resample_width = decoded_frame->width;
1642  ist->resample_height = decoded_frame->height;
1643  ist->resample_pix_fmt = decoded_frame->format;
1644 
1645  for (i = 0; i < nb_filtergraphs; i++) {
1646  if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
1647  configure_filtergraph(filtergraphs[i]) < 0) {
1648  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
1649  exit(1);
1650  }
1651  }
1652  }
1653 
1654  frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
1655  for (i = 0; i < ist->nb_filters; i++) {
1656  int changed = ist->st->codec->width != ist->filters[i]->filter->outputs[0]->w
1657  || ist->st->codec->height != ist->filters[i]->filter->outputs[0]->h
1658  || ist->st->codec->pix_fmt != ist->filters[i]->filter->outputs[0]->format;
1659 
1660  if (!frame_sample_aspect->num)
1661  *frame_sample_aspect = ist->st->sample_aspect_ratio;
1662  if (ist->dr1 && decoded_frame->type==FF_BUFFER_TYPE_USER && !changed) {
1663  FrameBuffer *buf = decoded_frame->opaque;
1664  AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
1665  decoded_frame->data, decoded_frame->linesize,
1666  AV_PERM_READ | AV_PERM_PRESERVE,
1667  ist->st->codec->width, ist->st->codec->height,
1668  ist->st->codec->pix_fmt);
1669 
1670  avfilter_copy_frame_props(fb, decoded_frame);
1671  fb->buf->priv = buf;
1672  fb->buf->free = filter_release_buffer;
1673 
1674  av_assert0(buf->refcount>0);
1675  buf->refcount++;
1676  av_buffersrc_add_ref(ist->filters[i]->filter, fb,
1677  AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT |
1678  AV_BUFFERSRC_FLAG_NO_COPY |
1679  AV_BUFFERSRC_FLAG_PUSH);
1680  } else
1681  if(av_buffersrc_add_frame(ist->filters[i]->filter, decoded_frame, AV_BUFFERSRC_FLAG_PUSH)<0) {
1682  av_log(NULL, AV_LOG_FATAL, "Failed to inject frame into filter network\n");
1683  exit(1);
1684  }
1685 
1686  }
1687 
1688  av_free(buffer_to_free);
1689  return ret;
1690 }
1691 
1692 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
1693 {
1694  AVSubtitle subtitle;
1695  int i, ret = avcodec_decode_subtitle2(ist->st->codec,
1696  &subtitle, got_output, pkt);
1697  if (ret < 0 || !*got_output) {
1698  if (!pkt->size)
1699  sub2video_flush(ist);
1700  return ret;
1701  }
1702 
1703  if (ist->fix_sub_duration) {
1704  if (ist->prev_sub.got_output) {
1705  int end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
1706  1000, AV_TIME_BASE);
1707  if (end < ist->prev_sub.subtitle.end_display_time) {
1708  av_log(ist->st->codec, AV_LOG_DEBUG,
1709  "Subtitle duration reduced from %d to %d\n",
1710  ist->prev_sub.subtitle.end_display_time, end);
1711  ist->prev_sub.subtitle.end_display_time = end;
1712  }
1713  }
1714  FFSWAP(int, *got_output, ist->prev_sub.got_output);
1715  FFSWAP(int, ret, ist->prev_sub.ret);
1716  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
1717  }
1718 
1719  sub2video_update(ist, &subtitle);
1720 
1721  if (!*got_output || !subtitle.num_rects)
1722  return ret;
1723 
1724  rate_emu_sleep(ist);
1725 
1726  for (i = 0; i < nb_output_streams; i++) {
1727  OutputStream *ost = output_streams[i];
1728 
1729  if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
1730  continue;
1731 
1732  do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
1733  }
1734 
1735  avsubtitle_free(&subtitle);
1736  return ret;
1737 }
1738 
1739 /* pkt = NULL means EOF (needed to flush decoder buffers) */
1740 static int output_packet(InputStream *ist, const AVPacket *pkt)
1741 {
1742  int ret = 0, i;
1743  int got_output;
1744 
1745  AVPacket avpkt;
1746  if (!ist->saw_first_ts) {
1747  ist->dts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
1748  ist->pts = 0;
1749  if (pkt != NULL && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
1750  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
1751  ist->pts = ist->dts; // unused, but better to set it to a value that's not totally wrong
1752  }
1753  ist->saw_first_ts = 1;
1754  }
1755 
1756  if (ist->next_dts == AV_NOPTS_VALUE)
1757  ist->next_dts = ist->dts;
1758  if (ist->next_pts == AV_NOPTS_VALUE)
1759  ist->next_pts = ist->pts;
1760 
1761  if (pkt == NULL) {
1762  /* EOF handling */
1763  av_init_packet(&avpkt);
1764  avpkt.data = NULL;
1765  avpkt.size = 0;
1766  goto handle_eof;
1767  } else {
1768  avpkt = *pkt;
1769  }
1770 
1771  if (pkt->dts != AV_NOPTS_VALUE) {
1772  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
1773  if (ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
1774  ist->next_pts = ist->pts = ist->dts;
1775  }
1776 
1777  // while we have more to decode or while the decoder did output something on EOF
1778  while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
1779  int duration;
1780  handle_eof:
1781 
1782  ist->pts = ist->next_pts;
1783  ist->dts = ist->next_dts;
1784 
1785  if (avpkt.size && avpkt.size != pkt->size) {
1786  av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
1787  "Multiple frames in a packet from stream %d\n", pkt->stream_index);
1788  ist->showed_multi_packet_warning = 1;
1789  }
1790 
1791  switch (ist->st->codec->codec_type) {
1792  case AVMEDIA_TYPE_AUDIO:
1793  ret = decode_audio (ist, &avpkt, &got_output);
1794  break;
1795  case AVMEDIA_TYPE_VIDEO:
1796  ret = decode_video (ist, &avpkt, &got_output);
1797  if (avpkt.duration) {
1798  duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
1799  } else if(ist->st->codec->time_base.num != 0 && ist->st->codec->time_base.den != 0) {
1800  int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
1801  duration = ((int64_t)AV_TIME_BASE *
1802  ist->st->codec->time_base.num * ticks) /
1803  ist->st->codec->time_base.den;
1804  } else
1805  duration = 0;
1806 
1807  if(ist->dts != AV_NOPTS_VALUE && duration) {
1808  ist->next_dts += duration;
1809  }else
1810  ist->next_dts = AV_NOPTS_VALUE;
1811 
1812  if (got_output)
1813  ist->next_pts += duration; //FIXME the duration is not correct in some cases
1814  break;
1815  case AVMEDIA_TYPE_SUBTITLE:
1816  ret = transcode_subtitles(ist, &avpkt, &got_output);
1817  break;
1818  default:
1819  return -1;
1820  }
1821 
1822  if (ret < 0)
1823  return ret;
1824 
1825  avpkt.dts=
1826  avpkt.pts= AV_NOPTS_VALUE;
1827 
1828  // touch data and size only if not EOF
1829  if (pkt) {
1830  if(ist->st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
1831  ret = avpkt.size;
1832  avpkt.data += ret;
1833  avpkt.size -= ret;
1834  }
1835  if (!got_output) {
1836  continue;
1837  }
1838  }
1839 
1840  /* handle stream copy */
1841  if (!ist->decoding_needed) {
1842  rate_emu_sleep(ist);
1843  ist->dts = ist->next_dts;
1844  switch (ist->st->codec->codec_type) {
1845  case AVMEDIA_TYPE_AUDIO:
1846  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
1847  ist->st->codec->sample_rate;
1848  break;
1849  case AVMEDIA_TYPE_VIDEO:
1850  if (pkt->duration) {
1851  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
1852  } else if(ist->st->codec->time_base.num != 0) {
1853  int ticks= ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
1854  ist->next_dts += ((int64_t)AV_TIME_BASE *
1855  ist->st->codec->time_base.num * ticks) /
1856  ist->st->codec->time_base.den;
1857  }
1858  break;
1859  }
1860  ist->pts = ist->dts;
1861  ist->next_pts = ist->next_dts;
1862  }
1863  for (i = 0; pkt && i < nb_output_streams; i++) {
1864  OutputStream *ost = output_streams[i];
1865 
1866  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
1867  continue;
1868 
1869  do_streamcopy(ist, ost, pkt);
1870  }
1871 
1872  return 0;
1873 }
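
/* [Editor's note] Illustrative sketch, not part of ffmpeg.c: output_packet() above keeps
 * ist->dts and ist->pts in AV_TIME_BASE units (microseconds) by rescaling the demuxer
 * timestamps with av_rescale_q(). In isolation that conversion looks like this; the
 * 1/90000 time base is a hypothetical (typical MPEG-TS) value chosen for the example. */
#include "libavutil/avutil.h"        /* AV_TIME_BASE_Q */
#include "libavutil/mathematics.h"   /* av_rescale_q() */

static int64_t example_stream_dts_to_microseconds(int64_t pkt_dts)
{
    AVRational stream_tb = { 1, 90000 };   /* demuxer time base of the example stream */
    /* 90000 ticks at 1/90000 == 1 second == 1000000 microseconds */
    return av_rescale_q(pkt_dts, stream_tb, AV_TIME_BASE_Q);
}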
1874 
1875 static void print_sdp(void)
1876 {
1877  char sdp[2048];
1878  int i;
1879  AVFormatContext **avc = av_malloc(sizeof(*avc) * nb_output_files);
1880 
1881  if (!avc)
1882  exit(1);
1883  for (i = 0; i < nb_output_files; i++)
1884  avc[i] = output_files[i]->ctx;
1885 
1886  av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp));
1887  printf("SDP:\n%s\n", sdp);
1888  fflush(stdout);
1889  av_freep(&avc);
1890 }
1891 
1892 static int init_input_stream(int ist_index, char *error, int error_len)
1893 {
1894  int ret;
1895  InputStream *ist = input_streams[ist_index];
1896 
1897  if (ist->decoding_needed) {
1898  AVCodec *codec = ist->dec;
1899  if (!codec) {
1900  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
1901  avcodec_get_name(ist->st->codec->codec_id), ist->file_index, ist->st->index);
1902  return AVERROR(EINVAL);
1903  }
1904 
1905  ist->dr1 = (codec->capabilities & CODEC_CAP_DR1) && !do_deinterlace;
1906  if (codec->type == AVMEDIA_TYPE_VIDEO && ist->dr1) {
1907  ist->st->codec->get_buffer = codec_get_buffer;
1908  ist->st->codec->release_buffer = codec_release_buffer;
1909  ist->st->codec->opaque = &ist->buffer_pool;
1910  }
1911 
1912  if (!av_dict_get(ist->opts, "threads", NULL, 0))
1913  av_dict_set(&ist->opts, "threads", "auto", 0);
1914  if ((ret = avcodec_open2(ist->st->codec, codec, &ist->opts)) < 0) {
1915  if (ret == AVERROR_EXPERIMENTAL)
1916  abort_codec_experimental(codec, 0);
1917  snprintf(error, error_len, "Error while opening decoder for input stream #%d:%d",
1918  ist->file_index, ist->st->index);
1919  return ret;
1920  }
1921  assert_avoptions(ist->opts);
1922  }
1923 
1924  ist->next_pts = AV_NOPTS_VALUE;
1925  ist->next_dts = AV_NOPTS_VALUE;
1926  ist->is_start = 1;
1927 
1928  return 0;
1929 }
1930 
1931 static InputStream *get_input_stream(OutputStream *ost)
1932 {
1933  if (ost->source_index >= 0)
1934  return input_streams[ost->source_index];
1935  return NULL;
1936 }
1937 
1938 static void parse_forced_key_frames(char *kf, OutputStream *ost,
1939  AVCodecContext *avctx)
1940 {
1941  char *p;
1942  int n = 1, i;
1943  int64_t t;
1944 
1945  for (p = kf; *p; p++)
1946  if (*p == ',')
1947  n++;
1948  ost->forced_kf_count = n;
1949  ost->forced_kf_pts = av_malloc(sizeof(*ost->forced_kf_pts) * n);
1950  if (!ost->forced_kf_pts) {
1951  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
1952  exit(1);
1953  }
1954 
1955  p = kf;
1956  for (i = 0; i < n; i++) {
1957  char *next = strchr(p, ',');
1958 
1959  if (next)
1960  *next++ = 0;
1961 
1962  t = parse_time_or_die("force_key_frames", p, 1);
1963  ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
1964 
1965  p = next;
1966  }
1967 }
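
/* [Editor's note] Worked example (illustrative, not part of ffmpeg.c): with
 * "-force_key_frames 0,2,4" and an encoder time base of 1/25, parse_time_or_die()
 * returns 0, 2000000 and 4000000 microseconds, and the av_rescale_q() call above
 * stores forced_kf_pts[] = {0, 50, 100}, i.e. the corresponding ticks in
 * codec->time_base units. */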
1968 
1969 static void report_new_stream(int input_index, AVPacket *pkt)
1970 {
1971  InputFile *file = input_files[input_index];
1972  AVStream *st = file->ctx->streams[pkt->stream_index];
1973 
1974  if (pkt->stream_index < file->nb_streams_warn)
1975  return;
1976  av_log(file->ctx, AV_LOG_WARNING,
1977  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
1978  av_get_media_type_string(st->codec->codec_type),
1979  input_index, pkt->stream_index,
1980  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
1981  file->nb_streams_warn = pkt->stream_index + 1;
1982 }
1983 
1984 static int transcode_init(void)
1985 {
1986  int ret = 0, i, j, k;
1987  AVFormatContext *oc;
1988  AVCodecContext *codec;
1989  OutputStream *ost;
1990  InputStream *ist;
1991  char error[1024];
1992  int want_sdp = 1;
1993 
1994  /* init framerate emulation */
1995  for (i = 0; i < nb_input_files; i++) {
1996  InputFile *ifile = input_files[i];
1997  if (ifile->rate_emu)
1998  for (j = 0; j < ifile->nb_streams; j++)
1999  input_streams[j + ifile->ist_index]->start = av_gettime();
2000  }
2001 
2002  /* output stream init */
2003  for (i = 0; i < nb_output_files; i++) {
2004  oc = output_files[i]->ctx;
2005  if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
2006  av_dump_format(oc, i, oc->filename, 1);
2007  av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
2008  return AVERROR(EINVAL);
2009  }
2010  }
2011 
2012  /* init complex filtergraphs */
2013  for (i = 0; i < nb_filtergraphs; i++)
2014  if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0)
2015  return ret;
2016 
2017  /* for each output stream, we compute the right encoding parameters */
2018  for (i = 0; i < nb_output_streams; i++) {
2019  AVCodecContext *icodec = NULL;
2020  ost = output_streams[i];
2021  oc = output_files[ost->file_index]->ctx;
2022  ist = get_input_stream(ost);
2023 
2024  if (ost->attachment_filename)
2025  continue;
2026 
2027  codec = ost->st->codec;
2028 
2029  if (ist) {
2030  icodec = ist->st->codec;
2031 
2032  ost->st->disposition = ist->st->disposition;
2033  codec->bits_per_raw_sample = icodec->bits_per_raw_sample;
2034  codec->chroma_sample_location = icodec->chroma_sample_location;
2035  }
2036 
2037  if (ost->stream_copy) {
2038  uint64_t extra_size;
2039 
2040  av_assert0(ist && !ost->filter);
2041 
2042  extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
2043 
2044  if (extra_size > INT_MAX) {
2045  return AVERROR(EINVAL);
2046  }
2047 
2048  /* if stream_copy is selected, no need to decode or encode */
2049  codec->codec_id = icodec->codec_id;
2050  codec->codec_type = icodec->codec_type;
2051 
2052  if (!codec->codec_tag) {
2053  if (!oc->oformat->codec_tag ||
2054  av_codec_get_id (oc->oformat->codec_tag, icodec->codec_tag) == codec->codec_id ||
2055  av_codec_get_tag(oc->oformat->codec_tag, icodec->codec_id) <= 0)
2056  codec->codec_tag = icodec->codec_tag;
2057  }
2058 
2059  codec->bit_rate = icodec->bit_rate;
2060  codec->rc_max_rate = icodec->rc_max_rate;
2061  codec->rc_buffer_size = icodec->rc_buffer_size;
2062  codec->field_order = icodec->field_order;
2063  codec->extradata = av_mallocz(extra_size);
2064  if (!codec->extradata) {
2065  return AVERROR(ENOMEM);
2066  }
2067  memcpy(codec->extradata, icodec->extradata, icodec->extradata_size);
2068  codec->extradata_size= icodec->extradata_size;
2069  codec->bits_per_coded_sample = icodec->bits_per_coded_sample;
2070 
2071  codec->time_base = ist->st->time_base;
2072  /*
2073  * AVI is a special case here because it supports variable fps, but
2074  * having the fps and timebase differ significantly adds quite some
2075  * overhead
2076  */
2077  if(!strcmp(oc->oformat->name, "avi")) {
2078  if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2079  && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2080  && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(icodec->time_base)
2081  && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(icodec->time_base) < 1.0/500
2082  || copy_tb==2){
2083  codec->time_base.num = ist->st->r_frame_rate.den;
2084  codec->time_base.den = 2*ist->st->r_frame_rate.num;
2085  codec->ticks_per_frame = 2;
2086  } else if ( copy_tb<0 && av_q2d(icodec->time_base)*icodec->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2087  && av_q2d(ist->st->time_base) < 1.0/500
2088  || copy_tb==0){
2089  codec->time_base = icodec->time_base;
2090  codec->time_base.num *= icodec->ticks_per_frame;
2091  codec->time_base.den *= 2;
2092  codec->ticks_per_frame = 2;
2093  }
2094  } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2095  && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2096  && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2097  && strcmp(oc->oformat->name, "f4v")
2098  ) {
2099  if( copy_tb<0 && icodec->time_base.den
2100  && av_q2d(icodec->time_base)*icodec->ticks_per_frame > av_q2d(ist->st->time_base)
2101  && av_q2d(ist->st->time_base) < 1.0/500
2102  || copy_tb==0){
2103  codec->time_base = icodec->time_base;
2104  codec->time_base.num *= icodec->ticks_per_frame;
2105  }
2106  }
2107  if ( codec->codec_tag == AV_RL32("tmcd")
2108  && icodec->time_base.num < icodec->time_base.den
2109  && icodec->time_base.num > 0
2110  && 121LL*icodec->time_base.num > icodec->time_base.den) {
2111  codec->time_base = icodec->time_base;
2112  }
2113 
2114  if(ost->frame_rate.num)
2115  codec->time_base = av_inv_q(ost->frame_rate);
2116 
2117  av_reduce(&codec->time_base.num, &codec->time_base.den,
2118  codec->time_base.num, codec->time_base.den, INT_MAX);
2119 
2120  switch (codec->codec_type) {
2121  case AVMEDIA_TYPE_AUDIO:
2122  if (audio_volume != 256) {
2123  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2124  exit(1);
2125  }
2126  codec->channel_layout = icodec->channel_layout;
2127  codec->sample_rate = icodec->sample_rate;
2128  codec->channels = icodec->channels;
2129  codec->frame_size = icodec->frame_size;
2130  codec->audio_service_type = icodec->audio_service_type;
2131  codec->block_align = icodec->block_align;
2132  if((codec->block_align == 1 || codec->block_align == 1152) && codec->codec_id == AV_CODEC_ID_MP3)
2133  codec->block_align= 0;
2134  if(codec->codec_id == AV_CODEC_ID_AC3)
2135  codec->block_align= 0;
2136  break;
2137  case AVMEDIA_TYPE_VIDEO:
2138  codec->pix_fmt = icodec->pix_fmt;
2139  codec->width = icodec->width;
2140  codec->height = icodec->height;
2141  codec->has_b_frames = icodec->has_b_frames;
2142  if (!codec->sample_aspect_ratio.num) {
2143  codec->sample_aspect_ratio =
2144  ost->st->sample_aspect_ratio =
2145  ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio :
2146  ist->st->codec->sample_aspect_ratio.num ?
2147  ist->st->codec->sample_aspect_ratio : (AVRational){0, 1};
2148  }
2149  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2150  break;
2151  case AVMEDIA_TYPE_SUBTITLE:
2152  codec->width = icodec->width;
2153  codec->height = icodec->height;
2154  break;
2155  case AVMEDIA_TYPE_DATA:
2156  case AVMEDIA_TYPE_ATTACHMENT:
2157  break;
2158  default:
2159  abort();
2160  }
2161  } else {
2162  if (!ost->enc)
2163  ost->enc = avcodec_find_encoder(codec->codec_id);
2164  if (!ost->enc) {
2165  /* should only happen when a default codec is not present. */
2166  snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
2167  avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
2168  ret = AVERROR(EINVAL);
2169  goto dump_format;
2170  }
2171 
2172  if (ist)
2173  ist->decoding_needed++;
2174  ost->encoding_needed = 1;
2175 
2176  if (!ost->filter &&
2177  (codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2178  codec->codec_type == AVMEDIA_TYPE_AUDIO)) {
2179  FilterGraph *fg;
2180  fg = init_simple_filtergraph(ist, ost);
2181  if (configure_filtergraph(fg)) {
2182  av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
2183  exit(1);
2184  }
2185  }
2186 
2187  if (codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2188  if (ost->filter && !ost->frame_rate.num)
2189  ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
2190  if (ist && !ost->frame_rate.num)
2191  ost->frame_rate = ist->framerate;
2192  if (ist && !ost->frame_rate.num)
2193  ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational){25, 1};
2194 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
2195  if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
2196  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
2197  ost->frame_rate = ost->enc->supported_framerates[idx];
2198  }
2199  }
2200 
2201  switch (codec->codec_type) {
2202  case AVMEDIA_TYPE_AUDIO:
2203  codec->sample_fmt = ost->filter->filter->inputs[0]->format;
2204  codec->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
2205  codec->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
2206  codec->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
2207  codec->time_base = (AVRational){ 1, codec->sample_rate };
2208  break;
2209  case AVMEDIA_TYPE_VIDEO:
2210  codec->time_base = av_inv_q(ost->frame_rate);
2211  if (ost->filter && !(codec->time_base.num && codec->time_base.den))
2212  codec->time_base = ost->filter->filter->inputs[0]->time_base;
2213  if ( av_q2d(codec->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
2214  && (video_sync_method == VSYNC_CFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
2215  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
2216  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
2217  }
2218  for (j = 0; j < ost->forced_kf_count; j++)
2219  ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
2220  AV_TIME_BASE_Q,
2221  codec->time_base);
2222 
2223  codec->width = ost->filter->filter->inputs[0]->w;
2224  codec->height = ost->filter->filter->inputs[0]->h;
2225  codec->sample_aspect_ratio = ost->st->sample_aspect_ratio =
2226  ost->frame_aspect_ratio ? // overridden by the -aspect cli option
2227  av_d2q(ost->frame_aspect_ratio * codec->height/codec->width, 255) :
2228  ost->filter->filter->inputs[0]->sample_aspect_ratio;
2229  codec->pix_fmt = ost->filter->filter->inputs[0]->format;
2230 
2231  if (!icodec ||
2232  codec->width != icodec->width ||
2233  codec->height != icodec->height ||
2234  codec->pix_fmt != icodec->pix_fmt) {
2235  codec->bits_per_raw_sample = frame_bits_per_raw_sample;
2236  }
2237 
2238  if (ost->forced_keyframes)
2239  parse_forced_key_frames(ost->forced_keyframes, ost,
2240  ost->st->codec);
2241  break;
2242  case AVMEDIA_TYPE_SUBTITLE:
2243  codec->time_base = (AVRational){1, 1000};
2244  if (!codec->width) {
2245  codec->width = input_streams[ost->source_index]->st->codec->width;
2246  codec->height = input_streams[ost->source_index]->st->codec->height;
2247  }
2248  break;
2249  default:
2250  abort();
2251  break;
2252  }
2253  /* two pass mode */
2254  if (codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2)) {
2255  char logfilename[1024];
2256  FILE *f;
2257 
2258  snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
2259  ost->logfile_prefix ? ost->logfile_prefix :
2260  DEFAULT_PASS_LOGFILENAME_PREFIX,
2261  i);
2262  if (!strcmp(ost->enc->name, "libx264")) {
2263  av_dict_set(&ost->opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
2264  } else {
2265  if (codec->flags & CODEC_FLAG_PASS2) {
2266  char *logbuffer;
2267  size_t logbuffer_size;
2268  if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
2269  av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
2270  logfilename);
2271  exit(1);
2272  }
2273  codec->stats_in = logbuffer;
2274  }
2275  if (codec->flags & CODEC_FLAG_PASS1) {
2276  f = fopen(logfilename, "wb");
2277  if (!f) {
2278  av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
2279  logfilename, strerror(errno));
2280  exit(1);
2281  }
2282  ost->logfile = f;
2283  }
2284  }
2285  }
2286  }
2287  }
2288 
2289  /* open each encoder */
2290  for (i = 0; i < nb_output_streams; i++) {
2291  ost = output_streams[i];
2292  if (ost->encoding_needed) {
2293  AVCodec *codec = ost->enc;
2294  AVCodecContext *dec = NULL;
2295 
2296  if ((ist = get_input_stream(ost)))
2297  dec = ist->st->codec;
2298  if (dec && dec->subtitle_header) {
2299  /* ASS code assumes this buffer is null terminated so add extra byte. */
2300  ost->st->codec->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2301  if (!ost->st->codec->subtitle_header) {
2302  ret = AVERROR(ENOMEM);
2303  goto dump_format;
2304  }
2305  memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2306  ost->st->codec->subtitle_header_size = dec->subtitle_header_size;
2307  }
2308  if (!av_dict_get(ost->opts, "threads", NULL, 0))
2309  av_dict_set(&ost->opts, "threads", "auto", 0);
2310  if ((ret = avcodec_open2(ost->st->codec, codec, &ost->opts)) < 0) {
2311  if (ret == AVERROR_EXPERIMENTAL)
2312  abort_codec_experimental(codec, 1);
2313  snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
2314  ost->file_index, ost->index);
2315  goto dump_format;
2316  }
2317  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2318  !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
2319  av_buffersink_set_frame_size(ost->filter->filter,
2320  ost->st->codec->frame_size);
2321  assert_avoptions(ost->opts);
2322  if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000)
2323  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2324  " It takes bits/s as argument, not kbits/s\n");
2325  extra_size += ost->st->codec->extradata_size;
2326 
2327  if (ost->st->codec->me_threshold)
2328  input_streams[ost->source_index]->st->codec->debug |= FF_DEBUG_MV;
2329  }
2330  }
2331 
2332  /* init input streams */
2333  for (i = 0; i < nb_input_streams; i++)
2334  if ((ret = init_input_stream(i, error, sizeof(error))) < 0)
2335  goto dump_format;
2336 
2337  /* discard unused programs */
2338  for (i = 0; i < nb_input_files; i++) {
2339  InputFile *ifile = input_files[i];
2340  for (j = 0; j < ifile->ctx->nb_programs; j++) {
2341  AVProgram *p = ifile->ctx->programs[j];
2342  int discard = AVDISCARD_ALL;
2343 
2344  for (k = 0; k < p->nb_stream_indexes; k++)
2345  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
2346  discard = AVDISCARD_DEFAULT;
2347  break;
2348  }
2349  p->discard = discard;
2350  }
2351  }
2352 
2353  /* open files and write file headers */
2354  for (i = 0; i < nb_output_files; i++) {
2355  oc = output_files[i]->ctx;
2356  oc->interrupt_callback = int_cb;
2357  if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
2358  char errbuf[128];
2359  const char *errbuf_ptr = errbuf;
2360  if (av_strerror(ret, errbuf, sizeof(errbuf)) < 0)
2361  errbuf_ptr = strerror(AVUNERROR(ret));
2362  snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters ?): %s", i, errbuf_ptr);
2363  ret = AVERROR(EINVAL);
2364  goto dump_format;
2365  }
2366 // assert_avoptions(output_files[i]->opts);
2367  if (strcmp(oc->oformat->name, "rtp")) {
2368  want_sdp = 0;
2369  }
2370  }
2371 
2372  dump_format:
2373  /* dump the file output parameters - cannot be done before in case
2374  of stream copy */
2375  for (i = 0; i < nb_output_files; i++) {
2376  av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
2377  }
2378 
2379  /* dump the stream mapping */
2380  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
2381  for (i = 0; i < nb_input_streams; i++) {
2382  ist = input_streams[i];
2383 
2384  for (j = 0; j < ist->nb_filters; j++) {
2385  if (ist->filters[j]->graph->graph_desc) {
2386  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
2387  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
2388  ist->filters[j]->name);
2389  if (nb_filtergraphs > 1)
2390  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
2391  av_log(NULL, AV_LOG_INFO, "\n");
2392  }
2393  }
2394  }
2395 
2396  for (i = 0; i < nb_output_streams; i++) {
2397  ost = output_streams[i];
2398 
2399  if (ost->attachment_filename) {
2400  /* an attached file */
2401  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
2402  ost->attachment_filename, ost->file_index, ost->index);
2403  continue;
2404  }
2405 
2406  if (ost->filter && ost->filter->graph->graph_desc) {
2407  /* output from a complex graph */
2408  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
2409  if (nb_filtergraphs > 1)
2410  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
2411 
2412  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
2413  ost->index, ost->enc ? ost->enc->name : "?");
2414  continue;
2415  }
2416 
2417  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
2418  input_streams[ost->source_index]->file_index,
2419  input_streams[ost->source_index]->st->index,
2420  ost->file_index,
2421  ost->index);
2422  if (ost->sync_ist != input_streams[ost->source_index])
2423  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
2424  ost->sync_ist->file_index,
2425  ost->sync_ist->st->index);
2426  if (ost->stream_copy)
2427  av_log(NULL, AV_LOG_INFO, " (copy)");
2428  else
2429  av_log(NULL, AV_LOG_INFO, " (%s -> %s)", input_streams[ost->source_index]->dec ?
2430  input_streams[ost->source_index]->dec->name : "?",
2431  ost->enc ? ost->enc->name : "?");
2432  av_log(NULL, AV_LOG_INFO, "\n");
2433  }
2434 
2435  if (ret) {
2436  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
2437  return ret;
2438  }
2439 
2440  if (want_sdp) {
2441  print_sdp();
2442  }
2443 
2444  return 0;
2445 }
2446 
2447 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
2448 static int need_output(void)
2449 {
2450  int i;
2451 
2452  for (i = 0; i < nb_output_streams; i++) {
2453  OutputStream *ost = output_streams[i];
2454  OutputFile *of = output_files[ost->file_index];
2455  AVFormatContext *os = output_files[ost->file_index]->ctx;
2456 
2457  if (ost->finished ||
2458  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
2459  continue;
2460  if (ost->frame_number >= ost->max_frames) {
2461  int j;
2462  for (j = 0; j < of->ctx->nb_streams; j++)
2463  close_output_stream(output_streams[of->ost_index + j]);
2464  continue;
2465  }
2466 
2467  return 1;
2468  }
2469 
2470  return 0;
2471 }
2472 
2473 /**
2474  * Select the output stream to process.
2475  *
2476  * @return selected output stream, or NULL if none available
2477  */
2478 static OutputStream *choose_output(void)
2479 {
2480  int i;
2481  int64_t opts_min = INT64_MAX;
2482  OutputStream *ost_min = NULL;
2483 
2484  for (i = 0; i < nb_output_streams; i++) {
2485  OutputStream *ost = output_streams[i];
2486  int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
2487  AV_TIME_BASE_Q);
2488  if (!ost->unavailable && !ost->finished && opts < opts_min) {
2489  opts_min = opts;
2490  ost_min = ost;
2491  }
2492  }
2493  return ost_min;
2494 }
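
/* [Editor's note] Illustrative example, not part of ffmpeg.c: choose_output() rescales
 * each stream's last muxed DTS (st->cur_dts) to microseconds and picks the smallest
 * among streams that are neither finished nor temporarily unavailable. With one output
 * stream at 40 ms and another at 120 ms, the 40 ms stream is processed next, which
 * keeps the outputs roughly interleaved. */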
2495 
2496 static int check_keyboard_interaction(int64_t cur_time)
2497 {
2498  int i, ret, key;
2499  static int64_t last_time;
2500  if (received_nb_signals)
2501  return AVERROR_EXIT;
2502  /* read_key() returns 0 on EOF */
2503  if(cur_time - last_time >= 100000 && !run_as_daemon){
2504  key = read_key();
2505  last_time = cur_time;
2506  }else
2507  key = -1;
2508  if (key == 'q')
2509  return AVERROR_EXIT;
2510  if (key == '+') av_log_set_level(av_log_get_level()+10);
2511  if (key == '-') av_log_set_level(av_log_get_level()-10);
2512  if (key == 's') qp_hist ^= 1;
2513  if (key == 'h'){
2514  if (do_hex_dump){
2515  do_hex_dump = do_pkt_dump = 0;
2516  } else if(do_pkt_dump){
2517  do_hex_dump = 1;
2518  } else
2519  do_pkt_dump = 1;
2520  av_log_set_level(AV_LOG_DEBUG);
2521  }
2522  if (key == 'c' || key == 'C'){
2523  char buf[4096], target[64], command[256], arg[256] = {0};
2524  double time;
2525  int k, n = 0;
2526  fprintf(stderr, "\nEnter command: <target> <time> <command>[ <argument>]\n");
2527  i = 0;
2528  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
2529  if (k > 0)
2530  buf[i++] = k;
2531  buf[i] = 0;
2532  if (k > 0 &&
2533  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
2534  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
2535  target, time, command, arg);
2536  for (i = 0; i < nb_filtergraphs; i++) {
2537  FilterGraph *fg = filtergraphs[i];
2538  if (fg->graph) {
2539  if (time < 0) {
2540  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
2541  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
2542  fprintf(stderr, "Command reply for stream %d: ret:%d res:%s\n", i, ret, buf);
2543  } else {
2544  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
2545  }
2546  }
2547  }
2548  } else {
2549  av_log(NULL, AV_LOG_ERROR,
2550  "Parse error, at least 3 arguments were expected, "
2551  "only %d given in string '%s'\n", n, buf);
2552  }
2553  }
2554  if (key == 'd' || key == 'D'){
2555  int debug=0;
2556  if(key == 'D') {
2557  debug = input_streams[0]->st->codec->debug<<1;
2558  if(!debug) debug = 1;
2559  while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
2560  debug += debug;
2561  }else
2562  if(scanf("%d", &debug)!=1)
2563  fprintf(stderr,"error parsing debug value\n");
2564  for(i=0;i<nb_input_streams;i++) {
2565  input_streams[i]->st->codec->debug = debug;
2566  }
2567  for(i=0;i<nb_output_streams;i++) {
2568  OutputStream *ost = output_streams[i];
2569  ost->st->codec->debug = debug;
2570  }
2571  if(debug) av_log_set_level(AV_LOG_DEBUG);
2572  fprintf(stderr,"debug=%d\n", debug);
2573  }
2574  if (key == '?'){
2575  fprintf(stderr, "key function\n"
2576  "? show this help\n"
2577  "+ increase verbosity\n"
2578  "- decrease verbosity\n"
2579  "c Send command to filtergraph\n"
2580  "D cycle through available debug modes\n"
2581  "h dump packets/hex press to cycle through the 3 states\n"
2582  "q quit\n"
2583  "s Show QP histogram\n"
2584  );
2585  }
2586  return 0;
2587 }
2588 
2589 #if HAVE_PTHREADS
2590 static void *input_thread(void *arg)
2591 {
2592  InputFile *f = arg;
2593  int ret = 0;
2594 
2595  while (!transcoding_finished && ret >= 0) {
2596  AVPacket pkt;
2597  ret = av_read_frame(f->ctx, &pkt);
2598 
2599  if (ret == AVERROR(EAGAIN)) {
2600  av_usleep(10000);
2601  ret = 0;
2602  continue;
2603  } else if (ret < 0)
2604  break;
2605 
2606  pthread_mutex_lock(&f->fifo_lock);
2607  while (!av_fifo_space(f->fifo))
2608  pthread_cond_wait(&f->fifo_cond, &f->fifo_lock);
2609 
2610  av_dup_packet(&pkt);
2611  av_fifo_generic_write(f->fifo, &pkt, sizeof(pkt), NULL);
2612 
2613  pthread_mutex_unlock(&f->fifo_lock);
2614  }
2615 
2616  f->finished = 1;
2617  return NULL;
2618 }
2619 
2620 static void free_input_threads(void)
2621 {
2622  int i;
2623 
2624  if (nb_input_files == 1)
2625  return;
2626 
2627  transcoding_finished = 1;
2628 
2629  for (i = 0; i < nb_input_files; i++) {
2630  InputFile *f = input_files[i];
2631  AVPacket pkt;
2632 
2633  if (!f->fifo || f->joined)
2634  continue;
2635 
2636  pthread_mutex_lock(&f->fifo_lock);
2637  while (av_fifo_size(f->fifo)) {
2638  av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);
2639  av_free_packet(&pkt);
2640  }
2641  pthread_cond_signal(&f->fifo_cond);
2642  pthread_mutex_unlock(&f->fifo_lock);
2643 
2644  pthread_join(f->thread, NULL);
2645  f->joined = 1;
2646 
2647  while (av_fifo_size(f->fifo)) {
2648  av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);
2649  av_free_packet(&pkt);
2650  }
2651  av_fifo_free(f->fifo);
2652  }
2653 }
2654 
2655 static int init_input_threads(void)
2656 {
2657  int i, ret;
2658 
2659  if (nb_input_files == 1)
2660  return 0;
2661 
2662  for (i = 0; i < nb_input_files; i++) {
2663  InputFile *f = input_files[i];
2664 
2665  if (!(f->fifo = av_fifo_alloc(8*sizeof(AVPacket))))
2666  return AVERROR(ENOMEM);
2667 
2668  pthread_mutex_init(&f->fifo_lock, NULL);
2669  pthread_cond_init (&f->fifo_cond, NULL);
2670 
2671  if ((ret = pthread_create(&f->thread, NULL, input_thread, f)))
2672  return AVERROR(ret);
2673  }
2674  return 0;
2675 }
2676 
2677 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
2678 {
2679  int ret = 0;
2680 
2681  pthread_mutex_lock(&f->fifo_lock);
2682 
2683  if (av_fifo_size(f->fifo)) {
2684  av_fifo_generic_read(f->fifo, pkt, sizeof(*pkt), NULL);
2685  pthread_cond_signal(&f->fifo_cond);
2686  } else {
2687  if (f->finished)
2688  ret = AVERROR_EOF;
2689  else
2690  ret = AVERROR(EAGAIN);
2691  }
2692 
2693  pthread_mutex_unlock(&f->fifo_lock);
2694 
2695  return ret;
2696 }
2697 #endif
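
/* [Editor's note] The threaded demuxing above is a bounded producer/consumer queue:
 * input_thread() blocks in pthread_cond_wait() while the 8-packet FIFO is full, and
 * get_input_packet_mt() signals the condition after draining a packet so the reader
 * can continue. A minimal sketch of the same handshake with a hypothetical fixed-size
 * integer queue (names are illustrative, not FFmpeg API; initialize lock/cond with
 * pthread_mutex_init()/pthread_cond_init() as init_input_threads() does above): */
#include <pthread.h>

typedef struct ExampleQueue {
    int buf[8], len;
    pthread_mutex_t lock;
    pthread_cond_t  cond;
} ExampleQueue;

static void example_queue_push(ExampleQueue *q, int v)
{
    pthread_mutex_lock(&q->lock);
    while (q->len == 8)                   /* queue full: wait until the consumer drains it */
        pthread_cond_wait(&q->cond, &q->lock);
    q->buf[q->len++] = v;
    pthread_mutex_unlock(&q->lock);
}

static int example_queue_pop(ExampleQueue *q)
{
    int v;
    pthread_mutex_lock(&q->lock);
    v = q->buf[--q->len];                 /* caller checks for data first, as ffmpeg.c does with av_fifo_size(); LIFO here for brevity */
    pthread_cond_signal(&q->cond);        /* wake a producer blocked on a full queue */
    pthread_mutex_unlock(&q->lock);
    return v;
}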
2698 
2699 static int get_input_packet(InputFile *f, AVPacket *pkt)
2700 {
2701 #if HAVE_PTHREADS
2702  if (nb_input_files > 1)
2703  return get_input_packet_mt(f, pkt);
2704 #endif
2705  return av_read_frame(f->ctx, pkt);
2706 }
2707 
2708 static int got_eagain(void)
2709 {
2710  int i;
2711  for (i = 0; i < nb_output_streams; i++)
2712  if (output_streams[i]->unavailable)
2713  return 1;
2714  return 0;
2715 }
2716 
2717 static void reset_eagain(void)
2718 {
2719  int i;
2720  for (i = 0; i < nb_input_files; i++)
2721  input_files[i]->eagain = 0;
2722  for (i = 0; i < nb_output_streams; i++)
2723  output_streams[i]->unavailable = 0;
2724 }
2725 
2726 /*
2727  * Return
2728  * - 0 -- one packet was read and processed
2729  * - AVERROR(EAGAIN) -- no packets were available for selected file,
2730  * this function should be called again
2731  * - AVERROR_EOF -- this function should not be called again
2732  */
2733 static int process_input(int file_index)
2734 {
2735  InputFile *ifile = input_files[file_index];
2736  AVFormatContext *is;
2737  InputStream *ist;
2738  AVPacket pkt;
2739  int ret, i, j;
2740 
2741  is = ifile->ctx;
2742  ret = get_input_packet(ifile, &pkt);
2743 
2744  if (ret == AVERROR(EAGAIN)) {
2745  ifile->eagain = 1;
2746  return ret;
2747  }
2748  if (ret < 0) {
2749  if (ret != AVERROR_EOF) {
2750  print_error(is->filename, ret);
2751  if (exit_on_error)
2752  exit(1);
2753  }
2754  ifile->eof_reached = 1;
2755 
2756  for (i = 0; i < ifile->nb_streams; i++) {
2757  ist = input_streams[ifile->ist_index + i];
2758  if (ist->decoding_needed)
2759  output_packet(ist, NULL);
2760 
2761  /* mark all outputs that don't go through lavfi as finished */
2762  for (j = 0; j < nb_output_streams; j++) {
2763  OutputStream *ost = output_streams[j];
2764 
2765  if (ost->source_index == ifile->ist_index + i &&
2766  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
2767  close_output_stream(ost);
2768  }
2769  }
2770 
2771  return AVERROR(EAGAIN);
2772  }
2773 
2774  reset_eagain();
2775 
2776  if (do_pkt_dump) {
2777  av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
2778  is->streams[pkt.stream_index]);
2779  }
2780  /* the following test is needed in case new streams appear
2781  dynamically in the stream: we ignore them */
2782  if (pkt.stream_index >= ifile->nb_streams) {
2783  report_new_stream(file_index, &pkt);
2784  goto discard_packet;
2785  }
2786 
2787  ist = input_streams[ifile->ist_index + pkt.stream_index];
2788  if (ist->discard)
2789  goto discard_packet;
2790 
2791  if (debug_ts) {
2792  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
2793  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
2794  ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->st->codec->codec_type),
2795  av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
2796  av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
2797  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
2798  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
2799  av_ts2str(input_files[ist->file_index]->ts_offset),
2800  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
2801  }
2802 
2803  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
2804  int64_t stime, stime2;
2805  // Correct the start time based on the enabled streams
2806  // FIXME: ideally this should be done before the first use of the start time, but we do not know which streams are enabled at that point,
2807  // so we instead do it here as part of discontinuity handling
2808  if ( ist->next_dts == AV_NOPTS_VALUE
2809  && ifile->ts_offset == -is->start_time
2810  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
2811  int64_t new_start_time = INT64_MAX;
2812  for (i=0; i<is->nb_streams; i++) {
2813  AVStream *st = is->streams[i];
2814  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
2815  continue;
2816  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
2817  }
2818  if (new_start_time > is->start_time) {
2819  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
2820  ifile->ts_offset = -new_start_time;
2821  }
2822  }
2823 
2824  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
2825  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
2826  ist->wrap_correction_done = 1;
2827 
2828  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
2829  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
2830  ist->wrap_correction_done = 0;
2831  }
2832  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
2833  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
2834  ist->wrap_correction_done = 0;
2835  }
2836  }
2837 
2838  if (pkt.dts != AV_NOPTS_VALUE)
2839  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2840  if (pkt.pts != AV_NOPTS_VALUE)
2841  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2842 
2843  if (pkt.pts != AV_NOPTS_VALUE)
2844  pkt.pts *= ist->ts_scale;
2845  if (pkt.dts != AV_NOPTS_VALUE)
2846  pkt.dts *= ist->ts_scale;
2847 
2848  if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
2849  !copy_ts) {
2850  int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
2851  int64_t delta = pkt_dts - ist->next_dts;
2852  if (is->iformat->flags & AVFMT_TS_DISCONT) {
2853  if(delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
2854  (delta > 1LL*dts_delta_threshold*AV_TIME_BASE &&
2855  ist->st->codec->codec_type != AVMEDIA_TYPE_SUBTITLE) ||
2856  pkt_dts+1<ist->pts){
2857  ifile->ts_offset -= delta;
2858  av_log(NULL, AV_LOG_DEBUG,
2859  "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
2860  delta, ifile->ts_offset);
2861  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2862  if (pkt.pts != AV_NOPTS_VALUE)
2863  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2864  }
2865  } else {
2866  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
2867  delta > 1LL*dts_error_threshold*AV_TIME_BASE
2868  ) {
2869  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
2870  pkt.dts = AV_NOPTS_VALUE;
2871  }
2872  if (pkt.pts != AV_NOPTS_VALUE){
2873  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
2874  delta = pkt_pts - ist->next_dts;
2875  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
2876  delta > 1LL*dts_error_threshold*AV_TIME_BASE
2877  ) {
2878  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
2879  pkt.pts = AV_NOPTS_VALUE;
2880  }
2881  }
2882  }
2883  }
2884 
2885  if (debug_ts) {
2886  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
2887  ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->st->codec->codec_type),
2888  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
2889  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
2890  av_ts2str(input_files[ist->file_index]->ts_offset),
2891  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
2892  }
2893 
2894  sub2video_heartbeat(ist, pkt.pts);
2895 
2896  ret = output_packet(ist, &pkt);
2897  if (ret < 0) {
2898  char buf[128];
2899  av_strerror(ret, buf, sizeof(buf));
2900  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2901  ist->file_index, ist->st->index, buf);
2902  if (exit_on_error)
2903  exit(1);
2904  }
2905 
2906 discard_packet:
2907  av_free_packet(&pkt);
2908 
2909  return 0;
2910 }
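
/* [Editor's note] Worked example for the wrap correction above (illustrative, not part
 * of ffmpeg.c): an MPEG-TS stream has pts_wrap_bits = 33, so stime2 = stime + 2^33.
 * A packet stamped more than half the wrap range past the start time (2^32 ticks,
 * about 13.25 hours at 90 kHz) is treated as a pre-wrap timestamp and has 2^33
 * subtracted, e.g. a DTS of stime + 2^33 - 1000 is rewritten to stime - 1000,
 * i.e. just before the corrected start. */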
2911 
2912 /**
2913  * Perform a step of transcoding for the specified filter graph.
2914  *
2915  * @param[in] graph filter graph to consider
2916  * @param[out] best_ist input stream where a frame would allow to continue
2917  * @return 0 for success, <0 for error
2918  */
2919 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
2920 {
2921  int i, ret;
2922  int nb_requests, nb_requests_max = 0;
2923  InputFilter *ifilter;
2924  InputStream *ist;
2925 
2926  *best_ist = NULL;
2927  ret = avfilter_graph_request_oldest(graph->graph);
2928  if (ret >= 0)
2929  return reap_filters();
2930 
2931  if (ret == AVERROR_EOF) {
2932  ret = reap_filters();
2933  for (i = 0; i < graph->nb_outputs; i++)
2934  close_output_stream(graph->outputs[i]->ost);
2935  return ret;
2936  }
2937  if (ret != AVERROR(EAGAIN))
2938  return ret;
2939 
2940  for (i = 0; i < graph->nb_inputs; i++) {
2941  ifilter = graph->inputs[i];
2942  ist = ifilter->ist;
2943  if (input_files[ist->file_index]->eagain ||
2944  input_files[ist->file_index]->eof_reached)
2945  continue;
2946  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
2947  if (nb_requests > nb_requests_max) {
2948  nb_requests_max = nb_requests;
2949  *best_ist = ist;
2950  }
2951  }
2952 
2953  if (!*best_ist)
2954  for (i = 0; i < graph->nb_outputs; i++)
2955  graph->outputs[i]->ost->unavailable = 1;
2956 
2957  return 0;
2958 }
2959 
2960 /**
2961  * Run a single step of transcoding.
2962  *
2963  * @return 0 for success, <0 for error
2964  */
2965 static int transcode_step(void)
2966 {
2967  OutputStream *ost;
2968  InputStream *ist;
2969  int ret;
2970 
2971  ost = choose_output();
2972  if (!ost) {
2973  if (got_eagain()) {
2974  reset_eagain();
2975  av_usleep(10000);
2976  return 0;
2977  }
2978  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
2979  return AVERROR_EOF;
2980  }
2981 
2982  if (ost->filter) {
2983  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
2984  return ret;
2985  if (!ist)
2986  return 0;
2987  } else {
2988  av_assert0(ost->source_index >= 0);
2989  ist = input_streams[ost->source_index];
2990  }
2991 
2992  ret = process_input(ist->file_index);
2993  if (ret == AVERROR(EAGAIN)) {
2994  if (input_files[ist->file_index]->eagain)
2995  ost->unavailable = 1;
2996  return 0;
2997  }
2998  if (ret < 0)
2999  return ret == AVERROR_EOF ? 0 : ret;
3000 
3001  return reap_filters();
3002 }
3003 
3004 /*
3005  * The following code is the main loop of the file converter
3006  */
3007 static int transcode(void)
3008 {
3009  int ret, i;
3010  AVFormatContext *os;
3011  OutputStream *ost;
3012  InputStream *ist;
3013  int64_t timer_start;
3014 
3015  ret = transcode_init();
3016  if (ret < 0)
3017  goto fail;
3018 
3019  if (stdin_interaction) {
3020  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
3021  }
3022 
3023  timer_start = av_gettime();
3024 
3025 #if HAVE_PTHREADS
3026  if ((ret = init_input_threads()) < 0)
3027  goto fail;
3028 #endif
3029 
3030  while (!received_sigterm) {
3031  int64_t cur_time= av_gettime();
3032 
3033  /* if 'q' pressed, exits */
3034  if (stdin_interaction)
3035  if (check_keyboard_interaction(cur_time) < 0)
3036  break;
3037 
3038  /* check if there's any stream where output is still needed */
3039  if (!need_output()) {
3040  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
3041  break;
3042  }
3043 
3044  ret = transcode_step();
3045  if (ret < 0) {
3046  if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
3047  continue;
3048 
3049  av_log(NULL, AV_LOG_ERROR, "Error while filtering.\n");
3050  break;
3051  }
3052 
3053  /* dump report by using the first output video and audio streams */
3054  print_report(0, timer_start, cur_time);
3055  }
3056 #if HAVE_PTHREADS
3057  free_input_threads();
3058 #endif
3059 
3060  /* at the end of stream, we must flush the decoder buffers */
3061  for (i = 0; i < nb_input_streams; i++) {
3062  ist = input_streams[i];
3063  if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
3064  output_packet(ist, NULL);
3065  }
3066  }
3067  flush_encoders();
3068 
3069  term_exit();
3070 
3071  /* write the trailer if needed and close file */
3072  for (i = 0; i < nb_output_files; i++) {
3073  os = output_files[i]->ctx;
3074  av_write_trailer(os);
3075  }
3076 
3077  /* dump report by using the first video and audio streams */
3078  print_report(1, timer_start, av_gettime());
3079 
3080  /* close each encoder */
3081  for (i = 0; i < nb_output_streams; i++) {
3082  ost = output_streams[i];
3083  if (ost->encoding_needed) {
3084  av_freep(&ost->st->codec->stats_in);
3085  avcodec_close(ost->st->codec);
3086  }
3087  }
3088 
3089  /* close each decoder */
3090  for (i = 0; i < nb_input_streams; i++) {
3091  ist = input_streams[i];
3092  if (ist->decoding_needed) {
3093  avcodec_close(ist->st->codec);
3094  }
3095  }
3096 
3097  /* finished ! */
3098  ret = 0;
3099 
3100  fail:
3101 #if HAVE_PTHREADS
3102  free_input_threads();
3103 #endif
3104 
3105  if (output_streams) {
3106  for (i = 0; i < nb_output_streams; i++) {
3107  ost = output_streams[i];
3108  if (ost) {
3109  if (ost->stream_copy)
3110  av_freep(&ost->st->codec->extradata);
3111  if (ost->logfile) {
3112  fclose(ost->logfile);
3113  ost->logfile = NULL;
3114  }
3115  av_freep(&ost->st->codec->subtitle_header);
3116  av_free(ost->forced_kf_pts);
3117  av_dict_free(&ost->opts);
3118  }
3119  }
3120  }
3121  return ret;
3122 }
3123 
3124 
3125 static int64_t getutime(void)
3126 {
3127 #if HAVE_GETRUSAGE
3128  struct rusage rusage;
3129 
3130  getrusage(RUSAGE_SELF, &rusage);
3131  return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
3132 #elif HAVE_GETPROCESSTIMES
3133  HANDLE proc;
3134  FILETIME c, e, k, u;
3135  proc = GetCurrentProcess();
3136  GetProcessTimes(proc, &c, &e, &k, &u);
3137  return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
3138 #else
3139  return av_gettime();
3140 #endif
3141 }
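
/* [Editor's note] On Windows, GetProcessTimes() reports user time as a FILETIME in
 * 100-nanosecond ticks, hence the division by 10 above to obtain microseconds:
 * 10,000,000 ticks = 1 second = 1,000,000 microseconds. */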
3142 
3143 static int64_t getmaxrss(void)
3144 {
3145 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
3146  struct rusage rusage;
3147  getrusage(RUSAGE_SELF, &rusage);
3148  return (int64_t)rusage.ru_maxrss * 1024;
3149 #elif HAVE_GETPROCESSMEMORYINFO
3150  HANDLE proc;
3151  PROCESS_MEMORY_COUNTERS memcounters;
3152  proc = GetCurrentProcess();
3153  memcounters.cb = sizeof(memcounters);
3154  GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
3155  return memcounters.PeakPagefileUsage;
3156 #else
3157  return 0;
3158 #endif
3159 }
3160 
3161 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
3162 {
3163 }
3164 
3165 int main(int argc, char **argv)
3166 {
3167  int ret;
3168  int64_t ti;
3169 
3170  atexit(exit_program);
3171 
3172  setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
3173 
3174  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3175  parse_loglevel(argc, argv, options);
3176 
3177  if(argc>1 && !strcmp(argv[1], "-d")){
3178  run_as_daemon=1;
3179  av_log_set_callback(log_callback_null);
3180  argc--;
3181  argv++;
3182  }
3183 
3184  avcodec_register_all();
3185 #if CONFIG_AVDEVICE
3186  avdevice_register_all();
3187 #endif
3188  avfilter_register_all();
3189  av_register_all();
3190  avformat_network_init();
3191 
3192  show_banner(argc, argv, options);
3193 
3194  term_init();
3195 
3196  /* parse options and open all input/output files */
3197  ret = ffmpeg_parse_options(argc, argv);
3198  if (ret < 0)
3199  exit(1);
3200 
3201  if (nb_output_files <= 0 && nb_input_files == 0) {
3202  show_usage();
3203  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3204  exit(1);
3205  }
3206 
3207  /* file converter / grab */
3208  if (nb_output_files <= 0) {
3209  av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
3210  exit(1);
3211  }
3212 
3213 // if (nb_input_files == 0) {
3214 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
3215 // exit(1);
3216 // }
3217 
3218  current_time = ti = getutime();
3219  if (transcode() < 0)
3220  exit(1);
3221  ti = getutime() - ti;
3222  if (do_benchmark) {
3223  int maxrss = getmaxrss() / 1024;
3224  printf("bench: utime=%0.3fs maxrss=%ikB\n", ti / 1000000.0, maxrss);
3225  }
3226 
3227  exit(received_nb_signals ? 255 : 0);
3228  return 0;
3229 }