ffmpeg.c
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdint.h>
34 
35 #if HAVE_IO_H
36 #include <io.h>
37 #endif
38 #if HAVE_UNISTD_H
39 #include <unistd.h>
40 #endif
41 
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
45 #include "libavutil/opt.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
64 
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
68 
69 #if HAVE_SYS_RESOURCE_H
70 #include <sys/time.h>
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
74 #include <windows.h>
75 #endif
76 #if HAVE_GETPROCESSMEMORYINFO
77 #include <windows.h>
78 #include <psapi.h>
79 #endif
80 #if HAVE_SETCONSOLECTRLHANDLER
81 #include <windows.h>
82 #endif
83 
84 
85 #if HAVE_SYS_SELECT_H
86 #include <sys/select.h>
87 #endif
88 
89 #if HAVE_TERMIOS_H
90 #include <fcntl.h>
91 #include <sys/ioctl.h>
92 #include <sys/time.h>
93 #include <termios.h>
94 #elif HAVE_KBHIT
95 #include <conio.h>
96 #endif
97 
98 #if HAVE_PTHREADS
99 #include <pthread.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
111 
112 static FILE *vstats_file;
113 
114 const char *const forced_keyframes_const_names[] = {
115  "n",
116  "n_forced",
117  "prev_forced_n",
118  "prev_forced_t",
119  "t",
120  NULL
121 };
122 
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
126 
127 static int run_as_daemon = 0;
128 static int nb_frames_dup = 0;
129 static unsigned dup_warning = 1000;
130 static int nb_frames_drop = 0;
131 static int64_t decode_error_stat[2];
132 
133 static int want_sdp = 1;
134 
135 static int current_time;
136 AVIOContext *progress_avio = NULL;
137 
138 static uint8_t *subtitle_out;
139 
140 InputStream **input_streams = NULL;
141 int        nb_input_streams = 0;
142 InputFile   **input_files   = NULL;
143 int        nb_input_files   = 0;
144 
145 OutputStream **output_streams = NULL;
146 int         nb_output_streams = 0;
147 OutputFile   **output_files   = NULL;
148 int         nb_output_files   = 0;
149 
150 FilterGraph **filtergraphs;
151 int        nb_filtergraphs;
152 
153 #if HAVE_TERMIOS_H
154 
155 /* init terminal so that we can grab keys */
156 static struct termios oldtty;
157 static int restore_tty;
158 #endif
159 
160 #if HAVE_PTHREADS
161 static void free_input_threads(void);
162 #endif
163 
164 /* sub2video hack:
165  Convert subtitles to video with alpha to insert them in filter graphs.
166  This is a temporary solution until libavfilter gets real subtitles support.
167  */
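/* In outline (see the functions that follow): each subtitle input keeps one
   persistent 32-bit RGBA frame (ist->sub2video.frame). sub2video_get_blank_frame()
   clears it, sub2video_copy_rect() paints the decoded bitmap rectangles into it
   through their palettes, and sub2video_push_ref() hands the frame to every
   buffersrc attached to this stream, so filters such as overlay can consume
   subtitles as ordinary video frames. */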
168 
169 static int sub2video_get_blank_frame(InputStream *ist)
170 {
171  int ret;
172  AVFrame *frame = ist->sub2video.frame;
173 
174  av_frame_unref(frame);
175  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
176  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
177  ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
178  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
179  return ret;
180  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
181  return 0;
182 }
183 
184 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
185  AVSubtitleRect *r)
186 {
187  uint32_t *pal, *dst2;
188  uint8_t *src, *src2;
189  int x, y;
190 
191  if (r->type != SUBTITLE_BITMAP) {
192  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
193  return;
194  }
195  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
196  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
197  r->x, r->y, r->w, r->h, w, h
198  );
199  return;
200  }
201 
202  dst += r->y * dst_linesize + r->x * 4;
203  src = r->data[0];
204  pal = (uint32_t *)r->data[1];
205  for (y = 0; y < r->h; y++) {
206  dst2 = (uint32_t *)dst;
207  src2 = src;
208  for (x = 0; x < r->w; x++)
209  *(dst2++) = pal[*(src2++)];
210  dst += dst_linesize;
211  src += r->linesize[0];
212  }
213 }
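/* Note on the copy above: the subtitle bitmap is palettized (one byte per
   pixel), so each source byte indexes the RGBA palette in r->data[1] and
   expands to one 4-byte output pixel, which is why the destination offset
   is scaled by 4. */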
214 
215 static void sub2video_push_ref(InputStream *ist, int64_t pts)
216 {
217  AVFrame *frame = ist->sub2video.frame;
218  int i;
219 
220  av_assert1(frame->data[0]);
221  ist->sub2video.last_pts = frame->pts = pts;
222  for (i = 0; i < ist->nb_filters; i++)
223  av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
224  AV_BUFFERSRC_FLAG_KEEP_REF |
225  AV_BUFFERSRC_FLAG_PUSH);
226 }
227 
228 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
229 {
230  AVFrame *frame = ist->sub2video.frame;
231  int8_t *dst;
232  int dst_linesize;
233  int num_rects, i;
234  int64_t pts, end_pts;
235 
236  if (!frame)
237  return;
238  if (sub) {
239  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
240  AV_TIME_BASE_Q, ist->st->time_base);
241  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
242  AV_TIME_BASE_Q, ist->st->time_base);
243  num_rects = sub->num_rects;
244  } else {
245  pts = ist->sub2video.end_pts;
246  end_pts = INT64_MAX;
247  num_rects = 0;
248  }
249  if (sub2video_get_blank_frame(ist) < 0) {
250  av_log(ist->dec_ctx, AV_LOG_ERROR,
251  "Impossible to get a blank canvas.\n");
252  return;
253  }
254  dst = frame->data [0];
255  dst_linesize = frame->linesize[0];
256  for (i = 0; i < num_rects; i++)
257  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
258  sub2video_push_ref(ist, pts);
259  ist->sub2video.end_pts = end_pts;
260 }
261 
262 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
263 {
264  InputFile *infile = input_files[ist->file_index];
265  int i, j, nb_reqs;
266  int64_t pts2;
267 
268  /* When a frame is read from a file, examine all sub2video streams in
269  the same file and send the sub2video frame again. Otherwise, decoded
270  video frames could be accumulating in the filter graph while a filter
271  (possibly overlay) is desperately waiting for a subtitle frame. */
272  for (i = 0; i < infile->nb_streams; i++) {
273  InputStream *ist2 = input_streams[infile->ist_index + i];
274  if (!ist2->sub2video.frame)
275  continue;
276  /* subtitles seem to be usually muxed ahead of other streams;
277  if not, subtracting a larger time here is necessary */
278  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
279  /* do not send the heartbeat frame if the subtitle is already ahead */
280  if (pts2 <= ist2->sub2video.last_pts)
281  continue;
282  if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
283  sub2video_update(ist2, NULL);
284  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
285  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
286  if (nb_reqs)
287  sub2video_push_ref(ist2, pts2);
288  }
289 }
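/* Summary: the heartbeat re-sends the current sub2video canvas for every
   subtitle stream of the file a packet was just read from, but only when a
   downstream buffersrc reports failed requests, i.e. when a filter such as
   overlay is actually starved; this keeps decoded video frames from piling up
   in the graph while it waits for the next subtitle. */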
290 
291 static void sub2video_flush(InputStream *ist)
292 {
293  int i;
294 
295  if (ist->sub2video.end_pts < INT64_MAX)
296  sub2video_update(ist, NULL);
297  for (i = 0; i < ist->nb_filters; i++)
298  av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
299 }
300 
301 /* end of sub2video hack */
302 
303 static void term_exit_sigsafe(void)
304 {
305 #if HAVE_TERMIOS_H
306  if(restore_tty)
307  tcsetattr (0, TCSANOW, &oldtty);
308 #endif
309 }
310 
311 void term_exit(void)
312 {
313  av_log(NULL, AV_LOG_QUIET, "%s", "");
314  term_exit_sigsafe();
315 }
316 
317 static volatile int received_sigterm = 0;
318 static volatile int received_nb_signals = 0;
319 static volatile int transcode_init_done = 0;
320 static volatile int ffmpeg_exited = 0;
321 static int main_return_code = 0;
322 
323 static void
324 sigterm_handler(int sig)
325 {
326  received_sigterm = sig;
327  received_nb_signals++;
328  term_exit_sigsafe();
329  if(received_nb_signals > 3) {
330  write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
331  strlen("Received > 3 system signals, hard exiting\n"));
332 
333  exit(123);
334  }
335 }
336 
337 #if HAVE_SETCONSOLECTRLHANDLER
338 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
339 {
340  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
341 
342  switch (fdwCtrlType)
343  {
344  case CTRL_C_EVENT:
345  case CTRL_BREAK_EVENT:
346  sigterm_handler(SIGINT);
347  return TRUE;
348 
349  case CTRL_CLOSE_EVENT:
350  case CTRL_LOGOFF_EVENT:
351  case CTRL_SHUTDOWN_EVENT:
352  sigterm_handler(SIGTERM);
353  /* Basically, with these 3 events, when we return from this method the
354  process is hard terminated, so stall as long as we need to
355  to try and let the main thread(s) clean up and gracefully terminate
356  (we have at most 5 seconds, but should be done far before that). */
357  while (!ffmpeg_exited) {
358  Sleep(0);
359  }
360  return TRUE;
361 
362  default:
363  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
364  return FALSE;
365  }
366 }
367 #endif
368 
369 void term_init(void)
370 {
371 #if HAVE_TERMIOS_H
372  if (!run_as_daemon && stdin_interaction) {
373  struct termios tty;
374  if (tcgetattr (0, &tty) == 0) {
375  oldtty = tty;
376  restore_tty = 1;
377 
378  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
379  |INLCR|IGNCR|ICRNL|IXON);
380  tty.c_oflag |= OPOST;
381  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
382  tty.c_cflag &= ~(CSIZE|PARENB);
383  tty.c_cflag |= CS8;
384  tty.c_cc[VMIN] = 1;
385  tty.c_cc[VTIME] = 0;
386 
387  tcsetattr (0, TCSANOW, &tty);
388  }
389  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
390  }
391 #endif
392 
393  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
394  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
395 #ifdef SIGXCPU
396  signal(SIGXCPU, sigterm_handler);
397 #endif
398 #if HAVE_SETCONSOLECTRLHANDLER
399  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
400 #endif
401 }
402 
403 /* read a key without blocking */
404 static int read_key(void)
405 {
406  unsigned char ch;
407 #if HAVE_TERMIOS_H
408  int n = 1;
409  struct timeval tv;
410  fd_set rfds;
411 
412  FD_ZERO(&rfds);
413  FD_SET(0, &rfds);
414  tv.tv_sec = 0;
415  tv.tv_usec = 0;
416  n = select(1, &rfds, NULL, NULL, &tv);
417  if (n > 0) {
418  n = read(0, &ch, 1);
419  if (n == 1)
420  return ch;
421 
422  return n;
423  }
424 #elif HAVE_KBHIT
425 # if HAVE_PEEKNAMEDPIPE
426  static int is_pipe;
427  static HANDLE input_handle;
428  DWORD dw, nchars;
429  if(!input_handle){
430  input_handle = GetStdHandle(STD_INPUT_HANDLE);
431  is_pipe = !GetConsoleMode(input_handle, &dw);
432  }
433 
434  if (is_pipe) {
435  /* When running under a GUI, you will end up here. */
436  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
437  // input pipe may have been closed by the program that ran ffmpeg
438  return -1;
439  }
440  //Read it
441  if(nchars != 0) {
442  read(0, &ch, 1);
443  return ch;
444  }else{
445  return -1;
446  }
447  }
448 # endif
449  if(kbhit())
450  return(getch());
451 #endif
452  return -1;
453 }
454 
455 static int decode_interrupt_cb(void *ctx)
456 {
457  return received_nb_signals > transcode_init_done;
458 }
459 
460 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
461 
462 static void ffmpeg_cleanup(int ret)
463 {
464  int i, j;
465 
466  if (do_benchmark) {
467  int maxrss = getmaxrss() / 1024;
468  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
469  }
470 
471  for (i = 0; i < nb_filtergraphs; i++) {
472  FilterGraph *fg = filtergraphs[i];
473  avfilter_graph_free(&fg->graph);
474  for (j = 0; j < fg->nb_inputs; j++) {
476  av_freep(&fg->inputs[j]->name);
477  av_freep(&fg->inputs[j]);
478  }
479  av_freep(&fg->inputs);
480  for (j = 0; j < fg->nb_outputs; j++) {
481  av_freep(&fg->outputs[j]->name);
482  av_freep(&fg->outputs[j]->formats);
483  av_freep(&fg->outputs[j]->channel_layouts);
484  av_freep(&fg->outputs[j]->sample_rates);
485  av_freep(&fg->outputs[j]);
486  }
487  av_freep(&fg->outputs);
488  av_freep(&fg->graph_desc);
489 
490  av_freep(&filtergraphs[i]);
491  }
492  av_freep(&filtergraphs);
493 
494  av_freep(&subtitle_out);
495 
496  /* close files */
497  for (i = 0; i < nb_output_files; i++) {
498  OutputFile *of = output_files[i];
499  AVFormatContext *s;
500  if (!of)
501  continue;
502  s = of->ctx;
503  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
504  avio_closep(&s->pb);
505  avformat_free_context(s);
506  av_dict_free(&of->opts);
507 
508  av_freep(&output_files[i]);
509  }
510  for (i = 0; i < nb_output_streams; i++) {
511  OutputStream *ost = output_streams[i];
512 
513  if (!ost)
514  continue;
515 
516  for (j = 0; j < ost->nb_bitstream_filters; j++)
517  av_bsf_free(&ost->bsf_ctx[j]);
518  av_freep(&ost->bsf_ctx);
520 
522  av_frame_free(&ost->last_frame);
523  av_dict_free(&ost->encoder_opts);
524 
525  av_parser_close(ost->parser);
527 
528  av_freep(&ost->forced_keyframes);
530  av_freep(&ost->avfilter);
531  av_freep(&ost->logfile_prefix);
532 
534  ost->audio_channels_mapped = 0;
535 
536  av_dict_free(&ost->sws_dict);
537 
540 
541  while (ost->muxing_queue && av_fifo_size(ost->muxing_queue)) {
542  AVPacket pkt;
543  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
544  av_packet_unref(&pkt);
545  }
547 
548  av_freep(&output_streams[i]);
549  }
550 #if HAVE_PTHREADS
551  free_input_threads();
552 #endif
553  for (i = 0; i < nb_input_files; i++) {
554  avformat_close_input(&input_files[i]->ctx);
555  av_freep(&input_files[i]);
556  }
557  for (i = 0; i < nb_input_streams; i++) {
558  InputStream *ist = input_streams[i];
559 
562  av_dict_free(&ist->decoder_opts);
565  av_freep(&ist->filters);
566  av_freep(&ist->hwaccel_device);
567  av_freep(&ist->dts_buffer);
568 
570 
571  av_freep(&input_streams[i]);
572  }
573 
574  if (vstats_file) {
575  if (fclose(vstats_file))
577  "Error closing vstats file, loss of information possible: %s\n",
578  av_err2str(AVERROR(errno)));
579  }
581 
582  av_freep(&input_streams);
583  av_freep(&input_files);
584  av_freep(&output_streams);
585  av_freep(&output_files);
586 
587  uninit_opts();
588 
590 
591  if (received_sigterm) {
592  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
593  (int) received_sigterm);
594  } else if (ret && transcode_init_done) {
595  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
596  }
597  term_exit();
598  ffmpeg_exited = 1;
599 }
600 
601 void remove_avoptions(AVDictionary **a, AVDictionary *b)
602 {
603  AVDictionaryEntry *t = NULL;
604 
605  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
606  av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
607  }
608 }
609 
610 void assert_avoptions(AVDictionary *m)
611 {
612  AVDictionaryEntry *t;
613  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
614  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
615  exit_program(1);
616  }
617 }
618 
619 static void abort_codec_experimental(AVCodec *c, int encoder)
620 {
621  exit_program(1);
622 }
623 
624 static void update_benchmark(const char *fmt, ...)
625 {
626  if (do_benchmark_all) {
627  int64_t t = getutime();
628  va_list va;
629  char buf[1024];
630 
631  if (fmt) {
632  va_start(va, fmt);
633  vsnprintf(buf, sizeof(buf), fmt, va);
634  va_end(va);
635  av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
636  }
637  current_time = t;
638  }
639 }
640 
641 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
642 {
643  int i;
644  for (i = 0; i < nb_output_streams; i++) {
645  OutputStream *ost2 = output_streams[i];
646  ost2->finished |= ost == ost2 ? this_stream : others;
647  }
648 }
649 
650 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
651 {
652  AVFormatContext *s = of->ctx;
653  AVStream *st = ost->st;
654  int ret;
655 
656  if (!of->header_written) {
657  AVPacket tmp_pkt;
658  /* the muxer is not initialized yet, buffer the packet */
659  if (!av_fifo_space(ost->muxing_queue)) {
660  int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
661  ost->max_muxing_queue_size);
662  if (new_size <= av_fifo_size(ost->muxing_queue)) {
664  "Too many packets buffered for output stream %d:%d.\n",
665  ost->file_index, ost->st->index);
666  exit_program(1);
667  }
668  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
669  if (ret < 0)
670  exit_program(1);
671  }
672  av_packet_move_ref(&tmp_pkt, pkt);
673  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
674  return;
675  }
676 
677  if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
678  (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
679  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
680 
681  /*
682  * Audio encoders may split the packets -- #frames in != #packets out.
683  * But there is no reordering, so we can limit the number of output packets
684  * by simply dropping them here.
685  * Counting encoded video frames needs to be done separately because of
686  * reordering, see do_video_out()
687  */
688  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
689  if (ost->frame_number >= ost->max_frames) {
690  av_packet_unref(pkt);
691  return;
692  }
693  ost->frame_number++;
694  }
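/* Illustrative numbers for the accounting above (not taken from a real run):
   an audio encoder with frame_size 1024 that is fed a 4096-sample frame emits
   four packets; since audio packets are not reordered, each one increments
   ost->frame_number here, so -frames:a caps the packet count directly.
   Video is excluded because B-frame reordering changes the packet order;
   its frames are counted in do_video_out() instead. */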
695  if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
696  int i;
697  uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
698  NULL);
699  ost->quality = sd ? AV_RL32(sd) : -1;
700  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
701 
702  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
703  if (sd && i < sd[5])
704  ost->error[i] = AV_RL64(sd + 8 + 8*i);
705  else
706  ost->error[i] = -1;
707  }
708 
709  if (ost->frame_rate.num && ost->is_cfr) {
710  if (pkt->duration > 0)
711  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
712  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
713  ost->st->time_base);
714  }
715  }
716 
717  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
718  if (pkt->dts != AV_NOPTS_VALUE &&
719  pkt->pts != AV_NOPTS_VALUE &&
720  pkt->dts > pkt->pts) {
721  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
722  pkt->dts, pkt->pts,
723  ost->file_index, ost->st->index);
724  pkt->pts =
725  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
726  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
727  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
728  }
730  pkt->dts != AV_NOPTS_VALUE &&
731  !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
732  ost->last_mux_dts != AV_NOPTS_VALUE) {
733  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
734  if (pkt->dts < max) {
735  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
736  av_log(s, loglevel, "Non-monotonous DTS in output stream "
737  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
738  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
739  if (exit_on_error) {
740  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
741  exit_program(1);
742  }
743  av_log(s, loglevel, "changing to %"PRId64". This may result "
744  "in incorrect timestamps in the output file.\n",
745  max);
746  if (pkt->pts >= pkt->dts)
747  pkt->pts = FFMAX(pkt->pts, max);
748  pkt->dts = max;
749  }
750  }
751  }
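/* Worked example of the clamp above, with illustrative values: for a muxer
   without AVFMT_TS_NONSTRICT and ost->last_mux_dts == 100, max is 101; an
   incoming dts of 98 is non-monotonous, so dts is raised to 101 and, provided
   pts was not smaller than the original dts, pts is raised to at least 101 too. */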
752  ost->last_mux_dts = pkt->dts;
753 
754  ost->data_size += pkt->size;
755  ost->packets_written++;
756 
757  pkt->stream_index = ost->index;
758 
759  if (debug_ts) {
760  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
761  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
762  av_get_media_type_string(ost->enc_ctx->codec_type),
763  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
764  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
765  pkt->size
766  );
767  }
768 
769  ret = av_interleaved_write_frame(s, pkt);
770  if (ret < 0) {
771  print_error("av_interleaved_write_frame()", ret);
772  main_return_code = 1;
773  close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
774  }
775  av_packet_unref(pkt);
776 }
777 
778 static void close_output_stream(OutputStream *ost)
779 {
780  OutputFile *of = output_files[ost->file_index];
781 
782  ost->finished |= ENCODER_FINISHED;
783  if (of->shortest) {
784  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
785  of->recording_time = FFMIN(of->recording_time, end);
786  }
787 }
788 
789 static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
790 {
791  int ret = 0;
792 
793  /* apply the output bitstream filters, if any */
794  if (ost->nb_bitstream_filters) {
795  int idx;
796 
798  ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
799  if (ret < 0)
800  goto finish;
801 
802  idx = 1;
803  while (idx) {
804  /* get a packet from the previous filter up the chain */
805  ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
806  if (ret == AVERROR(EAGAIN)) {
807  ret = 0;
808  idx--;
809  continue;
810  } else if (ret < 0)
811  goto finish;
812  /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
813  * the api states this shouldn't happen after init(). Propagate it here to the
814  * muxer and to the next filters in the chain to workaround this.
815  * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
816  * par_out->extradata and adapt muxers accordingly to get rid of this. */
817  if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
818  ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
819  if (ret < 0)
820  goto finish;
821  ost->bsf_extradata_updated[idx - 1] |= 1;
822  }
823 
824  /* send it to the next filter down the chain or to the muxer */
825  if (idx < ost->nb_bitstream_filters) {
826  /* HACK/FIXME! - See above */
827  if (!(ost->bsf_extradata_updated[idx] & 2)) {
828  ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
829  if (ret < 0)
830  goto finish;
831  ost->bsf_extradata_updated[idx] |= 2;
832  }
833  ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
834  if (ret < 0)
835  goto finish;
836  idx++;
837  } else
838  write_packet(of, pkt, ost);
839  }
840  } else
841  write_packet(of, pkt, ost);
842 
843 finish:
844  if (ret < 0 && ret != AVERROR_EOF) {
845  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
846  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
847  if(exit_on_error)
848  exit_program(1);
849  }
850 }
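/* The loop above walks the bitstream-filter chain with a single index:
   a packet received from filter idx-1 is either pushed into filter idx
   (moving down the chain) or, past the last filter, muxed via write_packet();
   EAGAIN steps idx back so the earlier filter can be fed more input, and once
   idx reaches 0 the function returns to wait for the next source packet. */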
851 
852 static int check_recording_time(OutputStream *ost)
853 {
854  OutputFile *of = output_files[ost->file_index];
855 
856  if (of->recording_time != INT64_MAX &&
857  av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
858  AV_TIME_BASE_Q) >= 0) {
859  close_output_stream(ost);
860  return 0;
861  }
862  return 1;
863 }
864 
865 static void do_audio_out(OutputFile *of, OutputStream *ost,
866  AVFrame *frame)
867 {
868  AVCodecContext *enc = ost->enc_ctx;
869  AVPacket pkt;
870  int ret;
871 
872  av_init_packet(&pkt);
873  pkt.data = NULL;
874  pkt.size = 0;
875 
876  if (!check_recording_time(ost))
877  return;
878 
879  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
880  frame->pts = ost->sync_opts;
881  ost->sync_opts = frame->pts + frame->nb_samples;
882  ost->samples_encoded += frame->nb_samples;
883  ost->frames_encoded++;
884 
885  av_assert0(pkt.size || !pkt.data);
887  if (debug_ts) {
888  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
889  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
890  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
891  enc->time_base.num, enc->time_base.den);
892  }
893 
894  ret = avcodec_send_frame(enc, frame);
895  if (ret < 0)
896  goto error;
897 
898  while (1) {
899  ret = avcodec_receive_packet(enc, &pkt);
900  if (ret == AVERROR(EAGAIN))
901  break;
902  if (ret < 0)
903  goto error;
904 
905  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
906 
907  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
908 
909  if (debug_ts) {
910  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
911  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
912  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
913  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
914  }
915 
916  output_packet(of, &pkt, ost);
917  }
918 
919  return;
920 error:
921  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
922  exit_program(1);
923 }
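/* Audio encoding above uses the decoupled send/receive API: one call to
   avcodec_send_frame() queues the frame, then avcodec_receive_packet() is
   drained until it returns EAGAIN (the encoder wants more input); each packet
   is rescaled from the encoder time base to the stream time base before it is
   passed to output_packet(). */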
924 
925 static void do_subtitle_out(OutputFile *of,
926  OutputStream *ost,
927  AVSubtitle *sub)
928 {
929  int subtitle_out_max_size = 1024 * 1024;
930  int subtitle_out_size, nb, i;
931  AVCodecContext *enc;
932  AVPacket pkt;
933  int64_t pts;
934 
935  if (sub->pts == AV_NOPTS_VALUE) {
936  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
937  if (exit_on_error)
938  exit_program(1);
939  return;
940  }
941 
942  enc = ost->enc_ctx;
943 
944  if (!subtitle_out) {
945  subtitle_out = av_malloc(subtitle_out_max_size);
946  if (!subtitle_out) {
947  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
948  exit_program(1);
949  }
950  }
951 
952  /* Note: DVB subtitles need one packet to draw them and another
953  packet to clear them */
954  /* XXX: signal it in the codec context ? */
955  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
956  nb = 2;
957  else
958  nb = 1;
959 
960  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
961  pts = sub->pts;
962  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
963  pts -= output_files[ost->file_index]->start_time;
964  for (i = 0; i < nb; i++) {
965  unsigned save_num_rects = sub->num_rects;
966 
967  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
968  if (!check_recording_time(ost))
969  return;
970 
971  sub->pts = pts;
972  // start_display_time is required to be 0
973  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
974  sub->end_display_time -= sub->start_display_time;
975  sub->start_display_time = 0;
976  if (i == 1)
977  sub->num_rects = 0;
978 
979  ost->frames_encoded++;
980 
981  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
982  subtitle_out_max_size, sub);
983  if (i == 1)
984  sub->num_rects = save_num_rects;
985  if (subtitle_out_size < 0) {
986  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
987  exit_program(1);
988  }
989 
990  av_init_packet(&pkt);
991  pkt.data = subtitle_out;
992  pkt.size = subtitle_out_size;
993  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
994  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
995  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
996  /* XXX: the pts correction is handled here. Maybe handling
997  it in the codec would be better */
998  if (i == 0)
999  pkt.pts += 90 * sub->start_display_time;
1000  else
1001  pkt.pts += 90 * sub->end_display_time;
1002  }
1003  pkt.dts = pkt.pts;
1004  output_packet(of, &pkt, ost);
1005  }
1006 }
1007 
1008 static void do_video_out(OutputFile *of,
1009  OutputStream *ost,
1010  AVFrame *next_picture,
1011  double sync_ipts)
1012 {
1013  int ret, format_video_sync;
1014  AVPacket pkt;
1015  AVCodecContext *enc = ost->enc_ctx;
1016  AVCodecParameters *mux_par = ost->st->codecpar;
1017  AVRational frame_rate;
1018  int nb_frames, nb0_frames, i;
1019  double delta, delta0;
1020  double duration = 0;
1021  int frame_size = 0;
1022  InputStream *ist = NULL;
1023  AVFilterContext *filter = ost->filter->filter;
1024 
1025  if (ost->source_index >= 0)
1026  ist = input_streams[ost->source_index];
1027 
1028  frame_rate = av_buffersink_get_frame_rate(filter);
1029  if (frame_rate.num > 0 && frame_rate.den > 0)
1030  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1031 
1032  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1033  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1034 
1035  if (!ost->filters_script &&
1036  !ost->filters &&
1037  next_picture &&
1038  ist &&
1039  lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1040  duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1041  }
1042 
1043  if (!next_picture) {
1044  //end, flushing
1045  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1046  ost->last_nb0_frames[1],
1047  ost->last_nb0_frames[2]);
1048  } else {
1049  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1050  delta = delta0 + duration;
1051 
1052  /* by default, we output a single frame */
1053  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1054  nb_frames = 1;
1055 
1056  format_video_sync = video_sync_method;
1057  if (format_video_sync == VSYNC_AUTO) {
1058  if(!strcmp(of->ctx->oformat->name, "avi")) {
1059  format_video_sync = VSYNC_VFR;
1060  } else
1061  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1062  if ( ist
1063  && format_video_sync == VSYNC_CFR
1064  && input_files[ist->file_index]->ctx->nb_streams == 1
1065  && input_files[ist->file_index]->input_ts_offset == 0) {
1066  format_video_sync = VSYNC_VSCFR;
1067  }
1068  if (format_video_sync == VSYNC_CFR && copy_ts) {
1069  format_video_sync = VSYNC_VSCFR;
1070  }
1071  }
1072  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1073 
1074  if (delta0 < 0 &&
1075  delta > 0 &&
1076  format_video_sync != VSYNC_PASSTHROUGH &&
1077  format_video_sync != VSYNC_DROP) {
1078  if (delta0 < -0.6) {
1079  av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1080  } else
1081  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1082  sync_ipts = ost->sync_opts;
1083  duration += delta0;
1084  delta0 = 0;
1085  }
1086 
1087  switch (format_video_sync) {
1088  case VSYNC_VSCFR:
1089  if (ost->frame_number == 0 && delta0 >= 0.5) {
1090  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1091  delta = duration;
1092  delta0 = 0;
1093  ost->sync_opts = lrint(sync_ipts);
1094  }
1095  case VSYNC_CFR:
1096  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1097  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1098  nb_frames = 0;
1099  } else if (delta < -1.1)
1100  nb_frames = 0;
1101  else if (delta > 1.1) {
1102  nb_frames = lrintf(delta);
1103  if (delta0 > 1.1)
1104  nb0_frames = lrintf(delta0 - 0.6);
1105  }
1106  break;
1107  case VSYNC_VFR:
1108  if (delta <= -0.6)
1109  nb_frames = 0;
1110  else if (delta > 0.6)
1111  ost->sync_opts = lrint(sync_ipts);
1112  break;
1113  case VSYNC_DROP:
1114  case VSYNC_PASSTHROUGH:
1115  ost->sync_opts = lrint(sync_ipts);
1116  break;
1117  default:
1118  av_assert0(0);
1119  }
1120  }
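/* Illustrative CFR example for the values computed above: with duration = 1
   output tick, sync_ipts = 5.3 and ost->sync_opts = 2, delta0 = 3.3 and
   delta = 4.3, so nb_frames = lrintf(4.3) = 4 and nb0_frames =
   lrintf(3.3 - 0.6) = 3: three duplicates of the previous frame are emitted
   before the new picture to close the gap in the output timeline. */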
1121 
1122  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1123  nb0_frames = FFMIN(nb0_frames, nb_frames);
1124 
1125  memmove(ost->last_nb0_frames + 1,
1126  ost->last_nb0_frames,
1127  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1128  ost->last_nb0_frames[0] = nb0_frames;
1129 
1130  if (nb0_frames == 0 && ost->last_dropped) {
1131  nb_frames_drop++;
1133  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1134  ost->frame_number, ost->st->index, ost->last_frame->pts);
1135  }
1136  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1137  if (nb_frames > dts_error_threshold * 30) {
1138  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1139  nb_frames_drop++;
1140  return;
1141  }
1142  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1143  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1144  if (nb_frames_dup > dup_warning) {
1145  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1146  dup_warning *= 10;
1147  }
1148  }
1149  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1150 
1151  /* duplicates frame if needed */
1152  for (i = 0; i < nb_frames; i++) {
1153  AVFrame *in_picture;
1154  av_init_packet(&pkt);
1155  pkt.data = NULL;
1156  pkt.size = 0;
1157 
1158  if (i < nb0_frames && ost->last_frame) {
1159  in_picture = ost->last_frame;
1160  } else
1161  in_picture = next_picture;
1162 
1163  if (!in_picture)
1164  return;
1165 
1166  in_picture->pts = ost->sync_opts;
1167 
1168 #if 1
1169  if (!check_recording_time(ost))
1170 #else
1171  if (ost->frame_number >= ost->max_frames)
1172 #endif
1173  return;
1174 
1175 #if FF_API_LAVF_FMT_RAWPICTURE
1176  if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1177  enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1178  /* raw pictures are written as AVPicture structure to
1179  avoid any copies. We temporarily support the older
1180  method. */
1181  if (in_picture->interlaced_frame)
1182  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1183  else
1184  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1185  pkt.data = (uint8_t *)in_picture;
1186  pkt.size = sizeof(AVPicture);
1187  pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1188  pkt.flags |= AV_PKT_FLAG_KEY;
1189 
1190  output_packet(of, &pkt, ost);
1191  } else
1192 #endif
1193  {
1194  int forced_keyframe = 0;
1195  double pts_time;
1196 
1197  if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1198  ost->top_field_first >= 0)
1199  in_picture->top_field_first = !!ost->top_field_first;
1200 
1201  if (in_picture->interlaced_frame) {
1202  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1203  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1204  else
1205  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1206  } else
1207  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1208 
1209  in_picture->quality = enc->global_quality;
1210  in_picture->pict_type = 0;
1211 
1212  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1213  in_picture->pts * av_q2d(enc->time_base) : NAN;
1214  if (ost->forced_kf_index < ost->forced_kf_count &&
1215  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1216  ost->forced_kf_index++;
1217  forced_keyframe = 1;
1218  } else if (ost->forced_keyframes_pexpr) {
1219  double res;
1220  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1223  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1229  res);
1230  if (res) {
1231  forced_keyframe = 1;
1237  }
1238 
1240  } else if ( ost->forced_keyframes
1241  && !strncmp(ost->forced_keyframes, "source", 6)
1242  && in_picture->key_frame==1) {
1243  forced_keyframe = 1;
1244  }
1245 
1246  if (forced_keyframe) {
1247  in_picture->pict_type = AV_PICTURE_TYPE_I;
1248  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1249  }
1250 
1252  if (debug_ts) {
1253  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1254  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1255  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1256  enc->time_base.num, enc->time_base.den);
1257  }
1258 
1259  ost->frames_encoded++;
1260 
1261  ret = avcodec_send_frame(enc, in_picture);
1262  if (ret < 0)
1263  goto error;
1264 
1265  while (1) {
1266  ret = avcodec_receive_packet(enc, &pkt);
1267  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1268  if (ret == AVERROR(EAGAIN))
1269  break;
1270  if (ret < 0)
1271  goto error;
1272 
1273  if (debug_ts) {
1274  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1275  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1276  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1277  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1278  }
1279 
1280  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1281  pkt.pts = ost->sync_opts;
1282 
1283  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1284 
1285  if (debug_ts) {
1286  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1287  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1288  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1289  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1290  }
1291 
1292  frame_size = pkt.size;
1293  output_packet(of, &pkt, ost);
1294 
1295  /* if two pass, output log */
1296  if (ost->logfile && enc->stats_out) {
1297  fprintf(ost->logfile, "%s", enc->stats_out);
1298  }
1299  }
1300  }
1301  ost->sync_opts++;
1302  /*
1303  * For video, number of frames in == number of packets out.
1304  * But there may be reordering, so we can't throw away frames on encoder
1305  * flush, we need to limit them here, before they go into encoder.
1306  */
1307  ost->frame_number++;
1308 
1309  if (vstats_filename && frame_size)
1310  do_video_stats(ost, frame_size);
1311  }
1312 
1313  if (!ost->last_frame)
1314  ost->last_frame = av_frame_alloc();
1315  av_frame_unref(ost->last_frame);
1316  if (next_picture && ost->last_frame)
1317  av_frame_ref(ost->last_frame, next_picture);
1318  else
1319  av_frame_free(&ost->last_frame);
1320 
1321  return;
1322 error:
1323  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1324  exit_program(1);
1325 }
1326 
1327 static double psnr(double d)
1328 {
1329  return -10.0 * log10(d);
1330 }
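/* psnr() expects a mean squared error already normalized by the 255^2 peak:
   callers pass error / (width * height * 255.0 * 255.0), so the result is
   PSNR = -10 * log10(MSE_norm) in dB; e.g. a normalized MSE of 1e-4 maps to
   40 dB. */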
1331 
1332 static void do_video_stats(OutputStream *ost, int frame_size)
1333 {
1334  AVCodecContext *enc;
1335  int frame_number;
1336  double ti1, bitrate, avg_bitrate;
1337 
1338  /* this is executed just the first time do_video_stats is called */
1339  if (!vstats_file) {
1340  vstats_file = fopen(vstats_filename, "w");
1341  if (!vstats_file) {
1342  perror("fopen");
1343  exit_program(1);
1344  }
1345  }
1346 
1347  enc = ost->enc_ctx;
1348  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1349  frame_number = ost->st->nb_frames;
1350  if (vstats_version <= 1) {
1351  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1352  ost->quality / (float)FF_QP2LAMBDA);
1353  } else {
1354  fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1355  ost->quality / (float)FF_QP2LAMBDA);
1356  }
1357 
1358  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1359  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1360 
1361  fprintf(vstats_file,"f_size= %6d ", frame_size);
1362  /* compute pts value */
1363  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1364  if (ti1 < 0.01)
1365  ti1 = 0.01;
1366 
1367  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1368  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1369  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1370  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1371  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1372  }
1373 }
1374 
1375 static void finish_output_stream(OutputStream *ost)
1376 {
1377  OutputFile *of = output_files[ost->file_index];
1378  int i;
1379 
1380  ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1381 
1382  if (of->shortest) {
1383  for (i = 0; i < of->ctx->nb_streams; i++)
1384  output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1385  }
1386 }
1387 
1388 /**
1389  * Get and encode new output from any of the filtergraphs, without causing
1390  * activity.
1391  *
1392  * @return 0 for success, <0 for severe errors
1393  */
1394 static int reap_filters(int flush)
1395 {
1396  AVFrame *filtered_frame = NULL;
1397  int i;
1398 
1399  /* Reap all buffers present in the buffer sinks */
1400  for (i = 0; i < nb_output_streams; i++) {
1401  OutputStream *ost = output_streams[i];
1402  OutputFile *of = output_files[ost->file_index];
1403  AVFilterContext *filter;
1404  AVCodecContext *enc = ost->enc_ctx;
1405  int ret = 0;
1406 
1407  if (!ost->filter)
1408  continue;
1409  filter = ost->filter->filter;
1410 
1411  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1412  return AVERROR(ENOMEM);
1413  }
1414  filtered_frame = ost->filtered_frame;
1415 
1416  while (1) {
1417  double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1418  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1419  AV_BUFFERSINK_FLAG_NO_REQUEST);
1420  if (ret < 0) {
1421  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1423  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1424  } else if (flush && ret == AVERROR_EOF) {
1425  if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1426  do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1427  }
1428  break;
1429  }
1430  if (ost->finished) {
1431  av_frame_unref(filtered_frame);
1432  continue;
1433  }
1434  if (filtered_frame->pts != AV_NOPTS_VALUE) {
1435  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1436  AVRational filter_tb = av_buffersink_get_time_base(filter);
1437  AVRational tb = enc->time_base;
1438  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1439 
1440  tb.den <<= extra_bits;
1441  float_pts =
1442  av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1443  av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1444  float_pts /= 1 << extra_bits;
1445  // avoid exact midpoints to reduce the chance of rounding differences; this can be removed if the fps code is changed to work with integers
1446  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1447 
1448  filtered_frame->pts =
1449  av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1450  av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1451  }
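/* The block above rescales the frame pts into a temporarily finer time base
   (tb.den shifted left by extra_bits) so float_pts keeps fractional precision
   that the integer pts in enc->time_base loses, and the 1/2^17 nudge keeps the
   value away from exact midpoints so the later lrint()-based framerate
   decisions in do_video_out() round consistently. */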
1452  //if (ost->source_index >= 0)
1453  // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1454 
1455  switch (av_buffersink_get_type(filter)) {
1456  case AVMEDIA_TYPE_VIDEO:
1457  if (!ost->frame_aspect_ratio.num)
1458  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1459 
1460  if (debug_ts) {
1461  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1462  av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1463  float_pts,
1464  enc->time_base.num, enc->time_base.den);
1465  }
1466 
1467  do_video_out(of, ost, filtered_frame, float_pts);
1468  break;
1469  case AVMEDIA_TYPE_AUDIO:
1470  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1471  enc->channels != av_frame_get_channels(filtered_frame)) {
1473  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1474  break;
1475  }
1476  do_audio_out(of, ost, filtered_frame);
1477  break;
1478  default:
1479  // TODO support subtitle filters
1480  av_assert0(0);
1481  }
1482 
1483  av_frame_unref(filtered_frame);
1484  }
1485  }
1486 
1487  return 0;
1488 }
1489 
1490 static void print_final_stats(int64_t total_size)
1491 {
1492  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1493  uint64_t subtitle_size = 0;
1494  uint64_t data_size = 0;
1495  float percent = -1.0;
1496  int i, j;
1497  int pass1_used = 1;
1498 
1499  for (i = 0; i < nb_output_streams; i++) {
1500  OutputStream *ost = output_streams[i];
1501  switch (ost->enc_ctx->codec_type) {
1502  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1503  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1504  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1505  default: other_size += ost->data_size; break;
1506  }
1507  extra_size += ost->enc_ctx->extradata_size;
1508  data_size += ost->data_size;
1511  pass1_used = 0;
1512  }
1513 
1514  if (data_size && total_size>0 && total_size >= data_size)
1515  percent = 100.0 * (total_size - data_size) / data_size;
1516 
1517  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1518  video_size / 1024.0,
1519  audio_size / 1024.0,
1520  subtitle_size / 1024.0,
1521  other_size / 1024.0,
1522  extra_size / 1024.0);
1523  if (percent >= 0.0)
1524  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1525  else
1526  av_log(NULL, AV_LOG_INFO, "unknown");
1527  av_log(NULL, AV_LOG_INFO, "\n");
1528 
1529  /* print verbose per-stream stats */
1530  for (i = 0; i < nb_input_files; i++) {
1531  InputFile *f = input_files[i];
1532  uint64_t total_packets = 0, total_size = 0;
1533 
1534  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1535  i, f->ctx->filename);
1536 
1537  for (j = 0; j < f->nb_streams; j++) {
1538  InputStream *ist = input_streams[f->ist_index + j];
1539  enum AVMediaType type = ist->dec_ctx->codec_type;
1540 
1541  total_size += ist->data_size;
1542  total_packets += ist->nb_packets;
1543 
1544  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1545  i, j, media_type_string(type));
1546  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1547  ist->nb_packets, ist->data_size);
1548 
1549  if (ist->decoding_needed) {
1550  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1551  ist->frames_decoded);
1552  if (type == AVMEDIA_TYPE_AUDIO)
1553  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1554  av_log(NULL, AV_LOG_VERBOSE, "; ");
1555  }
1556 
1557  av_log(NULL, AV_LOG_VERBOSE, "\n");
1558  }
1559 
1560  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1561  total_packets, total_size);
1562  }
1563 
1564  for (i = 0; i < nb_output_files; i++) {
1565  OutputFile *of = output_files[i];
1566  uint64_t total_packets = 0, total_size = 0;
1567 
1568  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1569  i, of->ctx->filename);
1570 
1571  for (j = 0; j < of->ctx->nb_streams; j++) {
1572  OutputStream *ost = output_streams[of->ost_index + j];
1573  enum AVMediaType type = ost->enc_ctx->codec_type;
1574 
1575  total_size += ost->data_size;
1576  total_packets += ost->packets_written;
1577 
1578  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1579  i, j, media_type_string(type));
1580  if (ost->encoding_needed) {
1581  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1582  ost->frames_encoded);
1583  if (type == AVMEDIA_TYPE_AUDIO)
1584  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1585  av_log(NULL, AV_LOG_VERBOSE, "; ");
1586  }
1587 
1588  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1589  ost->packets_written, ost->data_size);
1590 
1591  av_log(NULL, AV_LOG_VERBOSE, "\n");
1592  }
1593 
1594  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1595  total_packets, total_size);
1596  }
1597  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1598  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1599  if (pass1_used) {
1600  av_log(NULL, AV_LOG_WARNING, "\n");
1601  } else {
1602  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1603  }
1604  }
1605 }
1606 
1607 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1608 {
1609  char buf[1024];
1610  AVBPrint buf_script;
1611  OutputStream *ost;
1612  AVFormatContext *oc;
1613  int64_t total_size;
1614  AVCodecContext *enc;
1615  int frame_number, vid, i;
1616  double bitrate;
1617  double speed;
1618  int64_t pts = INT64_MIN + 1;
1619  static int64_t last_time = -1;
1620  static int qp_histogram[52];
1621  int hours, mins, secs, us;
1622  int ret;
1623  float t;
1624 
1625  if (!print_stats && !is_last_report && !progress_avio)
1626  return;
1627 
1628  if (!is_last_report) {
1629  if (last_time == -1) {
1630  last_time = cur_time;
1631  return;
1632  }
1633  if ((cur_time - last_time) < 500000)
1634  return;
1635  last_time = cur_time;
1636  }
1637 
1638  t = (cur_time-timer_start) / 1000000.0;
1639 
1640 
1641  oc = output_files[0]->ctx;
1642 
1643  total_size = avio_size(oc->pb);
1644  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1645  total_size = avio_tell(oc->pb);
1646 
1647  buf[0] = '\0';
1648  vid = 0;
1649  av_bprint_init(&buf_script, 0, 1);
1650  for (i = 0; i < nb_output_streams; i++) {
1651  float q = -1;
1652  ost = output_streams[i];
1653  enc = ost->enc_ctx;
1654  if (!ost->stream_copy)
1655  q = ost->quality / (float) FF_QP2LAMBDA;
1656 
1657  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1658  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1659  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1660  ost->file_index, ost->index, q);
1661  }
1662  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1663  float fps;
1664 
1665  frame_number = ost->frame_number;
1666  fps = t > 1 ? frame_number / t : 0;
1667  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1668  frame_number, fps < 9.95, fps, q);
1669  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1670  av_bprintf(&buf_script, "fps=%.1f\n", fps);
1671  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1672  ost->file_index, ost->index, q);
1673  if (is_last_report)
1674  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1675  if (qp_hist) {
1676  int j;
1677  int qp = lrintf(q);
1678  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1679  qp_histogram[qp]++;
1680  for (j = 0; j < 32; j++)
1681  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1682  }
1683 
1684  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1685  int j;
1686  double error, error_sum = 0;
1687  double scale, scale_sum = 0;
1688  double p;
1689  char type[3] = { 'Y','U','V' };
1690  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1691  for (j = 0; j < 3; j++) {
1692  if (is_last_report) {
1693  error = enc->error[j];
1694  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1695  } else {
1696  error = ost->error[j];
1697  scale = enc->width * enc->height * 255.0 * 255.0;
1698  }
1699  if (j)
1700  scale /= 4;
1701  error_sum += error;
1702  scale_sum += scale;
1703  p = psnr(error / scale);
1704  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1705  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1706  ost->file_index, ost->index, type[j] | 32, p);
1707  }
1708  p = psnr(error_sum / scale_sum);
1709  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1710  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1711  ost->file_index, ost->index, p);
1712  }
1713  vid = 1;
1714  }
1715  /* compute min output value */
1716  if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1717  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1718  ost->st->time_base, AV_TIME_BASE_Q));
1719  if (is_last_report)
1720  nb_frames_drop += ost->last_dropped;
1721  }
1722 
1723  secs = FFABS(pts) / AV_TIME_BASE;
1724  us = FFABS(pts) % AV_TIME_BASE;
1725  mins = secs / 60;
1726  secs %= 60;
1727  hours = mins / 60;
1728  mins %= 60;
1729 
1730  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1731  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1732 
1733  if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1734  "size=N/A time=");
1735  else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1736  "size=%8.0fkB time=", total_size / 1024.0);
1737  if (pts < 0)
1738  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1739  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1740  "%02d:%02d:%02d.%02d ", hours, mins, secs,
1741  (100 * us) / AV_TIME_BASE);
1742 
1743  if (bitrate < 0) {
1744  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1745  av_bprintf(&buf_script, "bitrate=N/A\n");
1746  }else{
1747  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1748  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1749  }
1750 
1751  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1752  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1753  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1754  av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1755  hours, mins, secs, us);
1756 
1757  if (nb_frames_dup || nb_frames_drop)
1758  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1759  nb_frames_dup, nb_frames_drop);
1760  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1761  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1762 
1763  if (speed < 0) {
1764  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1765  av_bprintf(&buf_script, "speed=N/A\n");
1766  } else {
1767  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1768  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1769  }
1770 
1771  if (print_stats || is_last_report) {
1772  const char end = is_last_report ? '\n' : '\r';
1773  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1774  fprintf(stderr, "%s %c", buf, end);
1775  } else
1776  av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1777 
1778  fflush(stderr);
1779  }
1780 
1781  if (progress_avio) {
1782  av_bprintf(&buf_script, "progress=%s\n",
1783  is_last_report ? "end" : "continue");
1784  avio_write(progress_avio, buf_script.str,
1785  FFMIN(buf_script.len, buf_script.size - 1));
1786  avio_flush(progress_avio);
1787  av_bprint_finalize(&buf_script, NULL);
1788  if (is_last_report) {
1789  if ((ret = avio_closep(&progress_avio)) < 0)
1791  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1792  }
1793  }
1794 
1795  if (is_last_report)
1796  print_final_stats(total_size);
1797 }
1798 
1799 static void flush_encoders(void)
1800 {
1801  int i, ret;
1802 
1803  for (i = 0; i < nb_output_streams; i++) {
1804  OutputStream *ost = output_streams[i];
1805  AVCodecContext *enc = ost->enc_ctx;
1806  OutputFile *of = output_files[ost->file_index];
1807 
1808  if (!ost->encoding_needed)
1809  continue;
1810 
1811  if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1812  continue;
1813 #if FF_API_LAVF_FMT_RAWPICTURE
1814  if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1815  continue;
1816 #endif
1817 
1818  if (enc->codec_type != AVMEDIA_TYPE_AUDIO && enc->codec_type != AVMEDIA_TYPE_VIDEO)
1819  continue;
1820 
1821  avcodec_send_frame(enc, NULL);
1822 
1823  for (;;) {
1824  const char *desc = NULL;
1825  AVPacket pkt;
1826  int pkt_size;
1827 
1828  switch (enc->codec_type) {
1829  case AVMEDIA_TYPE_AUDIO:
1830  desc = "audio";
1831  break;
1832  case AVMEDIA_TYPE_VIDEO:
1833  desc = "video";
1834  break;
1835  default:
1836  av_assert0(0);
1837  }
1838 
1839  av_init_packet(&pkt);
1840  pkt.data = NULL;
1841  pkt.size = 0;
1842 
1843  update_benchmark(NULL);
1844  ret = avcodec_receive_packet(enc, &pkt);
1845  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1846  if (ret < 0 && ret != AVERROR_EOF) {
1847  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1848  desc,
1849  av_err2str(ret));
1850  exit_program(1);
1851  }
1852  if (ost->logfile && enc->stats_out) {
1853  fprintf(ost->logfile, "%s", enc->stats_out);
1854  }
1855  if (ret == AVERROR_EOF) {
1856  break;
1857  }
1858  if (ost->finished & MUXER_FINISHED) {
1859  av_packet_unref(&pkt);
1860  continue;
1861  }
1862  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1863  pkt_size = pkt.size;
1864  output_packet(of, &pkt, ost);
1865  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1866  do_video_stats(ost, pkt_size);
1867  }
1868  }
1869  }
1870 }
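/* Flushing mirrors normal encoding: avcodec_send_frame(enc, NULL) signals end
   of stream, then avcodec_receive_packet() is drained until AVERROR_EOF;
   packets for a stream whose muxer already finished are dropped, everything
   else is rescaled to the stream time base and sent through output_packet(). */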
1871 
1872 /*
1873  * Check whether a packet from ist should be written into ost at this time
1874  */
1875 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1876 {
1877  OutputFile *of = output_files[ost->file_index];
1878  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1879 
1880  if (ost->source_index != ist_index)
1881  return 0;
1882 
1883  if (ost->finished)
1884  return 0;
1885 
1886  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1887  return 0;
1888 
1889  return 1;
1890 }
1891 
1892 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1893 {
1894  OutputFile *of = output_files[ost->file_index];
1895  InputFile *f = input_files [ist->file_index];
1896  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1897  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1898  AVPicture pict;
1899  AVPacket opkt;
1900 
1901  av_init_packet(&opkt);
1902 
1903  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1904  !ost->copy_initial_nonkeyframes)
1905  return;
1906 
1907  if (!ost->frame_number && !ost->copy_prior_start) {
1908  int64_t comp_start = start_time;
1909  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1910  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1911  if (pkt->pts == AV_NOPTS_VALUE ?
1912  ist->pts < comp_start :
1913  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
1914  return;
1915  }
1916 
1917  if (of->recording_time != INT64_MAX &&
1918  ist->pts >= of->recording_time + start_time) {
1919  close_output_stream(ost);
1920  return;
1921  }
1922 
1923  if (f->recording_time != INT64_MAX) {
1924  start_time = f->ctx->start_time;
1925  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
1926  start_time += f->start_time;
1927  if (ist->pts >= f->recording_time + start_time) {
1928  close_output_stream(ost);
1929  return;
1930  }
1931  }
1932 
1933  /* force the input stream PTS */
1934  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1935  ost->sync_opts++;
1936 
1937  if (pkt->pts != AV_NOPTS_VALUE)
1938  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1939  else
1940  opkt.pts = AV_NOPTS_VALUE;
1941 
1942  if (pkt->dts == AV_NOPTS_VALUE)
1943  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1944  else
1945  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1946  opkt.dts -= ost_tb_start_time;
1947 
1948  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1949  int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1950  if(!duration)
1951  duration = ist->dec_ctx->frame_size;
1952  opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1953  (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1954  ost->st->time_base) - ost_tb_start_time;
1955  }
1956 
1957  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1958  opkt.flags = pkt->flags;
1959  // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1960  if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
1961  && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
1962  && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
1963  && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
1964  ) {
1965  int ret = av_parser_change(ost->parser, ost->parser_avctx,
1966  &opkt.data, &opkt.size,
1967  pkt->data, pkt->size,
1968  pkt->flags & AV_PKT_FLAG_KEY);
1969  if (ret < 0) {
1970  av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
1971  av_err2str(ret));
1972  exit_program(1);
1973  }
1974  if (ret) {
1975  opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1976  if (!opkt.buf)
1977  exit_program(1);
1978  }
1979  } else {
1980  opkt.data = pkt->data;
1981  opkt.size = pkt->size;
1982  }
1983  av_copy_packet_side_data(&opkt, pkt);
1984 
1985 #if FF_API_LAVF_FMT_RAWPICTURE
1986  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
1987  ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
1988  (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1989  /* store AVPicture in AVPacket, as expected by the output format */
1990  int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
1991  if (ret < 0) {
1992  av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1993  av_err2str(ret));
1994  exit_program(1);
1995  }
1996  opkt.data = (uint8_t *)&pict;
1997  opkt.size = sizeof(AVPicture);
1998  opkt.flags |= AV_PKT_FLAG_KEY;
1999  }
2000 #endif
2001 
2002  output_packet(of, &opkt, ost);
2003 }
2004 
2005 int guess_input_channel_layout(InputStream *ist)
2006 {
2007  AVCodecContext *dec = ist->dec_ctx;
2008 
2009  if (!dec->channel_layout) {
2010  char layout_name[256];
2011 
2012  if (dec->channels > ist->guess_layout_max)
2013  return 0;
2014  dec->channel_layout = av_get_default_channel_layout(dec->channels);
2015  if (!dec->channel_layout)
2016  return 0;
2017  av_get_channel_layout_string(layout_name, sizeof(layout_name),
2018  dec->channels, dec->channel_layout);
2019  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2020  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2021  }
2022  return 1;
2023 }
2024 
2025 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2026 {
2027  if (*got_output || ret<0)
2028  decode_error_stat[ret<0] ++;
2029 
2030  if (ret < 0 && exit_on_error)
2031  exit_program(1);
2032 
2033  if (exit_on_error && *got_output && ist) {
2034  if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2035  av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
2036  exit_program(1);
2037  }
2038  }
2039 }
2040 
2041 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2042 // There is the following difference: if you got a frame, you must call
2043 // it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
2044 // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
2045 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2046 {
2047  int ret;
2048 
2049  *got_frame = 0;
2050 
2051  if (pkt) {
2052  ret = avcodec_send_packet(avctx, pkt);
2053  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2054  // decoded frames with avcodec_receive_frame() until done.
2055  if (ret < 0 && ret != AVERROR_EOF)
2056  return ret;
2057  }
2058 
2059  ret = avcodec_receive_frame(avctx, frame);
2060  if (ret < 0 && ret != AVERROR(EAGAIN))
2061  return ret;
2062  if (ret >= 0)
2063  *got_frame = 1;
2064 
2065  return 0;
2066 }
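/* [Editor's illustrative sketch -- not part of ffmpeg.c] Usage pattern implied by
 * the comment above decode(): send each packet once, then keep calling with
 * pkt == NULL until no further frame is returned. "my_ctx", "my_frame",
 * "my_pkt" and consume() are hypothetical placeholders.
 *
 *     int got = 0;
 *     if (decode(my_ctx, my_frame, &got, &my_pkt) >= 0) {
 *         while (got) {
 *             consume(my_frame);                            // use the decoded frame
 *             if (decode(my_ctx, my_frame, &got, NULL) < 0) // pkt==NULL: fetch more output
 *                 break;
 *         }
 *     }
 */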
2067 
2068 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2069 {
2070  int i, ret;
2071  AVFrame *f;
2072 
2073  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2074  for (i = 0; i < ist->nb_filters; i++) {
2075  if (i < ist->nb_filters - 1) {
2076  f = ist->filter_frame;
2077  ret = av_frame_ref(f, decoded_frame);
2078  if (ret < 0)
2079  break;
2080  } else
2081  f = decoded_frame;
2082  ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2083  AV_BUFFERSRC_FLAG_PUSH);
2084  if (ret == AVERROR_EOF)
2085  ret = 0; /* ignore */
2086  if (ret < 0) {
2087  av_log(NULL, AV_LOG_ERROR,
2088  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2089  break;
2090  }
2091  }
2092  return ret;
2093 }
2094 
2095 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
2096 {
2097  AVFrame *decoded_frame;
2098  AVCodecContext *avctx = ist->dec_ctx;
2099  int i, ret, err = 0, resample_changed;
2100  AVRational decoded_frame_tb;
2101 
2102  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2103  return AVERROR(ENOMEM);
2104  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2105  return AVERROR(ENOMEM);
2106  decoded_frame = ist->decoded_frame;
2107 
2108  update_benchmark(NULL);
2109  ret = decode(avctx, decoded_frame, got_output, pkt);
2110  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2111 
2112  if (ret >= 0 && avctx->sample_rate <= 0) {
2113  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2114  ret = AVERROR_INVALIDDATA;
2115  }
2116 
2117  if (ret != AVERROR_EOF)
2118  check_decode_result(ist, got_output, ret);
2119 
2120  if (!*got_output || ret < 0)
2121  return ret;
2122 
2123  ist->samples_decoded += decoded_frame->nb_samples;
2124  ist->frames_decoded++;
2125 
2126 #if 1
2127  /* increment next_dts to use for the case where the input stream does not
2128  have timestamps or there are multiple frames in the packet */
2129  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2130  avctx->sample_rate;
2131  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2132  avctx->sample_rate;
2133 #endif
2134 
2135  resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
2136  ist->resample_channels != avctx->channels ||
2137  ist->resample_channel_layout != decoded_frame->channel_layout ||
2138  ist->resample_sample_rate != decoded_frame->sample_rate;
2139  if (resample_changed) {
2140  char layout1[64], layout2[64];
2141 
2142  if (!guess_input_channel_layout(ist)) {
2143  av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
2144  "layout for Input Stream #%d.%d\n", ist->file_index,
2145  ist->st->index);
2146  exit_program(1);
2147  }
2148  decoded_frame->channel_layout = avctx->channel_layout;
2149 
2150  av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
2151  ist->resample_channel_layout);
2152  av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
2153  decoded_frame->channel_layout);
2154 
2155  av_log(NULL, AV_LOG_INFO,
2156  "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
2157  ist->file_index, ist->st->index,
2158  ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
2159  ist->resample_channels, layout1,
2160  decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2161  avctx->channels, layout2);
2162 
2163  ist->resample_sample_fmt = decoded_frame->format;
2164  ist->resample_sample_rate = decoded_frame->sample_rate;
2165  ist->resample_channel_layout = decoded_frame->channel_layout;
2166  ist->resample_channels = avctx->channels;
2167 
2168  for (i = 0; i < ist->nb_filters; i++) {
2169  err = ifilter_parameters_from_frame(ist->filters[i], decoded_frame);
2170  if (err < 0) {
2171  av_log(NULL, AV_LOG_ERROR,
2172  "Error reconfiguring input stream %d:%d filter %d\n",
2173  ist->file_index, ist->st->index, i);
2174  goto fail;
2175  }
2176  }
2177 
2178  for (i = 0; i < nb_filtergraphs; i++)
2179  if (ist_in_filtergraph(filtergraphs[i], ist)) {
2180  FilterGraph *fg = filtergraphs[i];
2181  if (configure_filtergraph(fg) < 0) {
2182  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2183  exit_program(1);
2184  }
2185  }
2186  }
2187 
2188  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2189  decoded_frame_tb = ist->st->time_base;
2190  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2191  decoded_frame->pts = pkt->pts;
2192  decoded_frame_tb = ist->st->time_base;
2193  }else {
2194  decoded_frame->pts = ist->dts;
2195  decoded_frame_tb = AV_TIME_BASE_Q;
2196  }
2197  if (decoded_frame->pts != AV_NOPTS_VALUE)
2198  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2199  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2200  (AVRational){1, avctx->sample_rate});
2201  ist->nb_samples = decoded_frame->nb_samples;
2202  err = send_frame_to_filters(ist, decoded_frame);
2203  decoded_frame->pts = AV_NOPTS_VALUE;
2204 
2205 fail:
2206  av_frame_unref(ist->filter_frame);
2207  av_frame_unref(decoded_frame);
2208  return err < 0 ? err : ret;
2209 }
2210 
2211 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof)
2212 {
2213  AVFrame *decoded_frame;
2214  int i, ret = 0, err = 0, resample_changed;
2215  int64_t best_effort_timestamp;
2216  int64_t dts = AV_NOPTS_VALUE;
2217  AVPacket avpkt;
2218 
2219  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2220  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2221  // skip the packet.
2222  if (!eof && pkt && pkt->size == 0)
2223  return 0;
2224 
2225  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2226  return AVERROR(ENOMEM);
2227  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2228  return AVERROR(ENOMEM);
2229  decoded_frame = ist->decoded_frame;
2230  if (ist->dts != AV_NOPTS_VALUE)
2231  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2232  if (pkt) {
2233  avpkt = *pkt;
2234  avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2235  }
2236 
2237  // The old code used to set dts on the drain packet, which does not work
2238  // with the new API anymore.
2239  if (eof) {
2240  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2241  if (!new)
2242  return AVERROR(ENOMEM);
2243  ist->dts_buffer = new;
2244  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2245  }
2246 
2247  update_benchmark(NULL);
2248  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2249  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2250 
2251  // The following line may be required in some cases where there is no parser
2252  // or the parser does not set has_b_frames correctly
2253  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2254  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2255  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2256  } else
2257  av_log(ist->dec_ctx, AV_LOG_WARNING,
2258  "video_delay is larger in decoder than demuxer %d > %d.\n"
2259  "If you want to help, upload a sample "
2260  "of this file to ftp://upload.ffmpeg.org/incoming/ "
2261  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2262  ist->dec_ctx->has_b_frames,
2263  ist->st->codecpar->video_delay);
2264  }
2265 
2266  if (ret != AVERROR_EOF)
2267  check_decode_result(ist, got_output, ret);
2268 
2269  if (*got_output && ret >= 0) {
2270  if (ist->dec_ctx->width != decoded_frame->width ||
2271  ist->dec_ctx->height != decoded_frame->height ||
2272  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2273  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2274  decoded_frame->width,
2275  decoded_frame->height,
2276  decoded_frame->format,
2277  ist->dec_ctx->width,
2278  ist->dec_ctx->height,
2279  ist->dec_ctx->pix_fmt);
2280  }
2281  }
2282 
2283  if (!*got_output || ret < 0)
2284  return ret;
2285 
2286  if(ist->top_field_first>=0)
2287  decoded_frame->top_field_first = ist->top_field_first;
2288 
2289  ist->frames_decoded++;
2290 
2291  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2292  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2293  if (err < 0)
2294  goto fail;
2295  }
2296  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2297 
2298  best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2299 
2300  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2301  best_effort_timestamp = ist->dts_buffer[0];
2302 
2303  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2304  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2305  ist->nb_dts_buffer--;
2306  }
2307 
2308  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2309  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2310 
2311  if (ts != AV_NOPTS_VALUE)
2312  ist->next_pts = ist->pts = ts;
2313  }
2314 
2315  if (debug_ts) {
2316  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2317  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2318  ist->st->index, av_ts2str(decoded_frame->pts),
2319  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2320  best_effort_timestamp,
2321  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2322  decoded_frame->key_frame, decoded_frame->pict_type,
2323  ist->st->time_base.num, ist->st->time_base.den);
2324  }
2325 
2326  if (ist->st->sample_aspect_ratio.num)
2327  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2328 
2329  resample_changed = ist->resample_width != decoded_frame->width ||
2330  ist->resample_height != decoded_frame->height ||
2331  ist->resample_pix_fmt != decoded_frame->format;
2332  if (resample_changed) {
2333  av_log(NULL, AV_LOG_INFO,
2334  "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2335  ist->file_index, ist->st->index,
2336  ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2337  decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2338 
2339  ist->resample_width = decoded_frame->width;
2340  ist->resample_height = decoded_frame->height;
2341  ist->resample_pix_fmt = decoded_frame->format;
2342 
2343  for (i = 0; i < ist->nb_filters; i++) {
2344  err = ifilter_parameters_from_frame(ist->filters[i], decoded_frame);
2345  if (err < 0) {
2346  av_log(NULL, AV_LOG_ERROR,
2347  "Error reconfiguring input stream %d:%d filter %d\n",
2348  ist->file_index, ist->st->index, i);
2349  goto fail;
2350  }
2351  }
2352 
2353  for (i = 0; i < nb_filtergraphs; i++) {
2354  if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2355  configure_filtergraph(filtergraphs[i]) < 0) {
2356  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2357  exit_program(1);
2358  }
2359  }
2360  }
2361 
2362  err = send_frame_to_filters(ist, decoded_frame);
2363 
2364 fail:
2365  av_frame_unref(ist->filter_frame);
2366  av_frame_unref(decoded_frame);
2367  return err < 0 ? err : ret;
2368 }
2369 
2370 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2371 {
2372  AVSubtitle subtitle;
2373  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2374  &subtitle, got_output, pkt);
2375 
2376  check_decode_result(NULL, got_output, ret);
2377 
2378  if (ret < 0 || !*got_output) {
2379  if (!pkt->size)
2380  sub2video_flush(ist);
2381  return ret;
2382  }
2383 
2384  if (ist->fix_sub_duration) {
2385  int end = 1;
2386  if (ist->prev_sub.got_output) {
2387  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2388  1000, AV_TIME_BASE);
2389  if (end < ist->prev_sub.subtitle.end_display_time) {
2390  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2391  "Subtitle duration reduced from %d to %d%s\n",
2392  ist->prev_sub.subtitle.end_display_time, end,
2393  end <= 0 ? ", dropping it" : "");
2394  ist->prev_sub.subtitle.end_display_time = end;
2395  }
2396  }
2397  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2398  FFSWAP(int, ret, ist->prev_sub.ret);
2399  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2400  if (end <= 0)
2401  goto out;
2402  }
2403 
2404  if (!*got_output)
2405  return ret;
2406 
2407  sub2video_update(ist, &subtitle);
2408 
2409  if (!subtitle.num_rects)
2410  goto out;
2411 
2412  ist->frames_decoded++;
2413 
2414  for (i = 0; i < nb_output_streams; i++) {
2415  OutputStream *ost = output_streams[i];
2416 
2417  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2418  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2419  continue;
2420 
2421  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2422  }
2423 
2424 out:
2425  avsubtitle_free(&subtitle);
2426  return ret;
2427 }
2428 
2429 static int send_filter_eof(InputStream *ist)
2430 {
2431  int i, ret;
2432  for (i = 0; i < ist->nb_filters; i++) {
2433  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2434  if (ret < 0)
2435  return ret;
2436  }
2437  return 0;
2438 }
2439 
2440 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2441 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2442 {
2443  int ret = 0, i;
2444  int repeating = 0;
2445  int eof_reached = 0;
2446 
2447  AVPacket avpkt;
2448  if (!ist->saw_first_ts) {
2449  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2450  ist->pts = 0;
2451  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2452  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2453  ist->pts = ist->dts; //unused but better to set it to a value that's not totally wrong
2454  }
2455  ist->saw_first_ts = 1;
2456  }
2457 
2458  if (ist->next_dts == AV_NOPTS_VALUE)
2459  ist->next_dts = ist->dts;
2460  if (ist->next_pts == AV_NOPTS_VALUE)
2461  ist->next_pts = ist->pts;
2462 
2463  if (!pkt) {
2464  /* EOF handling */
2465  av_init_packet(&avpkt);
2466  avpkt.data = NULL;
2467  avpkt.size = 0;
2468  } else {
2469  avpkt = *pkt;
2470  }
2471 
2472  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2473  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2474  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2475  ist->next_pts = ist->pts = ist->dts;
2476  }
2477 
2478  // while we have more to decode or while the decoder did output something on EOF
2479  while (ist->decoding_needed) {
2480  int duration = 0;
2481  int got_output = 0;
2482 
2483  ist->pts = ist->next_pts;
2484  ist->dts = ist->next_dts;
2485 
2486  switch (ist->dec_ctx->codec_type) {
2487  case AVMEDIA_TYPE_AUDIO:
2488  ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output);
2489  break;
2490  case AVMEDIA_TYPE_VIDEO:
2491  ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, !pkt);
2492  if (!repeating || !pkt || got_output) {
2493  if (pkt && pkt->duration) {
2494  duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2495  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2496  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2497  duration = ((int64_t)AV_TIME_BASE *
2498  ist->dec_ctx->framerate.den * ticks) /
2499  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2500  }
2501 
2502  if(ist->dts != AV_NOPTS_VALUE && duration) {
2503  ist->next_dts += duration;
2504  }else
2505  ist->next_dts = AV_NOPTS_VALUE;
2506  }
2507 
2508  if (got_output)
2509  ist->next_pts += duration; //FIXME the duration is not correct in some cases
2510  break;
2511  case AVMEDIA_TYPE_SUBTITLE:
2512  if (repeating)
2513  break;
2514  ret = transcode_subtitles(ist, &avpkt, &got_output);
2515  if (!pkt && ret >= 0)
2516  ret = AVERROR_EOF;
2517  break;
2518  default:
2519  return -1;
2520  }
2521 
2522  if (ret == AVERROR_EOF) {
2523  eof_reached = 1;
2524  break;
2525  }
2526 
2527  if (ret < 0) {
2528  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2529  ist->file_index, ist->st->index, av_err2str(ret));
2530  if (exit_on_error)
2531  exit_program(1);
2532  // Decoding might not terminate if we're draining the decoder, and
2533  // the decoder keeps returning an error.
2534  // This should probably be considered a libavcodec issue.
2535  // Sample: fate-vsynth1-dnxhd-720p-hr-lb
2536  if (!pkt)
2537  eof_reached = 1;
2538  break;
2539  }
2540 
2541  if (!got_output)
2542  break;
2543 
2544  // During draining, we might get multiple output frames in this loop.
2545  // ffmpeg.c does not drain the filter chain on configuration changes,
2546  // which means if we send multiple frames at once to the filters, and
2547  // one of those frames changes configuration, the buffered frames will
2548  // be lost. This can upset certain FATE tests.
2549  // Decode only 1 frame per call on EOF to appease these FATE tests.
2550  // The ideal solution would be to rewrite decoding to use the new
2551  // decoding API in a better way.
2552  if (!pkt)
2553  break;
2554 
2555  repeating = 1;
2556  }
2557 
2558  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2559  /* except when looping we need to flush but not to send an EOF */
2560  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2561  int ret = send_filter_eof(ist);
2562  if (ret < 0) {
2563  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2564  exit_program(1);
2565  }
2566  }
2567 
2568  /* handle stream copy */
2569  if (!ist->decoding_needed) {
2570  ist->dts = ist->next_dts;
2571  switch (ist->dec_ctx->codec_type) {
2572  case AVMEDIA_TYPE_AUDIO:
2573  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2574  ist->dec_ctx->sample_rate;
2575  break;
2576  case AVMEDIA_TYPE_VIDEO:
2577  if (ist->framerate.num) {
2578  // TODO: Remove work-around for c99-to-c89 issue 7
2579  AVRational time_base_q = AV_TIME_BASE_Q;
2580  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2581  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2582  } else if (pkt->duration) {
2583  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2584  } else if(ist->dec_ctx->framerate.num != 0) {
2585  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2586  ist->next_dts += ((int64_t)AV_TIME_BASE *
2587  ist->dec_ctx->framerate.den * ticks) /
2588  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2589  }
2590  break;
2591  }
2592  ist->pts = ist->dts;
2593  ist->next_pts = ist->next_dts;
2594  }
2595  for (i = 0; pkt && i < nb_output_streams; i++) {
2596  OutputStream *ost = output_streams[i];
2597 
2598  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2599  continue;
2600 
2601  do_streamcopy(ist, ost, pkt);
2602  }
2603 
2604  return !eof_reached;
2605 }
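/* [Editor's illustrative sketch -- not part of ffmpeg.c] Caller pattern implied by
 * the "pkt = NULL means EOF" comment above process_input_packet(): feed every
 * demuxed packet, then call once more with a NULL packet at end of file so the
 * decoders and filters are drained. read_one_packet() is a hypothetical helper;
 * no_eof is 0 here because the input is not being looped.
 *
 *     AVPacket pkt;
 *     while (read_one_packet(&pkt) >= 0) {
 *         process_input_packet(ist, &pkt, 0);
 *         av_packet_unref(&pkt);
 *     }
 *     process_input_packet(ist, NULL, 0);   // flush decoder buffers at EOF
 */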
2606 
2607 static void print_sdp(void)
2608 {
2609  char sdp[16384];
2610  int i;
2611  int j;
2612  AVIOContext *sdp_pb;
2613  AVFormatContext **avc;
2614 
2615  for (i = 0; i < nb_output_files; i++) {
2616  if (!output_files[i]->header_written)
2617  return;
2618  }
2619 
2620  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2621  if (!avc)
2622  exit_program(1);
2623  for (i = 0, j = 0; i < nb_output_files; i++) {
2624  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2625  avc[j] = output_files[i]->ctx;
2626  j++;
2627  }
2628  }
2629 
2630  if (!j)
2631  goto fail;
2632 
2633  av_sdp_create(avc, j, sdp, sizeof(sdp));
2634 
2635  if (!sdp_filename) {
2636  printf("SDP:\n%s\n", sdp);
2637  fflush(stdout);
2638  } else {
2639  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2640  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2641  } else {
2642  avio_printf(sdp_pb, "SDP:\n%s", sdp);
2643  avio_closep(&sdp_pb);
2644  av_freep(&sdp_filename);
2645  }
2646  }
2647 
2648 fail:
2649  av_freep(&avc);
2650 }
2651 
2652 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2653 {
2654  int i;
2655  for (i = 0; hwaccels[i].name; i++)
2656  if (hwaccels[i].pix_fmt == pix_fmt)
2657  return &hwaccels[i];
2658  return NULL;
2659 }
2660 
2661 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2662 {
2663  InputStream *ist = s->opaque;
2664  const enum AVPixelFormat *p;
2665  int ret;
2666 
2667  for (p = pix_fmts; *p != -1; p++) {
2668  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2669  const HWAccel *hwaccel;
2670 
2671  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2672  break;
2673 
2674  hwaccel = get_hwaccel(*p);
2675  if (!hwaccel ||
2676  (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2677  (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2678  continue;
2679 
2680  ret = hwaccel->init(s);
2681  if (ret < 0) {
2682  if (ist->hwaccel_id == hwaccel->id) {
2683  av_log(NULL, AV_LOG_FATAL,
2684  "%s hwaccel requested for input stream #%d:%d, "
2685  "but cannot be initialized.\n", hwaccel->name,
2686  ist->file_index, ist->st->index);
2687  return AV_PIX_FMT_NONE;
2688  }
2689  continue;
2690  }
2691 
2692  if (ist->hw_frames_ctx) {
2693  s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2694  if (!s->hw_frames_ctx)
2695  return AV_PIX_FMT_NONE;
2696  }
2697 
2698  ist->active_hwaccel_id = hwaccel->id;
2699  ist->hwaccel_pix_fmt = *p;
2700  break;
2701  }
2702 
2703  return *p;
2704 }
2705 
2706 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2707 {
2708  InputStream *ist = s->opaque;
2709 
2710  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2711  return ist->hwaccel_get_buffer(s, frame, flags);
2712 
2713  return avcodec_default_get_buffer2(s, frame, flags);
2714 }
2715 
2716 static int init_input_stream(int ist_index, char *error, int error_len)
2717 {
2718  int i, ret;
2719  InputStream *ist = input_streams[ist_index];
2720 
2721  for (i = 0; i < ist->nb_filters; i++) {
2722  ret = ifilter_parameters_from_decoder(ist->filters[i], ist->dec_ctx);
2723  if (ret < 0) {
2724  av_log(NULL, AV_LOG_FATAL, "Error initializing filter input\n");
2725  return ret;
2726  }
2727  }
2728 
2729  if (ist->decoding_needed) {
2730  AVCodec *codec = ist->dec;
2731  if (!codec) {
2732  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2733  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2734  return AVERROR(EINVAL);
2735  }
2736 
2737  ist->dec_ctx->opaque = ist;
2738  ist->dec_ctx->get_format = get_format;
2739  ist->dec_ctx->get_buffer2 = get_buffer;
2740  ist->dec_ctx->thread_safe_callbacks = 1;
2741 
2742  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2743  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2744  (ist->decoding_needed & DECODING_FOR_OST)) {
2745  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2746  if (ist->decoding_needed & DECODING_FOR_FILTER)
2747  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2748  }
2749 
2750  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2751 
2752  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2753  * audio, and video decoders such as cuvid or mediacodec */
2754  av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
2755 
2756  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2757  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2758  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2759  if (ret == AVERROR_EXPERIMENTAL)
2760  abort_codec_experimental(codec, 0);
2761 
2762  snprintf(error, error_len,
2763  "Error while opening decoder for input stream "
2764  "#%d:%d : %s",
2765  ist->file_index, ist->st->index, av_err2str(ret));
2766  return ret;
2767  }
2768  assert_avoptions(ist->decoder_opts);
2769  }
2770 
2771  ist->next_pts = AV_NOPTS_VALUE;
2772  ist->next_dts = AV_NOPTS_VALUE;
2773 
2774  return 0;
2775 }
2776 
2777 static InputStream *get_input_stream(OutputStream *ost)
2778 {
2779  if (ost->source_index >= 0)
2780  return input_streams[ost->source_index];
2781  return NULL;
2782 }
2783 
2784 static int compare_int64(const void *a, const void *b)
2785 {
2786  return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
2787 }
2788 
2789 /* open the muxer when all the streams are initialized */
2790 static int check_init_output_file(OutputFile *of, int file_index)
2791 {
2792  int ret, i;
2793 
2794  for (i = 0; i < of->ctx->nb_streams; i++) {
2795  OutputStream *ost = output_streams[of->ost_index + i];
2796  if (!ost->initialized)
2797  return 0;
2798  }
2799 
2800  of->ctx->interrupt_callback = int_cb;
2801 
2802  ret = avformat_write_header(of->ctx, &of->opts);
2803  if (ret < 0) {
2804  av_log(NULL, AV_LOG_ERROR,
2805  "Could not write header for output file #%d "
2806  "(incorrect codec parameters ?): %s\n",
2807  file_index, av_err2str(ret));
2808  return ret;
2809  }
2810  //assert_avoptions(of->opts);
2811  of->header_written = 1;
2812 
2813  av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2814 
2815  if (sdp_filename || want_sdp)
2816  print_sdp();
2817 
2818  /* flush the muxing queues */
2819  for (i = 0; i < of->ctx->nb_streams; i++) {
2820  OutputStream *ost = output_streams[of->ost_index + i];
2821 
2822  while (av_fifo_size(ost->muxing_queue)) {
2823  AVPacket pkt;
2824  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2825  write_packet(of, &pkt, ost);
2826  }
2827  }
2828 
2829  return 0;
2830 }
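/* [Editor's note, not in the original source] Header writing is deliberately
 * deferred: until every stream of this output file is initialized, packets
 * produced for it are held in ost->muxing_queue, and the loop above drains
 * those queues as soon as avformat_write_header() has succeeded. */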
2831 
2832 static int init_output_bsfs(OutputStream *ost)
2833 {
2834  AVBSFContext *ctx;
2835  int i, ret;
2836 
2837  if (!ost->nb_bitstream_filters)
2838  return 0;
2839 
2840  for (i = 0; i < ost->nb_bitstream_filters; i++) {
2841  ctx = ost->bsf_ctx[i];
2842 
2843  ret = avcodec_parameters_copy(ctx->par_in,
2844  i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
2845  if (ret < 0)
2846  return ret;
2847 
2848  ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
2849 
2850  ret = av_bsf_init(ctx);
2851  if (ret < 0) {
2852  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2853  ost->bsf_ctx[i]->filter->name);
2854  return ret;
2855  }
2856  }
2857 
2858  ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
2859  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
2860  if (ret < 0)
2861  return ret;
2862 
2863  ost->st->time_base = ctx->time_base_out;
2864 
2865  return 0;
2866 }
2867 
2868 static int init_output_stream_streamcopy(OutputStream *ost)
2869 {
2870  OutputFile *of = output_files[ost->file_index];
2871  InputStream *ist = get_input_stream(ost);
2872  AVCodecParameters *par_dst = ost->st->codecpar;
2873  AVCodecParameters *par_src = ost->ref_par;
2874  AVRational sar;
2875  int i, ret;
2876  uint32_t codec_tag = par_dst->codec_tag;
2877 
2878  av_assert0(ist && !ost->filter);
2879 
2880  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
2881  if (ret >= 0)
2882  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2883  if (ret < 0) {
2884  av_log(NULL, AV_LOG_FATAL,
2885  "Error setting up codec context options.\n");
2886  return ret;
2887  }
2888  avcodec_parameters_from_context(par_src, ost->enc_ctx);
2889 
2890  if (!codec_tag) {
2891  unsigned int codec_tag_tmp;
2892  if (!of->ctx->oformat->codec_tag ||
2893  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
2894  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
2895  codec_tag = par_src->codec_tag;
2896  }
2897 
2898  ret = avcodec_parameters_copy(par_dst, par_src);
2899  if (ret < 0)
2900  return ret;
2901 
2902  par_dst->codec_tag = codec_tag;
2903 
2904  if (!ost->frame_rate.num)
2905  ost->frame_rate = ist->framerate;
2906  ost->st->avg_frame_rate = ost->frame_rate;
2907 
2908  ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
2909  if (ret < 0)
2910  return ret;
2911 
2912  // copy timebase while removing common factors
2913  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
2914  ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
2915 
2916  // copy estimated duration as a hint to the muxer
2917  if (ost->st->duration <= 0 && ist->st->duration > 0)
2918  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
2919 
2920  // copy disposition
2921  ost->st->disposition = ist->st->disposition;
2922 
2923  if (ist->st->nb_side_data) {
2924  ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2925  sizeof(*ist->st->side_data));
2926  if (!ost->st->side_data)
2927  return AVERROR(ENOMEM);
2928 
2929  ost->st->nb_side_data = 0;
2930  for (i = 0; i < ist->st->nb_side_data; i++) {
2931  const AVPacketSideData *sd_src = &ist->st->side_data[i];
2932  AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2933 
2934  if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2935  continue;
2936 
2937  sd_dst->data = av_malloc(sd_src->size);
2938  if (!sd_dst->data)
2939  return AVERROR(ENOMEM);
2940  memcpy(sd_dst->data, sd_src->data, sd_src->size);
2941  sd_dst->size = sd_src->size;
2942  sd_dst->type = sd_src->type;
2943  ost->st->nb_side_data++;
2944  }
2945  }
2946 
2947  ost->parser = av_parser_init(par_dst->codec_id);
2948  ost->parser_avctx = avcodec_alloc_context3(NULL);
2949  if (!ost->parser_avctx)
2950  return AVERROR(ENOMEM);
2951 
2952  switch (par_dst->codec_type) {
2953  case AVMEDIA_TYPE_AUDIO:
2954  if (audio_volume != 256) {
2955  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2956  exit_program(1);
2957  }
2958  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
2959  par_dst->block_align= 0;
2960  if(par_dst->codec_id == AV_CODEC_ID_AC3)
2961  par_dst->block_align= 0;
2962  break;
2963  case AVMEDIA_TYPE_VIDEO:
2964  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2965  sar =
2966  av_mul_q(ost->frame_aspect_ratio,
2967  (AVRational){ par_dst->height, par_dst->width });
2968  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2969  "with stream copy may produce invalid files\n");
2970  }
2971  else if (ist->st->sample_aspect_ratio.num)
2972  sar = ist->st->sample_aspect_ratio;
2973  else
2974  sar = par_src->sample_aspect_ratio;
2975  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
2976  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2977  ost->st->r_frame_rate = ist->st->r_frame_rate;
2978  break;
2979  }
2980 
2981  return 0;
2982 }
2983 
2984 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2985 {
2986  AVDictionaryEntry *e;
2987 
2988  uint8_t *encoder_string;
2989  int encoder_string_len;
2990  int format_flags = 0;
2991  int codec_flags = 0;
2992 
2993  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
2994  return;
2995 
2996  e = av_dict_get(of->opts, "fflags", NULL, 0);
2997  if (e) {
2998  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2999  if (!o)
3000  return;
3001  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3002  }
3003  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3004  if (e) {
3005  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3006  if (!o)
3007  return;
3008  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3009  }
3010 
3011  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3012  encoder_string = av_mallocz(encoder_string_len);
3013  if (!encoder_string)
3014  exit_program(1);
3015 
3016  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3017  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3018  else
3019  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3020  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3021  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3022  AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
3023 }
3024 
3025 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3026  AVCodecContext *avctx)
3027 {
3028  char *p;
3029  int n = 1, i, size, index = 0;
3030  int64_t t, *pts;
3031 
3032  for (p = kf; *p; p++)
3033  if (*p == ',')
3034  n++;
3035  size = n;
3036  pts = av_malloc_array(size, sizeof(*pts));
3037  if (!pts) {
3038  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3039  exit_program(1);
3040  }
3041 
3042  p = kf;
3043  for (i = 0; i < n; i++) {
3044  char *next = strchr(p, ',');
3045 
3046  if (next)
3047  *next++ = 0;
3048 
3049  if (!memcmp(p, "chapters", 8)) {
3050 
3051  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3052  int j;
3053 
3054  if (avf->nb_chapters > INT_MAX - size ||
3055  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3056  sizeof(*pts)))) {
3057  av_log(NULL, AV_LOG_FATAL,
3058  "Could not allocate forced key frames array.\n");
3059  exit_program(1);
3060  }
3061  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3062  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3063 
3064  for (j = 0; j < avf->nb_chapters; j++) {
3065  AVChapter *c = avf->chapters[j];
3066  av_assert1(index < size);
3067  pts[index++] = av_rescale_q(c->start, c->time_base,
3068  avctx->time_base) + t;
3069  }
3070 
3071  } else {
3072 
3073  t = parse_time_or_die("force_key_frames", p, 1);
3074  av_assert1(index < size);
3075  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3076 
3077  }
3078 
3079  p = next;
3080  }
3081 
3082  av_assert0(index == size);
3083  qsort(pts, size, sizeof(*pts), compare_int64);
3084  ost->forced_kf_count = size;
3085  ost->forced_kf_pts = pts;
3086 }
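/* [Editor's note, not in the original source] parse_forced_key_frames() handles the
 * static forms of -force_key_frames: a comma-separated list of times (e.g.
 * "0,10,20,30.5") and/or the "chapters[delta]" shorthand, e.g. "chapters-0.1" to
 * force a keyframe 0.1s before every chapter start. The "expr:" and "source"
 * forms are handled separately in init_output_stream_encode() below.
 */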
3087 
3088 static int init_output_stream_encode(OutputStream *ost)
3089 {
3090  InputStream *ist = get_input_stream(ost);
3091  AVCodecContext *enc_ctx = ost->enc_ctx;
3092  AVCodecContext *dec_ctx = NULL;
3093  AVFormatContext *oc = output_files[ost->file_index]->ctx;
3094  int j, ret;
3095 
3096  set_encoder_id(output_files[ost->file_index], ost);
3097 
3098  if (ist) {
3099  ost->st->disposition = ist->st->disposition;
3100 
3101  dec_ctx = ist->dec_ctx;
3102 
3103  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3104  } else {
3105  for (j = 0; j < oc->nb_streams; j++) {
3106  AVStream *st = oc->streams[j];
3107  if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3108  break;
3109  }
3110  if (j == oc->nb_streams)
3111  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3112  ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3113  ost->st->disposition = AV_DISPOSITION_DEFAULT;
3114  }
3115 
3116  if ((enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3117  enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3118  filtergraph_is_simple(ost->filter->graph)) {
3119  FilterGraph *fg = ost->filter->graph;
3120 
3121  if (configure_filtergraph(fg)) {
3122  av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
3123  exit_program(1);
3124  }
3125  }
3126 
3127  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3128  if (!ost->frame_rate.num)
3129  ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3130  if (ist && !ost->frame_rate.num)
3131  ost->frame_rate = ist->framerate;
3132  if (ist && !ost->frame_rate.num)
3133  ost->frame_rate = ist->st->r_frame_rate;
3134  if (ist && !ost->frame_rate.num) {
3135  ost->frame_rate = (AVRational){25, 1};
3136  av_log(NULL, AV_LOG_WARNING,
3137  "No information "
3138  "about the input framerate is available. Falling "
3139  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3140  "if you want a different framerate.\n",
3141  ost->file_index, ost->index);
3142  }
3143 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3144  if (ost->enc->supported_framerates && !ost->force_fps) {
3145  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3146  ost->frame_rate = ost->enc->supported_framerates[idx];
3147  }
3148  // reduce frame rate for mpeg4 to be within the spec limits
3149  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3150  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3151  ost->frame_rate.num, ost->frame_rate.den, 65535);
3152  }
3153  }
3154 
3155  switch (enc_ctx->codec_type) {
3156  case AVMEDIA_TYPE_AUDIO:
3157  enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3158  if (dec_ctx)
3159  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3160  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3161  enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3162  enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3163  enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3164  enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3165  break;
3166  case AVMEDIA_TYPE_VIDEO:
3167  enc_ctx->time_base = av_inv_q(ost->frame_rate);
3168  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3169  enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3170  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3171  && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3172  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3173  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3174  }
3175  for (j = 0; j < ost->forced_kf_count; j++)
3176  ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3177  AV_TIME_BASE_Q,
3178  enc_ctx->time_base);
3179 
3180  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3181  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3182  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3183  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3184  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3185  av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3186  if (!strncmp(ost->enc->name, "libx264", 7) &&
3187  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3188  av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3189  av_log(NULL, AV_LOG_WARNING,
3190  "No pixel format specified, %s for H.264 encoding chosen.\n"
3191  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3192  av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3193  if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3194  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3195  av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3196  av_log(NULL, AV_LOG_WARNING,
3197  "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3198  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3199  av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3200  enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3201  if (dec_ctx)
3202  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3203  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3204 
3205  ost->st->avg_frame_rate = ost->frame_rate;
3206 
3207  if (!dec_ctx ||
3208  enc_ctx->width != dec_ctx->width ||
3209  enc_ctx->height != dec_ctx->height ||
3210  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3211  enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3212  }
3213 
3214  if (ost->forced_keyframes) {
3215  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3216  ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3217  forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3218  if (ret < 0) {
3219  av_log(NULL, AV_LOG_ERROR,
3220  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3221  return ret;
3222  }
3223  ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3224  ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3225  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3226  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3227 
3228  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3229  // parse it only for static kf timings
3230  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3231  parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3232  }
3233  }
3234  break;
3235  case AVMEDIA_TYPE_SUBTITLE:
3236  enc_ctx->time_base = (AVRational){1, 1000};
3237  if (!enc_ctx->width) {
3238  enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3239  enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3240  }
3241  break;
3242  case AVMEDIA_TYPE_DATA:
3243  break;
3244  default:
3245  abort();
3246  break;
3247  }
3248 
3249  return 0;
3250 }
3251 
3252 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3253 {
3254  int ret = 0;
3255 
3256  if (ost->encoding_needed) {
3257  AVCodec *codec = ost->enc;
3258  AVCodecContext *dec = NULL;
3259  InputStream *ist;
3260 
3261  ret = init_output_stream_encode(ost);
3262  if (ret < 0)
3263  return ret;
3264 
3265  if ((ist = get_input_stream(ost)))
3266  dec = ist->dec_ctx;
3267  if (dec && dec->subtitle_header) {
3268  /* ASS code assumes this buffer is null terminated so add extra byte. */
3269  ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3270  if (!ost->enc_ctx->subtitle_header)
3271  return AVERROR(ENOMEM);
3272  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3273  ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3274  }
3275  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3276  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3277  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3278  !codec->defaults &&
3279  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3280  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3281  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3282 
3283  if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter)) {
3284  ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
3285  if (!ost->enc_ctx->hw_frames_ctx)
3286  return AVERROR(ENOMEM);
3287  }
3288 
3289  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3290  if (ret == AVERROR_EXPERIMENTAL)
3291  abort_codec_experimental(codec, 1);
3292  snprintf(error, error_len,
3293  "Error while opening encoder for output stream #%d:%d - "
3294  "maybe incorrect parameters such as bit_rate, rate, width or height",
3295  ost->file_index, ost->index);
3296  return ret;
3297  }
3298  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3299  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3300  av_buffersink_set_frame_size(ost->filter->filter,
3301  ost->enc_ctx->frame_size);
3303  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3304  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3305  " It takes bits/s as argument, not kbits/s\n");
3306 
3307  ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3308  if (ret < 0) {
3309  av_log(NULL, AV_LOG_FATAL,
3310  "Error initializing the output stream codec context.\n");
3311  exit_program(1);
3312  }
3313  /*
3314  * FIXME: ost->st->codec shouldn't be needed here anymore.
3315  */
3316  ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3317  if (ret < 0)
3318  return ret;
3319 
3320  if (ost->enc_ctx->nb_coded_side_data) {
3321  int i;
3322 
3323  ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
3324  sizeof(*ost->st->side_data));
3325  if (!ost->st->side_data)
3326  return AVERROR(ENOMEM);
3327 
3328  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3329  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3330  AVPacketSideData *sd_dst = &ost->st->side_data[i];
3331 
3332  sd_dst->data = av_malloc(sd_src->size);
3333  if (!sd_dst->data)
3334  return AVERROR(ENOMEM);
3335  memcpy(sd_dst->data, sd_src->data, sd_src->size);
3336  sd_dst->size = sd_src->size;
3337  sd_dst->type = sd_src->type;
3338  ost->st->nb_side_data++;
3339  }
3340  }
3341 
3342  // copy timebase while removing common factors
3343  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3344  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3345 
3346  // copy estimated duration as a hint to the muxer
3347  if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3348  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3349 
3350  ost->st->codec->codec= ost->enc_ctx->codec;
3351  } else if (ost->stream_copy) {
3352  ret = init_output_stream_streamcopy(ost);
3353  if (ret < 0)
3354  return ret;
3355 
3356  /*
3357  * FIXME: will the codec context used by the parser during streamcopy
3358  * This should go away with the new parser API.
3359  */
3360  ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3361  if (ret < 0)
3362  return ret;
3363  }
3364 
3365  // parse user provided disposition, and update stream values
3366  if (ost->disposition) {
3367  static const AVOption opts[] = {
3368  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3369  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3370  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3371  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3372  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3373  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3374  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3375  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3376  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3377  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3378  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3379  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3380  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3381  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3382  { NULL },
3383  };
3384  static const AVClass class = {
3385  .class_name = "",
3386  .item_name = av_default_item_name,
3387  .option = opts,
3388  .version = LIBAVUTIL_VERSION_INT,
3389  };
3390  const AVClass *pclass = &class;
3391 
3392  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3393  if (ret < 0)
3394  return ret;
3395  }
3396 
3397  /* initialize bitstream filters for the output stream
3398  * needs to be done here, because the codec id for streamcopy is not
3399  * known until now */
3400  ret = init_output_bsfs(ost);
3401  if (ret < 0)
3402  return ret;
3403 
3404  ost->initialized = 1;
3405 
3406  ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3407  if (ret < 0)
3408  return ret;
3409 
3410  return ret;
3411 }
3412 
3413 static void report_new_stream(int input_index, AVPacket *pkt)
3414 {
3415  InputFile *file = input_files[input_index];
3416  AVStream *st = file->ctx->streams[pkt->stream_index];
3417 
3418  if (pkt->stream_index < file->nb_streams_warn)
3419  return;
3420  av_log(file->ctx, AV_LOG_WARNING,
3421  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3422  av_get_media_type_string(st->codecpar->codec_type),
3423  input_index, pkt->stream_index,
3424  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3425  file->nb_streams_warn = pkt->stream_index + 1;
3426 }
3427 
3428 static int transcode_init(void)
3429 {
3430  int ret = 0, i, j, k;
3431  AVFormatContext *oc;
3432  OutputStream *ost;
3433  InputStream *ist;
3434  char error[1024] = {0};
3435 
3436  for (i = 0; i < nb_filtergraphs; i++) {
3437  FilterGraph *fg = filtergraphs[i];
3438  for (j = 0; j < fg->nb_outputs; j++) {
3439  OutputFilter *ofilter = fg->outputs[j];
3440  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3441  continue;
3442  if (fg->nb_inputs != 1)
3443  continue;
3444  for (k = nb_input_streams-1; k >= 0 ; k--)
3445  if (fg->inputs[0]->ist == input_streams[k])
3446  break;
3447  ofilter->ost->source_index = k;
3448  }
3449  }
3450 
3451  /* init framerate emulation */
3452  for (i = 0; i < nb_input_files; i++) {
3453  InputFile *ifile = input_files[i];
3454  if (ifile->rate_emu)
3455  for (j = 0; j < ifile->nb_streams; j++)
3456  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3457  }
3458 
3459  /* hwaccel transcoding */
3460  for (i = 0; i < nb_output_streams; i++) {
3461  ost = output_streams[i];
3462 
3463  if (!ost->stream_copy) {
3464 #if CONFIG_LIBMFX
3465  if (qsv_transcode_init(ost))
3466  exit_program(1);
3467 #endif
3468 
3469 #if CONFIG_CUVID
3470  if (cuvid_transcode_init(ost))
3471  exit_program(1);
3472 #endif
3473  }
3474  }
3475 
3476  /* init input streams */
3477  for (i = 0; i < nb_input_streams; i++)
3478  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3479  for (i = 0; i < nb_output_streams; i++) {
3480  ost = output_streams[i];
3481  avcodec_close(ost->enc_ctx);
3482  }
3483  goto dump_format;
3484  }
3485 
3486  /* open each encoder */
3487  for (i = 0; i < nb_output_streams; i++) {
3488  ret = init_output_stream(output_streams[i], error, sizeof(error));
3489  if (ret < 0)
3490  goto dump_format;
3491  }
3492 
3493  /* discard unused programs */
3494  for (i = 0; i < nb_input_files; i++) {
3495  InputFile *ifile = input_files[i];
3496  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3497  AVProgram *p = ifile->ctx->programs[j];
3498  int discard = AVDISCARD_ALL;
3499 
3500  for (k = 0; k < p->nb_stream_indexes; k++)
3501  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3502  discard = AVDISCARD_DEFAULT;
3503  break;
3504  }
3505  p->discard = discard;
3506  }
3507  }
3508 
3509  /* write headers for files with no streams */
3510  for (i = 0; i < nb_output_files; i++) {
3511  oc = output_files[i]->ctx;
3512  if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3513  ret = check_init_output_file(output_files[i], i);
3514  if (ret < 0)
3515  goto dump_format;
3516  }
3517  }
3518 
3519  dump_format:
3520  /* dump the stream mapping */
3521  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3522  for (i = 0; i < nb_input_streams; i++) {
3523  ist = input_streams[i];
3524 
3525  for (j = 0; j < ist->nb_filters; j++) {
3526  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3527  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3528  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3529  ist->filters[j]->name);
3530  if (nb_filtergraphs > 1)
3531  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3532  av_log(NULL, AV_LOG_INFO, "\n");
3533  }
3534  }
3535  }
3536 
3537  for (i = 0; i < nb_output_streams; i++) {
3538  ost = output_streams[i];
3539 
3540  if (ost->attachment_filename) {
3541  /* an attached file */
3542  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3543  ost->attachment_filename, ost->file_index, ost->index);
3544  continue;
3545  }
3546 
3547  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3548  /* output from a complex graph */
3549  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3550  if (nb_filtergraphs > 1)
3551  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3552 
3553  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3554  ost->index, ost->enc ? ost->enc->name : "?");
3555  continue;
3556  }
3557 
3558  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3559  input_streams[ost->source_index]->file_index,
3560  input_streams[ost->source_index]->st->index,
3561  ost->file_index,
3562  ost->index);
3563  if (ost->sync_ist != input_streams[ost->source_index])
3564  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3565  ost->sync_ist->file_index,
3566  ost->sync_ist->st->index);
3567  if (ost->stream_copy)
3568  av_log(NULL, AV_LOG_INFO, " (copy)");
3569  else {
3570  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3571  const AVCodec *out_codec = ost->enc;
3572  const char *decoder_name = "?";
3573  const char *in_codec_name = "?";
3574  const char *encoder_name = "?";
3575  const char *out_codec_name = "?";
3576  const AVCodecDescriptor *desc;
3577 
3578  if (in_codec) {
3579  decoder_name = in_codec->name;
3580  desc = avcodec_descriptor_get(in_codec->id);
3581  if (desc)
3582  in_codec_name = desc->name;
3583  if (!strcmp(decoder_name, in_codec_name))
3584  decoder_name = "native";
3585  }
3586 
3587  if (out_codec) {
3588  encoder_name = out_codec->name;
3589  desc = avcodec_descriptor_get(out_codec->id);
3590  if (desc)
3591  out_codec_name = desc->name;
3592  if (!strcmp(encoder_name, out_codec_name))
3593  encoder_name = "native";
3594  }
3595 
3596  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3597  in_codec_name, decoder_name,
3598  out_codec_name, encoder_name);
3599  }
3600  av_log(NULL, AV_LOG_INFO, "\n");
3601  }
3602 
3603  if (ret) {
3604  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3605  return ret;
3606  }
3607 
3608  transcode_init_done = 1;
3609 
3610  return 0;
3611 }
3612 
3613 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3614 static int need_output(void)
3615 {
3616  int i;
3617 
3618  for (i = 0; i < nb_output_streams; i++) {
3619  OutputStream *ost = output_streams[i];
3620  OutputFile *of = output_files[ost->file_index];
3621  AVFormatContext *os = output_files[ost->file_index]->ctx;
3622 
3623  if (ost->finished ||
3624  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3625  continue;
3626  if (ost->frame_number >= ost->max_frames) {
3627  int j;
3628  for (j = 0; j < of->ctx->nb_streams; j++)
3629  close_output_stream(output_streams[of->ost_index + j]);
3630  continue;
3631  }
3632 
3633  return 1;
3634  }
3635 
3636  return 0;
3637 }
3638 
3639 /**
3640  * Select the output stream to process.
3641  *
3642  * @return selected output stream, or NULL if none available
3643  */
3644 static OutputStream *choose_output(void)
3645 {
3646  int i;
3647  int64_t opts_min = INT64_MAX;
3648  OutputStream *ost_min = NULL;
3649 
3650  for (i = 0; i < nb_output_streams; i++) {
3651  OutputStream *ost = output_streams[i];
3652  int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3653  av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3654  AV_TIME_BASE_Q);
3655  if (ost->st->cur_dts == AV_NOPTS_VALUE)
3656  av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3657 
3658  if (!ost->finished && opts < opts_min) {
3659  opts_min = opts;
3660  ost_min = ost->unavailable ? NULL : ost;
3661  }
3662  }
3663  return ost_min;
3664 }
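/* Illustrative note (added, not part of the original ffmpeg.c): choose_output() picks
 * the stream whose last muxed DTS, rescaled to microseconds, is smallest, which keeps
 * the output streams roughly interleaved.  A hypothetical worked example of the
 * rescale used above: with cur_dts == 900 in a 1/90000 time base,
 *     av_rescale_q(900, (AVRational){1, 90000}, AV_TIME_BASE_Q) == 10000
 * i.e. 10 ms expressed in AV_TIME_BASE (microsecond) units. */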
3665 
3666 static void set_tty_echo(int on)
3667 {
3668 #if HAVE_TERMIOS_H
3669  struct termios tty;
3670  if (tcgetattr(0, &tty) == 0) {
3671  if (on) tty.c_lflag |= ECHO;
3672  else tty.c_lflag &= ~ECHO;
3673  tcsetattr(0, TCSANOW, &tty);
3674  }
3675 #endif
3676 }
3677 
3678 static int check_keyboard_interaction(int64_t cur_time)
3679 {
3680  int i, ret, key;
3681  static int64_t last_time;
3682  if (received_nb_signals)
3683  return AVERROR_EXIT;
3684  /* read_key() returns 0 on EOF */
3685  if(cur_time - last_time >= 100000 && !run_as_daemon){
3686  key = read_key();
3687  last_time = cur_time;
3688  }else
3689  key = -1;
3690  if (key == 'q')
3691  return AVERROR_EXIT;
3692  if (key == '+') av_log_set_level(av_log_get_level()+10);
3693  if (key == '-') av_log_set_level(av_log_get_level()-10);
3694  if (key == 's') qp_hist ^= 1;
3695  if (key == 'h'){
3696  if (do_hex_dump){
3697  do_hex_dump = do_pkt_dump = 0;
3698  } else if(do_pkt_dump){
3699  do_hex_dump = 1;
3700  } else
3701  do_pkt_dump = 1;
3702  av_log_set_level(AV_LOG_DEBUG);
3703  }
3704  if (key == 'c' || key == 'C'){
3705  char buf[4096], target[64], command[256], arg[256] = {0};
3706  double time;
3707  int k, n = 0;
3708  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3709  i = 0;
3710  set_tty_echo(1);
3711  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3712  if (k > 0)
3713  buf[i++] = k;
3714  buf[i] = 0;
3715  set_tty_echo(0);
3716  fprintf(stderr, "\n");
3717  if (k > 0 &&
3718  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3719  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3720  target, time, command, arg);
3721  for (i = 0; i < nb_filtergraphs; i++) {
3722  FilterGraph *fg = filtergraphs[i];
3723  if (fg->graph) {
3724  if (time < 0) {
3725  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3726  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3727  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3728  } else if (key == 'c') {
3729  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3730  ret = AVERROR_PATCHWELCOME;
3731  } else {
3732  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3733  if (ret < 0)
3734  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3735  }
3736  }
3737  }
3738  } else {
3739  av_log(NULL, AV_LOG_ERROR,
3740  "Parse error, at least 3 arguments were expected, "
3741  "only %d given in string '%s'\n", n, buf);
3742  }
3743  }
3744  if (key == 'd' || key == 'D'){
3745  int debug=0;
3746  if(key == 'D') {
3747  debug = input_streams[0]->st->codec->debug<<1;
3748  if(!debug) debug = 1;
3749  while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3750  debug += debug;
3751  }else{
3752  char buf[32];
3753  int k = 0;
3754  i = 0;
3755  set_tty_echo(1);
3756  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3757  if (k > 0)
3758  buf[i++] = k;
3759  buf[i] = 0;
3760  set_tty_echo(0);
3761  fprintf(stderr, "\n");
3762  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3763  fprintf(stderr,"error parsing debug value\n");
3764  }
3765  for(i=0;i<nb_input_streams;i++) {
3766  input_streams[i]->st->codec->debug = debug;
3767  }
3768  for(i=0;i<nb_output_streams;i++) {
3769  OutputStream *ost = output_streams[i];
3770  ost->enc_ctx->debug = debug;
3771  }
3772  if(debug) av_log_set_level(AV_LOG_DEBUG);
3773  fprintf(stderr,"debug=%d\n", debug);
3774  }
3775  if (key == '?'){
3776  fprintf(stderr, "key function\n"
3777  "? show this help\n"
3778  "+ increase verbosity\n"
3779  "- decrease verbosity\n"
3780  "c Send command to first matching filter supporting it\n"
3781  "C Send/Queue command to all matching filters\n"
3782  "D cycle through available debug modes\n"
3783  "h dump packets/hex press to cycle through the 3 states\n"
3784  "q quit\n"
3785  "s Show QP histogram\n"
3786  );
3787  }
3788  return 0;
3789 }
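/* Illustrative note (added, not part of the original ffmpeg.c): the 'c'/'C' keys read a
 * line of the form "<target>|all <time>|-1 <command>[ <argument>]".  For example,
 * assuming the active graph contains a filter that supports the "volume" command,
 * pressing 'c' and typing
 *     all -1 volume 0.5
 * sends "volume" with argument "0.5" immediately via avfilter_graph_send_command(),
 * while 'C' with a non-negative <time> queues it via avfilter_graph_queue_command(). */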
3790 
3791 #if HAVE_PTHREADS
3792 static void *input_thread(void *arg)
3793 {
3794  InputFile *f = arg;
3795  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3796  int ret = 0;
3797 
3798  while (1) {
3799  AVPacket pkt;
3800  ret = av_read_frame(f->ctx, &pkt);
3801 
3802  if (ret == AVERROR(EAGAIN)) {
3803  av_usleep(10000);
3804  continue;
3805  }
3806  if (ret < 0) {
3807  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3808  break;
3809  }
3810  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3811  if (flags && ret == AVERROR(EAGAIN)) {
3812  flags = 0;
3813  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3814  av_log(f->ctx, AV_LOG_WARNING,
3815  "Thread message queue blocking; consider raising the "
3816  "thread_queue_size option (current value: %d)\n",
3817  f->thread_queue_size);
3818  }
3819  if (ret < 0) {
3820  if (ret != AVERROR_EOF)
3821  av_log(f->ctx, AV_LOG_ERROR,
3822  "Unable to send packet to main thread: %s\n",
3823  av_err2str(ret));
3824  av_packet_unref(&pkt);
3825  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3826  break;
3827  }
3828  }
3829 
3830  return NULL;
3831 }
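/* Illustrative note (added, not part of the original ffmpeg.c): when the non-blocking
 * send above returns EAGAIN, the thread falls back to a blocking send and logs the
 * warning, so no input is lost but demuxing stalls.  On the command line the queue
 * can be enlarged per input, e.g. (512 is just an example value):
 *     ffmpeg -thread_queue_size 512 -i input.ts ... */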
3832 
3833 static void free_input_threads(void)
3834 {
3835  int i;
3836 
3837  for (i = 0; i < nb_input_files; i++) {
3838  InputFile *f = input_files[i];
3839  AVPacket pkt;
3840 
3841  if (!f || !f->in_thread_queue)
3842  continue;
3843  av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3844  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3845  av_packet_unref(&pkt);
3846 
3847  pthread_join(f->thread, NULL);
3848  f->joined = 1;
3849  av_thread_message_queue_free(&f->in_thread_queue);
3850  }
3851 }
3852 
3853 static int init_input_threads(void)
3854 {
3855  int i, ret;
3856 
3857  if (nb_input_files == 1)
3858  return 0;
3859 
3860  for (i = 0; i < nb_input_files; i++) {
3861  InputFile *f = input_files[i];
3862 
3863  if (f->ctx->pb ? !f->ctx->pb->seekable :
3864  strcmp(f->ctx->iformat->name, "lavfi"))
3865  f->non_blocking = 1;
3866  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3867  f->thread_queue_size, sizeof(AVPacket));
3868  if (ret < 0)
3869  return ret;
3870 
3871  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3872  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3873  av_thread_message_queue_free(&f->in_thread_queue);
3874  return AVERROR(ret);
3875  }
3876  }
3877  return 0;
3878 }
3879 
3880 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3881 {
3882  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3883  f->non_blocking ?
3884  AV_THREAD_MESSAGE_NONBLOCK : 0);
3885 }
3886 #endif
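/* Minimal sketch (added, not part of the original ffmpeg.c): the helpers above use
 * libavutil's thread message queue as a bounded AVPacket channel between each demuxer
 * thread and the main thread.  The hypothetical demo below collapses producer and
 * consumer into one function purely to show the API calls. */
static int thread_queue_demo(AVFormatContext *ctx)
{
    AVThreadMessageQueue *q;
    AVPacket pkt;
    int ret = av_thread_message_queue_alloc(&q, 8, sizeof(AVPacket));
    if (ret < 0)
        return ret;
    if (av_read_frame(ctx, &pkt) >= 0)                     /* producer side */
        av_thread_message_queue_send(q, &pkt, 0);          /* blocks while the queue is full */
    av_thread_message_queue_set_err_recv(q, AVERROR_EOF);  /* tell the receiver no more packets follow */
    while (av_thread_message_queue_recv(q, &pkt, 0) >= 0)  /* consumer side: drains, then gets AVERROR_EOF */
        av_packet_unref(&pkt);
    av_thread_message_queue_free(&q);
    return 0;
}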
3887 
3888 static int get_input_packet(InputFile *f, AVPacket *pkt)
3889 {
3890  if (f->rate_emu) {
3891  int i;
3892  for (i = 0; i < f->nb_streams; i++) {
3893  InputStream *ist = input_streams[f->ist_index + i];
3894  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3895  int64_t now = av_gettime_relative() - ist->start;
3896  if (pts > now)
3897  return AVERROR(EAGAIN);
3898  }
3899  }
3900 
3901 #if HAVE_PTHREADS
3902  if (nb_input_files > 1)
3903  return get_input_packet_mt(f, pkt);
3904 #endif
3905  return av_read_frame(f->ctx, pkt);
3906 }
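/* Illustrative note (added, not part of the original ffmpeg.c): the rate_emu branch
 * above implements -re by comparing the stream's last DTS, converted to AV_TIME_BASE
 * (microsecond) units, with the wall-clock time elapsed since the stream started.
 * Hypothetical worked example: since AV_TIME_BASE == 1000000,
 *     av_rescale(ist->dts, 1000000, AV_TIME_BASE) == ist->dts
 * so a packet whose dts corresponds to 2500000 (2.5 s) is held back with
 * AVERROR(EAGAIN) until av_gettime_relative() - ist->start reaches 2.5 s. */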
3907 
3908 static int got_eagain(void)
3909 {
3910  int i;
3911  for (i = 0; i < nb_output_streams; i++)
3912  if (output_streams[i]->unavailable)
3913  return 1;
3914  return 0;
3915 }
3916 
3917 static void reset_eagain(void)
3918 {
3919  int i;
3920  for (i = 0; i < nb_input_files; i++)
3921  input_files[i]->eagain = 0;
3922  for (i = 0; i < nb_output_streams; i++)
3923  output_streams[i]->unavailable = 0;
3924 }
3925 
3926 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
3927 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3928  AVRational time_base)
3929 {
3930  int ret;
3931 
3932  if (!*duration) {
3933  *duration = tmp;
3934  return tmp_time_base;
3935  }
3936 
3937  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3938  if (ret < 0) {
3939  *duration = tmp;
3940  return tmp_time_base;
3941  }
3942 
3943  return time_base;
3944 }
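/* Illustrative note (added, not part of the original ffmpeg.c): av_compare_ts()
 * compares the two durations in their native time bases without converting either
 * value, so duration_max() keeps whichever one is longer.  Hypothetical worked
 * example with *duration == 3003 in 1/30000 and tmp == 4800 in 1/48000:
 *     3003/30000 ~= 0.1001 s  vs  4800/48000 == 0.1 s
 * av_compare_ts() returns 1 (>= 0), so *duration stays 3003 and time_base is kept. */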
3945 
3946 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3947 {
3948  InputStream *ist;
3949  AVCodecContext *avctx;
3950  int i, ret, has_audio = 0;
3951  int64_t duration = 0;
3952 
3953  ret = av_seek_frame(is, -1, is->start_time, 0);
3954  if (ret < 0)
3955  return ret;
3956 
3957  for (i = 0; i < ifile->nb_streams; i++) {
3958  ist = input_streams[ifile->ist_index + i];
3959  avctx = ist->dec_ctx;
3960 
3961  // flush decoders
3962  if (ist->decoding_needed) {
3963  process_input_packet(ist, NULL, 1);
3964  avcodec_flush_buffers(avctx);
3965  }
3966 
3967  /* duration is the length of the last frame in a stream;
3968  * when an audio stream is present we do not care about the
3969  * last video frame's length because it is not defined exactly */
3970  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
3971  has_audio = 1;
3972  }
3973 
3974  for (i = 0; i < ifile->nb_streams; i++) {
3975  ist = input_streams[ifile->ist_index + i];
3976  avctx = ist->dec_ctx;
3977 
3978  if (has_audio) {
3979  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3980  AVRational sample_rate = {1, avctx->sample_rate};
3981 
3982  duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
3983  } else
3984  continue;
3985  } else {
3986  if (ist->framerate.num) {
3987  duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3988  } else if (ist->st->avg_frame_rate.num) {
3989  duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3990  } else duration = 1;
3991  }
3992  if (!ifile->duration)
3993  ifile->time_base = ist->st->time_base;
3994  /* max_pts - min_pts is the duration of the stream without the last
3995  * frame; adding the last frame's duration gives the total duration */
3996  duration += ist->max_pts - ist->min_pts;
3997  ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
3998  ifile->time_base);
3999  }
4000 
4001  if (ifile->loop > 0)
4002  ifile->loop--;
4003 
4004  return ret;
4005 }
4006 
4007 /*
4008  * Return
4009  * - 0 -- one packet was read and processed
4010  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4011  * this function should be called again
4012  * - AVERROR_EOF -- this function should not be called again
4013  */
4014 static int process_input(int file_index)
4015 {
4016  InputFile *ifile = input_files[file_index];
4017  AVFormatContext *is;
4018  InputStream *ist;
4019  AVPacket pkt;
4020  int ret, i, j;
4021  int64_t duration;
4022  int64_t pkt_dts;
4023 
4024  is = ifile->ctx;
4025  ret = get_input_packet(ifile, &pkt);
4026 
4027  if (ret == AVERROR(EAGAIN)) {
4028  ifile->eagain = 1;
4029  return ret;
4030  }
4031  if (ret < 0 && ifile->loop) {
4032  if ((ret = seek_to_start(ifile, is)) < 0)
4033  return ret;
4034  ret = get_input_packet(ifile, &pkt);
4035  if (ret == AVERROR(EAGAIN)) {
4036  ifile->eagain = 1;
4037  return ret;
4038  }
4039  }
4040  if (ret < 0) {
4041  if (ret != AVERROR_EOF) {
4042  print_error(is->filename, ret);
4043  if (exit_on_error)
4044  exit_program(1);
4045  }
4046 
4047  for (i = 0; i < ifile->nb_streams; i++) {
4048  ist = input_streams[ifile->ist_index + i];
4049  if (ist->decoding_needed) {
4050  ret = process_input_packet(ist, NULL, 0);
4051  if (ret>0)
4052  return 0;
4053  }
4054 
4055  /* mark all outputs that don't go through lavfi as finished */
4056  for (j = 0; j < nb_output_streams; j++) {
4057  OutputStream *ost = output_streams[j];
4058 
4059  if (ost->source_index == ifile->ist_index + i &&
4060  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4061  finish_output_stream(ost);
4062  }
4063  }
4064 
4065  ifile->eof_reached = 1;
4066  return AVERROR(EAGAIN);
4067  }
4068 
4069  reset_eagain();
4070 
4071  if (do_pkt_dump) {
4072  av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4073  is->streams[pkt.stream_index]);
4074  }
4075  /* the following test is needed in case new streams appear
4076  dynamically in the stream: we ignore them */
4077  if (pkt.stream_index >= ifile->nb_streams) {
4078  report_new_stream(file_index, &pkt);
4079  goto discard_packet;
4080  }
4081 
4082  ist = input_streams[ifile->ist_index + pkt.stream_index];
4083 
4084  ist->data_size += pkt.size;
4085  ist->nb_packets++;
4086 
4087  if (ist->discard)
4088  goto discard_packet;
4089 
4090  if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4091  av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4092  exit_program(1);
4093  }
4094 
4095  if (debug_ts) {
4096  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4097  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4098  ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4099  av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4100  av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4101  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4102  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4103  av_ts2str(input_files[ist->file_index]->ts_offset),
4104  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4105  }
4106 
4107  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4108  int64_t stime, stime2;
4109  // Correct the start time based on the enabled streams.
4110  // FIXME: ideally this should be done before the first use of the start time, but we do not know which streams are enabled at that point,
4111  // so we do it here as part of discontinuity handling instead.
4112  if ( ist->next_dts == AV_NOPTS_VALUE
4113  && ifile->ts_offset == -is->start_time
4114  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4115  int64_t new_start_time = INT64_MAX;
4116  for (i=0; i<is->nb_streams; i++) {
4117  AVStream *st = is->streams[i];
4118  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4119  continue;
4120  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4121  }
4122  if (new_start_time > is->start_time) {
4123  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4124  ifile->ts_offset = -new_start_time;
4125  }
4126  }
4127 
4128  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4129  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4130  ist->wrap_correction_done = 1;
4131 
4132  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4133  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4134  ist->wrap_correction_done = 0;
4135  }
4136  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4137  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4138  ist->wrap_correction_done = 0;
4139  }
4140  }
4141 
4142  /* add the stream-global side data to the first packet */
4143  if (ist->nb_packets == 1) {
4144  if (ist->st->nb_side_data)
4145  av_packet_split_side_data(&pkt);
4146  for (i = 0; i < ist->st->nb_side_data; i++) {
4147  AVPacketSideData *src_sd = &ist->st->side_data[i];
4148  uint8_t *dst_data;
4149 
4150  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4151  continue;
4152  if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4153  continue;
4154 
4155  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4156  if (!dst_data)
4157  exit_program(1);
4158 
4159  memcpy(dst_data, src_sd->data, src_sd->size);
4160  }
4161  }
4162 
4163  if (pkt.dts != AV_NOPTS_VALUE)
4164  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4165  if (pkt.pts != AV_NOPTS_VALUE)
4166  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4167 
4168  if (pkt.pts != AV_NOPTS_VALUE)
4169  pkt.pts *= ist->ts_scale;
4170  if (pkt.dts != AV_NOPTS_VALUE)
4171  pkt.dts *= ist->ts_scale;
4172 
4173  pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4174  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4175  ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4176  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4177  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4178  int64_t delta = pkt_dts - ifile->last_ts;
4179  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4180  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4181  ifile->ts_offset -= delta;
4182  av_log(NULL, AV_LOG_DEBUG,
4183  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4184  delta, ifile->ts_offset);
4185  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4186  if (pkt.pts != AV_NOPTS_VALUE)
4187  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4188  }
4189  }
4190 
4191  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4192  if (pkt.pts != AV_NOPTS_VALUE) {
4193  pkt.pts += duration;
4194  ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4195  ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4196  }
4197 
4198  if (pkt.dts != AV_NOPTS_VALUE)
4199  pkt.dts += duration;
4200 
4201  pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4202  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4203  ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4204  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4205  !copy_ts) {
4206  int64_t delta = pkt_dts - ist->next_dts;
4207  if (is->iformat->flags & AVFMT_TS_DISCONT) {
4208  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4209  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4210  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4211  ifile->ts_offset -= delta;
4212  av_log(NULL, AV_LOG_DEBUG,
4213  "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4214  delta, ifile->ts_offset);
4215  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4216  if (pkt.pts != AV_NOPTS_VALUE)
4217  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4218  }
4219  } else {
4220  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4221  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4222  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4223  pkt.dts = AV_NOPTS_VALUE;
4224  }
4225  if (pkt.pts != AV_NOPTS_VALUE){
4226  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4227  delta = pkt_pts - ist->next_dts;
4228  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4229  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4230  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4231  pkt.pts = AV_NOPTS_VALUE;
4232  }
4233  }
4234  }
4235  }
4236 
4237  if (pkt.dts != AV_NOPTS_VALUE)
4238  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4239 
4240  if (debug_ts) {
4241  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4242  ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4243  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4244  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4245  av_ts2str(input_files[ist->file_index]->ts_offset),
4246  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4247  }
4248 
4249  sub2video_heartbeat(ist, pkt.pts);
4250 
4251  process_input_packet(ist, &pkt, 0);
4252 
4253 discard_packet:
4254  av_packet_unref(&pkt);
4255 
4256  return 0;
4257 }
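/* Illustrative note (added, not part of the original ffmpeg.c): the per-file timestamp
 * offset is kept in AV_TIME_BASE (microsecond) units and rescaled to the stream time
 * base before being applied to each packet.  Hypothetical worked example: with
 * ifile->ts_offset == -5000000 (-5 s) and a 1/90000 stream time base,
 *     av_rescale_q(-5000000, AV_TIME_BASE_Q, (AVRational){1, 90000}) == -450000
 * so a packet arriving with dts == 450000 is shifted to 0 before further processing. */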
4258 
4259 /**
4260  * Perform a step of transcoding for the specified filter graph.
4261  *
4262  * @param[in] graph filter graph to consider
4263  * @param[out] best_ist input stream where a frame would allow to continue
4264  * @return 0 for success, <0 for error
4265  */
4266 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4267 {
4268  int i, ret;
4269  int nb_requests, nb_requests_max = 0;
4270  InputFilter *ifilter;
4271  InputStream *ist;
4272 
4273  *best_ist = NULL;
4274  ret = avfilter_graph_request_oldest(graph->graph);
4275  if (ret >= 0)
4276  return reap_filters(0);
4277 
4278  if (ret == AVERROR_EOF) {
4279  ret = reap_filters(1);
4280  for (i = 0; i < graph->nb_outputs; i++)
4281  close_output_stream(graph->outputs[i]->ost);
4282  return ret;
4283  }
4284  if (ret != AVERROR(EAGAIN))
4285  return ret;
4286 
4287  for (i = 0; i < graph->nb_inputs; i++) {
4288  ifilter = graph->inputs[i];
4289  ist = ifilter->ist;
4290  if (input_files[ist->file_index]->eagain ||
4291  input_files[ist->file_index]->eof_reached)
4292  continue;
4293  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4294  if (nb_requests > nb_requests_max) {
4295  nb_requests_max = nb_requests;
4296  *best_ist = ist;
4297  }
4298  }
4299 
4300  if (!*best_ist)
4301  for (i = 0; i < graph->nb_outputs; i++)
4302  graph->outputs[i]->ost->unavailable = 1;
4303 
4304  return 0;
4305 }
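/* Illustrative note (added, not part of the original ffmpeg.c):
 * av_buffersrc_get_nb_failed_requests() returns how many times the graph requested a
 * frame from that buffer source while none was available; choosing the input with the
 * highest count feeds the filtergraph the stream it is currently most starved of. */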
4306 
4307 /**
4308  * Run a single step of transcoding.
4309  *
4310  * @return 0 for success, <0 for error
4311  */
4312 static int transcode_step(void)
4313 {
4314  OutputStream *ost;
4315  InputStream *ist;
4316  int ret;
4317 
4318  ost = choose_output();
4319  if (!ost) {
4320  if (got_eagain()) {
4321  reset_eagain();
4322  av_usleep(10000);
4323  return 0;
4324  }
4325  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4326  return AVERROR_EOF;
4327  }
4328 
4329  if (ost->filter) {
4330  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4331  return ret;
4332  if (!ist)
4333  return 0;
4334  } else {
4335  av_assert0(ost->source_index >= 0);
4336  ist = input_streams[ost->source_index];
4337  }
4338 
4339  ret = process_input(ist->file_index);
4340  if (ret == AVERROR(EAGAIN)) {
4341  if (input_files[ist->file_index]->eagain)
4342  ost->unavailable = 1;
4343  return 0;
4344  }
4345 
4346  if (ret < 0)
4347  return ret == AVERROR_EOF ? 0 : ret;
4348 
4349  return reap_filters(0);
4350 }
4351 
4352 /*
4353  * The following code is the main loop of the file converter
4354  */
4355 static int transcode(void)
4356 {
4357  int ret, i;
4358  AVFormatContext *os;
4359  OutputStream *ost;
4360  InputStream *ist;
4361  int64_t timer_start;
4362  int64_t total_packets_written = 0;
4363 
4364  ret = transcode_init();
4365  if (ret < 0)
4366  goto fail;
4367 
4368  if (stdin_interaction) {
4369  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4370  }
4371 
4372  timer_start = av_gettime_relative();
4373 
4374 #if HAVE_PTHREADS
4375  if ((ret = init_input_threads()) < 0)
4376  goto fail;
4377 #endif
4378 
4379  while (!received_sigterm) {
4380  int64_t cur_time= av_gettime_relative();
4381 
4382  /* if 'q' was pressed, exit */
4383  if (stdin_interaction)
4384  if (check_keyboard_interaction(cur_time) < 0)
4385  break;
4386 
4387  /* check if there's any stream where output is still needed */
4388  if (!need_output()) {
4389  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4390  break;
4391  }
4392 
4393  ret = transcode_step();
4394  if (ret < 0 && ret != AVERROR_EOF) {
4395  char errbuf[128];
4396  av_strerror(ret, errbuf, sizeof(errbuf));
4397 
4398  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4399  break;
4400  }
4401 
4402  /* dump report by using the first video and audio output streams */
4403  print_report(0, timer_start, cur_time);
4404  }
4405 #if HAVE_PTHREADS
4406  free_input_threads();
4407 #endif
4408 
4409  /* at the end of stream, we must flush the decoder buffers */
4410  for (i = 0; i < nb_input_streams; i++) {
4411  ist = input_streams[i];
4412  if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4413  process_input_packet(ist, NULL, 0);
4414  }
4415  }
4416  flush_encoders();
4417 
4418  term_exit();
4419 
4420  /* write the trailer if needed and close file */
4421  for (i = 0; i < nb_output_files; i++) {
4422  os = output_files[i]->ctx;
4423  if (!output_files[i]->header_written) {
4424  av_log(NULL, AV_LOG_ERROR,
4425  "Nothing was written into output file %d (%s), because "
4426  "at least one of its streams received no packets.\n",
4427  i, os->filename);
4428  continue;
4429  }
4430  if ((ret = av_write_trailer(os)) < 0) {
4431  av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4432  if (exit_on_error)
4433  exit_program(1);
4434  }
4435  }
4436 
4437  /* dump report by using the first video and audio streams */
4438  print_report(1, timer_start, av_gettime_relative());
4439 
4440  /* close each encoder */
4441  for (i = 0; i < nb_output_streams; i++) {
4442  ost = output_streams[i];
4443  if (ost->encoding_needed) {
4444  av_freep(&ost->enc_ctx->stats_in);
4445  }
4446  total_packets_written += ost->packets_written;
4447  }
4448 
4449  if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4450  av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4451  exit_program(1);
4452  }
4453 
4454  /* close each decoder */
4455  for (i = 0; i < nb_input_streams; i++) {
4456  ist = input_streams[i];
4457  if (ist->decoding_needed) {
4458  avcodec_close(ist->dec_ctx);
4459  if (ist->hwaccel_uninit)
4460  ist->hwaccel_uninit(ist->dec_ctx);
4461  }
4462  }
4463 
4465 
4466  /* finished ! */
4467  ret = 0;
4468 
4469  fail:
4470 #if HAVE_PTHREADS
4471  free_input_threads();
4472 #endif
4473 
4474  if (output_streams) {
4475  for (i = 0; i < nb_output_streams; i++) {
4476  ost = output_streams[i];
4477  if (ost) {
4478  if (ost->logfile) {
4479  if (fclose(ost->logfile))
4480  av_log(NULL, AV_LOG_ERROR,
4481  "Error closing logfile, loss of information possible: %s\n",
4482  av_err2str(AVERROR(errno)));
4483  ost->logfile = NULL;
4484  }
4485  av_freep(&ost->forced_kf_pts);
4486  av_freep(&ost->apad);
4487  av_freep(&ost->disposition);
4488  av_dict_free(&ost->encoder_opts);
4489  av_dict_free(&ost->sws_dict);
4490  av_dict_free(&ost->swr_opts);
4491  av_dict_free(&ost->resample_opts);
4492  }
4493  }
4494  }
4495  return ret;
4496 }
4497 
4498 
4499 static int64_t getutime(void)
4500 {
4501 #if HAVE_GETRUSAGE
4502  struct rusage rusage;
4503 
4504  getrusage(RUSAGE_SELF, &rusage);
4505  return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4506 #elif HAVE_GETPROCESSTIMES
4507  HANDLE proc;
4508  FILETIME c, e, k, u;
4509  proc = GetCurrentProcess();
4510  GetProcessTimes(proc, &c, &e, &k, &u);
4511  return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4512 #else
4513  return av_gettime_relative();
4514 #endif
4515 }
4516 
4517 static int64_t getmaxrss(void)
4518 {
4519 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4520  struct rusage rusage;
4521  getrusage(RUSAGE_SELF, &rusage);
4522  return (int64_t)rusage.ru_maxrss * 1024;
4523 #elif HAVE_GETPROCESSMEMORYINFO
4524  HANDLE proc;
4525  PROCESS_MEMORY_COUNTERS memcounters;
4526  proc = GetCurrentProcess();
4527  memcounters.cb = sizeof(memcounters);
4528  GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4529  return memcounters.PeakPagefileUsage;
4530 #else
4531  return 0;
4532 #endif
4533 }
4534 
4535 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
4536 {
4537 }
4538 
4539 int main(int argc, char **argv)
4540 {
4541  int i, ret;
4542  int64_t ti;
4543 
4544  init_dynload();
4545 
4546  register_exit(ffmpeg_cleanup);
4547 
4548  setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4549 
4550  av_log_set_flags(AV_LOG_SKIP_REPEATED);
4551  parse_loglevel(argc, argv, options);
4552 
4553  if(argc>1 && !strcmp(argv[1], "-d")){
4554  run_as_daemon=1;
4555  av_log_set_callback(log_callback_null);
4556  argc--;
4557  argv++;
4558  }
4559 
4560  avcodec_register_all();
4561 #if CONFIG_AVDEVICE
4562  avdevice_register_all();
4563 #endif
4564  avfilter_register_all();
4565  av_register_all();
4566  avformat_network_init();
4567 
4568  show_banner(argc, argv, options);
4569 
4570  /* parse options and open all input/output files */
4571  ret = ffmpeg_parse_options(argc, argv);
4572  if (ret < 0)
4573  exit_program(1);
4574 
4575  if (nb_output_files <= 0 && nb_input_files == 0) {
4576  show_usage();
4577  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4578  exit_program(1);
4579  }
4580 
4581  /* file converter / grab */
4582  if (nb_output_files <= 0) {
4583  av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4584  exit_program(1);
4585  }
4586 
4587 // if (nb_input_files == 0) {
4588 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4589 // exit_program(1);
4590 // }
4591 
4592  for (i = 0; i < nb_output_files; i++) {
4593  if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
4594  want_sdp = 0;
4595  }
4596 
4597  current_time = ti = getutime();
4598  if (transcode() < 0)
4599  exit_program(1);
4600  ti = getutime() - ti;
4601  if (do_benchmark) {
4602  av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4603  }
4604  av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4605  decode_error_stat[0], decode_error_stat[1]);
4606  if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4607  exit_program(69);
4608 
4609  exit_program(received_nb_signals ? 255 : main_return_code);
4610  return main_return_code;
4610  return main_return_code;
4611 }