FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdint.h>
34 
35 #if HAVE_IO_H
36 #include <io.h>
37 #endif
38 #if HAVE_UNISTD_H
39 #include <unistd.h>
40 #endif
41 
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
45 #include "libavutil/opt.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
64 
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
68 
69 #if HAVE_SYS_RESOURCE_H
70 #include <sys/time.h>
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
74 #include <windows.h>
75 #endif
76 #if HAVE_GETPROCESSMEMORYINFO
77 #include <windows.h>
78 #include <psapi.h>
79 #endif
80 #if HAVE_SETCONSOLECTRLHANDLER
81 #include <windows.h>
82 #endif
83 
84 
85 #if HAVE_SYS_SELECT_H
86 #include <sys/select.h>
87 #endif
88 
89 #if HAVE_TERMIOS_H
90 #include <fcntl.h>
91 #include <sys/ioctl.h>
92 #include <sys/time.h>
93 #include <termios.h>
94 #elif HAVE_KBHIT
95 #include <conio.h>
96 #endif
97 
98 #if HAVE_PTHREADS
99 #include <pthread.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
/* Program identity consumed by cmdutils for the banner/version output. */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
111 
/* File opened for -vstats / -vstats_file per-frame video statistics. */
112 static FILE *vstats_file;
113 
/* Variable names usable in -force_key_frames expressions (NULL-terminated). */
114 const char *const forced_keyframes_const_names[] = {
115  "n",
116  "n_forced",
117  "prev_forced_n",
118  "prev_forced_t",
119  "t",
120  NULL
121 };
122 
/* Forward declarations for helpers defined later in this file. */
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
126 
127 static int run_as_daemon = 0;
/* Counters reported in the status line / final summary. */
128 static int nb_frames_dup = 0;
129 static int nb_frames_drop = 0;
130 static int64_t decode_error_stat[2];
131 
/* NOTE(review): used as a 64-bit microsecond timestamp in update_benchmark()
   but declared "int" here; presumably "int64_t" upstream and the type token
   was mangled by the doxygen export -- TODO confirm against original source. */
132 static int current_time;
/* NOTE(review): the bare numbers below are lines dropped by the doxygen
   export (hyperlinked declarations; apparently including "subtitle_out",
   which do_subtitle_out() below uses, and the input/output stream/file
   arrays) -- restore from the original ffmpeg.c before compiling. */
134 
136 
141 
146 
149 
150 #if HAVE_TERMIOS_H
151 
152 /* init terminal so that we can grab keys */
153 static struct termios oldtty;
154 static int restore_tty;
155 #endif
156 
157 #if HAVE_PTHREADS
158 static void free_input_threads(void);
159 #endif
160 
161 /* sub2video hack:
162  Convert subtitles to video with alpha to insert them in filter graphs.
163  This is a temporary solution until libavfilter gets real subtitles support.
164  */
165 
/* (Re)allocate the sub2video canvas at the decoder's size, falling back to
   the configured sub2video w/h, and zero it; returns <0 on alloc failure.
   NOTE(review): the doxygen export dropped the signature line (upstream:
   static int sub2video_get_blank_frame(InputStream *ist)) and line 174
   (presumably the frame->format assignment) -- restore before compiling. */
167 {
168  int ret;
169  AVFrame *frame = ist->sub2video.frame;
170 
171  av_frame_unref(frame);
172  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
173  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
175  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
176  return ret;
/* Only plane 0 is cleared: the canvas is a single packed plane
   (4 bytes/pixel, per sub2video_copy_rect() below). */
177  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
178  return 0;
179 }
180 
181 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
182  AVSubtitleRect *r)
183 {
184  uint32_t *pal, *dst2;
185  uint8_t *src, *src2;
186  int x, y;
187 
188  if (r->type != SUBTITLE_BITMAP) {
189  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
190  return;
191  }
192  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
193  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
194  r->x, r->y, r->w, r->h, w, h
195  );
196  return;
197  }
198 
199  dst += r->y * dst_linesize + r->x * 4;
200  src = r->data[0];
201  pal = (uint32_t *)r->data[1];
202  for (y = 0; y < r->h; y++) {
203  dst2 = (uint32_t *)dst;
204  src2 = src;
205  for (x = 0; x < r->w; x++)
206  *(dst2++) = pal[*(src2++)];
207  dst += dst_linesize;
208  src += r->linesize[0];
209  }
210 }
211 
/* Stamp the current sub2video canvas with pts, record it as the last pushed
   pts, and (upstream) feed the frame to every filter of this input stream.
   NOTE(review): lines 220-222 of the original -- the body of the for loop,
   which pushes the frame into each buffersrc -- were dropped by the doxygen
   export, leaving the loop below empty. Restore before compiling. */
212 static void sub2video_push_ref(InputStream *ist, int64_t pts)
213 {
214  AVFrame *frame = ist->sub2video.frame;
215  int i;
216 
217  av_assert1(frame->data[0]);
218  ist->sub2video.last_pts = frame->pts = pts;
219  for (i = 0; i < ist->nb_filters; i++)
223 }
224 
/* Re-render the sub2video canvas: blank it, draw each rectangle of sub
   (sub == NULL just refreshes/clears the canvas), push the result with the
   proper pts and remember when the subtitle stops being displayed.
   NOTE(review): line 247 -- the first half of the av_log() call whose
   message is "Impossible to get a blank canvas." -- was dropped by the
   doxygen export. Also note dst is declared int8_t * but assigned from
   frame->data[0] and passed to sub2video_copy_rect(), which takes
   uint8_t *; verify this declaration survived extraction intact. */
225 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
226 {
227  AVFrame *frame = ist->sub2video.frame;
228  int8_t *dst;
229  int dst_linesize;
230  int num_rects, i;
231  int64_t pts, end_pts;
232 
233  if (!frame)
234  return;
235  if (sub) {
/* start/end_display_time are in ms; convert via AV_TIME_BASE (us). */
236  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
237  AV_TIME_BASE_Q, ist->st->time_base);
238  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
239  AV_TIME_BASE_Q, ist->st->time_base);
240  num_rects = sub->num_rects;
241  } else {
242  pts = ist->sub2video.end_pts;
243  end_pts = INT64_MAX;
244  num_rects = 0;
245  }
246  if (sub2video_get_blank_frame(ist) < 0) {
248  "Impossible to get a blank canvas.\n");
249  return;
250  }
251  dst = frame->data [0];
252  dst_linesize = frame->linesize[0];
253  for (i = 0; i < num_rects; i++)
254  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
255  sub2video_push_ref(ist, pts);
256  ist->sub2video.end_pts = end_pts;
257 }
258 
259 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
260 {
261  InputFile *infile = input_files[ist->file_index];
262  int i, j, nb_reqs;
263  int64_t pts2;
264 
265  /* When a frame is read from a file, examine all sub2video streams in
266  the same file and send the sub2video frame again. Otherwise, decoded
267  video frames could be accumulating in the filter graph while a filter
268  (possibly overlay) is desperately waiting for a subtitle frame. */
269  for (i = 0; i < infile->nb_streams; i++) {
270  InputStream *ist2 = input_streams[infile->ist_index + i];
271  if (!ist2->sub2video.frame)
272  continue;
273  /* subtitles seem to be usually muxed ahead of other streams;
274  if not, subtracting a larger time here is necessary */
275  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
276  /* do not send the heartbeat frame if the subtitle is already ahead */
277  if (pts2 <= ist2->sub2video.last_pts)
278  continue;
279  if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
280  sub2video_update(ist2, NULL);
281  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
282  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
283  if (nb_reqs)
284  sub2video_push_ref(ist2, pts2);
285  }
286 }
287 
/* End-of-stream handling for a sub2video stream: emit a final clearing
   update if a subtitle was still on screen, then (upstream) flush each
   connected buffer source.
   NOTE(review): line 295 -- the body of the for loop, the per-filter flush
   call -- was dropped by the doxygen export, leaving the loop empty. */
288 static void sub2video_flush(InputStream *ist)
289 {
290  int i;
291 
292  if (ist->sub2video.end_pts < INT64_MAX)
293  sub2video_update(ist, NULL);
294  for (i = 0; i < ist->nb_filters; i++)
296 }
297 
298 /* end of sub2video hack */
299 
/* Async-signal-safe part of terminal cleanup: restore the tty attributes
 * saved by term_init(), but only if we actually changed them. */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
 if (restore_tty)
 tcsetattr(0, TCSANOW, &oldtty);
#endif
}
307 
/* Public termination hook: silence further logging, then restore the tty.
   NOTE(review): line 311 -- almost certainly the term_exit_sigsafe() call --
   was dropped by the doxygen export; without it the terminal is never
   restored. Restore from the original ffmpeg.c before compiling. */
308 void term_exit(void)
309 {
310  av_log(NULL, AV_LOG_QUIET, "%s", "");
312 }
313 
/* Signal/exit bookkeeping, written from signal handlers (hence volatile).
   NOTE(review): plain "volatile int" is not formally async-signal-safe
   (sig_atomic_t or C11 atomics would be); this matches upstream, noted only
   for awareness. */
314 static volatile int received_sigterm = 0;
315 static volatile int received_nb_signals = 0;
316 static volatile int transcode_init_done = 0;
317 static volatile int ffmpeg_exited = 0;
/* Final process exit code, set on mux errors and returned from main(). */
318 static int main_return_code = 0;
319 
/* POSIX signal handler: record which signal arrived and hard-exit once the
   user has signalled more than 3 times. Only async-signal-safe calls
   (write, exit) are made on the hard-exit path.
   NOTE(review): the doxygen export dropped line 321 (the parameter list,
   upstream "sigterm_handler(int sig)") and lines 324-325 (upstream:
   received_nb_signals++; and the term_exit_sigsafe() call) -- restore
   before compiling. */
320 static void
322 {
323  received_sigterm = sig;
326  if(received_nb_signals > 3) {
/* Best effort: write()'s return value is deliberately ignored here. */
327  write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
328  strlen("Received > 3 system signals, hard exiting\n"));
329 
330  exit(123);
331  }
332 }
333 
334 #if HAVE_SETCONSOLECTRLHANDLER
335 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
336 {
337  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
338 
339  switch (fdwCtrlType)
340  {
341  case CTRL_C_EVENT:
342  case CTRL_BREAK_EVENT:
343  sigterm_handler(SIGINT);
344  return TRUE;
345 
346  case CTRL_CLOSE_EVENT:
347  case CTRL_LOGOFF_EVENT:
348  case CTRL_SHUTDOWN_EVENT:
349  sigterm_handler(SIGTERM);
350  /* Basically, with these 3 events, when we return from this method the
351  process is hard terminated, so stall as long as we need to
352  to try and let the main thread(s) clean up and gracefully terminate
353  (we have at most 5 seconds, but should be done far before that). */
354  while (!ffmpeg_exited) {
355  Sleep(0);
356  }
357  return TRUE;
358 
359  default:
360  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
361  return FALSE;
362  }
363 }
364 #endif
365 
366 void term_init(void)
367 {
368 #if HAVE_TERMIOS_H
369  if(!run_as_daemon){
370  struct termios tty;
371  if (tcgetattr (0, &tty) == 0) {
372  oldtty = tty;
373  restore_tty = 1;
374 
375  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
376  |INLCR|IGNCR|ICRNL|IXON);
377  tty.c_oflag |= OPOST;
378  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
379  tty.c_cflag &= ~(CSIZE|PARENB);
380  tty.c_cflag |= CS8;
381  tty.c_cc[VMIN] = 1;
382  tty.c_cc[VTIME] = 0;
383 
384  tcsetattr (0, TCSANOW, &tty);
385  }
386  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
387  }
388 #endif
389 
390  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
391  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
392 #ifdef SIGXCPU
393  signal(SIGXCPU, sigterm_handler);
394 #endif
395 #if HAVE_SETCONSOLECTRLHANDLER
396  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
397 #endif
398 }
399 
400 /* read a key without blocking */
/* Returns the pressed key as an unsigned char value, or -1 when no input is
   pending. Three platform paths: select()+read() on a termios tty,
   PeekNamedPipe() when stdin is a Windows pipe (GUI wrappers), and
   kbhit()/getch() on a Windows console. Kept byte-identical: the exact
   syscall sequence per branch is load-bearing. */
401 static int read_key(void)
402 {
403  unsigned char ch;
404 #if HAVE_TERMIOS_H
405  int n = 1;
406  struct timeval tv;
407  fd_set rfds;
408 
409  FD_ZERO(&rfds);
410  FD_SET(0, &rfds);
/* Zero timeout: poll stdin, never block. */
411  tv.tv_sec = 0;
412  tv.tv_usec = 0;
413  n = select(1, &rfds, NULL, NULL, &tv);
414  if (n > 0) {
415  n = read(0, &ch, 1);
416  if (n == 1)
417  return ch;
418 
419  return n;
420  }
421 #elif HAVE_KBHIT
422 # if HAVE_PEEKNAMEDPIPE
423  static int is_pipe;
424  static HANDLE input_handle;
425  DWORD dw, nchars;
426  if(!input_handle){
427  input_handle = GetStdHandle(STD_INPUT_HANDLE);
/* GetConsoleMode fails when stdin is not a console => treat as a pipe. */
428  is_pipe = !GetConsoleMode(input_handle, &dw);
429  }
430 
431  if (is_pipe) {
432  /* When running under a GUI, you will end here. */
433  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
434  // input pipe may have been closed by the program that ran ffmpeg
435  return -1;
436  }
437  //Read it
438  if(nchars != 0) {
439  read(0, &ch, 1);
440  return ch;
441  }else{
442  return -1;
443  }
444  }
445 # endif
446  if(kbhit())
447  return(getch());
448 #endif
449  return -1;
450 }
451 
/* AVIOInterruptCB callback: tells libavformat to abort blocking I/O once
   signals have been received.
   NOTE(review): line 454 -- the return expression, upstream
   "return received_nb_signals > transcode_init_done;" -- was dropped by
   the doxygen export, leaving this non-void function without a return. */
452 static int decode_interrupt_cb(void *ctx)
453 {
455 }
456 
458 
/* Registered exit handler: free every filtergraph, output file/stream,
   input file/stream and global buffer, close the vstats file, and report
   how the run ended. ret is the pending exit code.
   NOTE(review): the doxygen export dropped numerous hyperlinked lines here
   (gaps at original lines 470, 487, 492 -- the "AVFormatContext *s;"
   declaration used below -- 498, 512, 514, 519, 522, 526, 531-532,
   546-547, 549-550, 554, 561, 565 and 574); the body is NOT complete as
   shown and must be restored from the original ffmpeg.c. */
459 static void ffmpeg_cleanup(int ret)
460 {
461  int i, j;
462 
463  if (do_benchmark) {
464  int maxrss = getmaxrss() / 1024;
465  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
466  }
467 
468  for (i = 0; i < nb_filtergraphs; i++) {
469  FilterGraph *fg = filtergraphs[i];
471  for (j = 0; j < fg->nb_inputs; j++) {
472  av_freep(&fg->inputs[j]->name);
473  av_freep(&fg->inputs[j]);
474  }
475  av_freep(&fg->inputs);
476  for (j = 0; j < fg->nb_outputs; j++) {
477  av_freep(&fg->outputs[j]->name);
478  av_freep(&fg->outputs[j]);
479  }
480  av_freep(&fg->outputs);
481  av_freep(&fg->graph_desc);
482 
483  av_freep(&filtergraphs[i]);
484  }
485  av_freep(&filtergraphs);
486 
488 
489  /* close files */
490  for (i = 0; i < nb_output_files; i++) {
491  OutputFile *of = output_files[i];
493  if (!of)
494  continue;
495  s = of->ctx;
496  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
497  avio_closep(&s->pb);
499  av_dict_free(&of->opts);
500 
501  av_freep(&output_files[i]);
502  }
503  for (i = 0; i < nb_output_streams; i++) {
504  OutputStream *ost = output_streams[i];
505 
506  if (!ost)
507  continue;
508 
509  for (j = 0; j < ost->nb_bitstream_filters; j++)
510  av_bsf_free(&ost->bsf_ctx[j]);
511  av_freep(&ost->bsf_ctx);
513 
515  av_frame_free(&ost->last_frame);
516  av_dict_free(&ost->encoder_opts);
517 
518  av_parser_close(ost->parser);
520 
521  av_freep(&ost->forced_keyframes);
523  av_freep(&ost->avfilter);
524  av_freep(&ost->logfile_prefix);
525 
527  ost->audio_channels_mapped = 0;
528 
529  av_dict_free(&ost->sws_dict);
530 
533 
534  av_freep(&output_streams[i]);
535  }
536 #if HAVE_PTHREADS
537  free_input_threads();
538 #endif
539  for (i = 0; i < nb_input_files; i++) {
540  avformat_close_input(&input_files[i]->ctx);
541  av_freep(&input_files[i]);
542  }
543  for (i = 0; i < nb_input_streams; i++) {
544  InputStream *ist = input_streams[i];
545 
548  av_dict_free(&ist->decoder_opts);
551  av_freep(&ist->filters);
552  av_freep(&ist->hwaccel_device);
553 
555 
556  av_freep(&input_streams[i]);
557  }
558 
559  if (vstats_file) {
560  if (fclose(vstats_file))
562  "Error closing vstats file, loss of information possible: %s\n",
563  av_err2str(AVERROR(errno)));
564  }
566 
567  av_freep(&input_streams);
568  av_freep(&input_files);
569  av_freep(&output_streams);
570  av_freep(&output_files);
571 
572  uninit_opts();
573 
575 
576  if (received_sigterm) {
577  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
578  (int) received_sigterm);
579  } else if (ret && transcode_init_done) {
580  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
581  }
582  term_exit();
/* Lets CtrlHandler()'s wait loop know cleanup is finished. */
583  ffmpeg_exited = 1;
584 }
585 
/* Remove from one dictionary every option that also appears in another
   (used to strip options already consumed by a component).
   NOTE(review): the doxygen export dropped the signature (upstream:
   static void remove_avoptions(AVDictionary **a, AVDictionary *b)) and
   line 591 -- the loop body, upstream
   "av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);". Restore before
   compiling. */
587 {
588  AVDictionaryEntry *t = NULL;
589 
590  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
592  }
593 }
594 
/* Abort if any option in m was left unconsumed -- a leftover entry means
   the user passed an option that no component recognized.
   NOTE(review): the doxygen export dropped the signature (upstream:
   static void assert_avoptions(AVDictionary *m)) and line 597, the
   declaration of t (upstream: "AVDictionaryEntry *t;"). Restore before
   compiling. */
596 {
598  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
599  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
600  exit_program(1);
601  }
602 }
603 
604 static void abort_codec_experimental(AVCodec *c, int encoder)
605 {
606  exit_program(1);
607 }
608 
609 static void update_benchmark(const char *fmt, ...)
610 {
611  if (do_benchmark_all) {
612  int64_t t = getutime();
613  va_list va;
614  char buf[1024];
615 
616  if (fmt) {
617  va_start(va, fmt);
618  vsnprintf(buf, sizeof(buf), fmt, va);
619  va_end(va);
620  av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
621  }
622  current_time = t;
623  }
624 }
625 
626 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
627 {
628  int i;
629  for (i = 0; i < nb_output_streams; i++) {
630  OutputStream *ost2 = output_streams[i];
631  ost2->finished |= ost == ost2 ? this_stream : others;
632  }
633 }
634 
/* Hand one encoded/copied packet to the muxer: enforce -frames limits,
   extract video quality/error side data, sanitize PTS/DTS (monotonicity,
   DTS<=PTS) and finally av_interleaved_write_frame() it.
   NOTE(review): the doxygen export dropped the signature (upstream:
   static void write_packet(AVFormatContext *s, AVPacket *pkt,
   OutputStream *ost)) plus original lines 640-641 (the block that
   invalidates timestamps before the assignment at 642), 660 (the
   declaration/first half of the quality-stats side-data fetch whose
   continuation "NULL);" is visible below), 692 (the opening condition of
   the monotonicity check), 725 (the media-type argument of the debug_ts
   av_log) and 736 (the error-path statement after main_return_code).
   Restore from the original ffmpeg.c before compiling. */
636 {
637  AVStream *st = ost->st;
638  int ret;
639 
642  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
643 
644  /*
645  * Audio encoders may split the packets -- #frames in != #packets out.
646  * But there is no reordering, so we can limit the number of output packets
647  * by simply dropping them here.
648  * Counting encoded video frames needs to be done separately because of
649  * reordering, see do_video_out()
650  */
651  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
652  if (ost->frame_number >= ost->max_frames) {
653  av_packet_unref(pkt);
654  return;
655  }
656  ost->frame_number++;
657  }
658  if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
659  int i;
661  NULL);
662  ost->quality = sd ? AV_RL32(sd) : -1;
663  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
664 
665  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
666  if (sd && i < sd[5])
667  ost->error[i] = AV_RL64(sd + 8 + 8*i);
668  else
669  ost->error[i] = -1;
670  }
671 
672  if (ost->frame_rate.num && ost->is_cfr) {
673  if (pkt->duration > 0)
674  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
675  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
676  ost->st->time_base);
677  }
678  }
679 
680  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
681  if (pkt->dts != AV_NOPTS_VALUE &&
682  pkt->pts != AV_NOPTS_VALUE &&
683  pkt->dts > pkt->pts) {
684  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
685  pkt->dts, pkt->pts,
686  ost->file_index, ost->st->index);
/* min+mid+max trick: replaces both stamps with the median of
   pts, dts and last_mux_dts+1. */
687  pkt->pts =
688  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
689  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
690  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
691  }
693  pkt->dts != AV_NOPTS_VALUE &&
694  !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
695  ost->last_mux_dts != AV_NOPTS_VALUE) {
696  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
697  if (pkt->dts < max) {
698  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
699  av_log(s, loglevel, "Non-monotonous DTS in output stream "
700  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
701  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
702  if (exit_on_error) {
703  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
704  exit_program(1);
705  }
706  av_log(s, loglevel, "changing to %"PRId64". This may result "
707  "in incorrect timestamps in the output file.\n",
708  max);
709  if (pkt->pts >= pkt->dts)
710  pkt->pts = FFMAX(pkt->pts, max);
711  pkt->dts = max;
712  }
713  }
714  }
715  ost->last_mux_dts = pkt->dts;
716 
717  ost->data_size += pkt->size;
718  ost->packets_written++;
719 
720  pkt->stream_index = ost->index;
721 
722  if (debug_ts) {
723  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
724  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
726  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
727  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
728  pkt->size
729  );
730  }
731 
732  ret = av_interleaved_write_frame(s, pkt);
733  if (ret < 0) {
734  print_error("av_interleaved_write_frame()", ret);
735  main_return_code = 1;
737  }
738  av_packet_unref(pkt);
739 }
740 
/* Mark this stream's encoder finished; with -shortest, clamp the whole
   file's recording time to this stream's current end so the other streams
   stop with it.
   NOTE(review): line 741 -- the signature, upstream
   "static void close_output_stream(OutputStream *ost)" -- was dropped by
   the doxygen export. */
742 {
743  OutputFile *of = output_files[ost->file_index];
744 
745  ost->finished |= ENCODER_FINISHED;
746  if (of->shortest) {
747  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
748  of->recording_time = FFMIN(of->recording_time, end);
749  }
750 }
751 
/* Run a packet through the stream's bitstream-filter chain (if any) and
   pass each resulting packet to write_packet(). idx walks the chain: it is
   incremented when a packet moves down to the next filter and decremented
   when a filter needs more input (EAGAIN); the loop ends when idx drops
   back to 0.
   NOTE(review): line 752 -- the signature, upstream
   "static void output_packet(AVFormatContext *s, AVPacket *pkt,
   OutputStream *ost)" -- was dropped by the doxygen export. */
753 {
754  int ret = 0;
755 
756  /* apply the output bitstream filters, if any */
757  if (ost->nb_bitstream_filters) {
758  int idx;
759 
760  ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
761  if (ret < 0)
762  goto finish;
763 
764  idx = 1;
765  while (idx) {
766  /* get a packet from the previous filter up the chain */
767  ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
768  /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
769  * the api states this shouldn't happen after init(). Propagate it here to the
770  * muxer and to the next filters in the chain to workaround this.
771  * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
772  * par_out->extradata and adapt muxers accordingly to get rid of this. */
773  if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
774  ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
775  if (ret < 0)
776  goto finish;
777  ost->bsf_extradata_updated[idx - 1] |= 1;
778  }
779  if (ret == AVERROR(EAGAIN)) {
780  ret = 0;
781  idx--;
782  continue;
783  } else if (ret < 0)
784  goto finish;
785 
786  /* send it to the next filter down the chain or to the muxer */
787  if (idx < ost->nb_bitstream_filters) {
788  /* HACK/FIXME! - See above */
789  if (!(ost->bsf_extradata_updated[idx] & 2)) {
790  ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
791  if (ret < 0)
792  goto finish;
793  ost->bsf_extradata_updated[idx] |= 2;
794  }
795  ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
796  if (ret < 0)
797  goto finish;
798  idx++;
799  } else
800  write_packet(s, pkt, ost);
801  }
802  } else
803  write_packet(s, pkt, ost);
804 
805 finish:
806  if (ret < 0 && ret != AVERROR_EOF) {
807  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
808  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
809  if(exit_on_error)
810  exit_program(1);
811  }
812 }
813 
/* Returns 0 (and closes the stream) once the file's -t/-to recording time
   has been reached, 1 while still within bounds.
   NOTE(review): the doxygen export dropped line 814 (the signature,
   upstream "static int check_recording_time(OutputStream *ost)") and
   line 819 (the av_compare_ts(...) half of the condition whose trailing
   "AV_TIME_BASE_Q) >= 0" is visible below). Restore before compiling. */
815 {
816  OutputFile *of = output_files[ost->file_index];
817 
818  if (of->recording_time != INT64_MAX &&
820  AV_TIME_BASE_Q) >= 0) {
821  close_output_stream(ost);
822  return 0;
823  }
824  return 1;
825 }
826 
/* Encode one audio frame and hand the resulting packet(s) to
   output_packet(). Keeps sync_opts advancing by nb_samples and maintains
   the encoded-samples/frames counters.
   NOTE(review): the doxygen export dropped the first signature line
   (upstream: "static void do_audio_out(AVFormatContext *s, OutputStream
   *ost," with the continuation visible below) and original line 848.
   Restore before compiling. */
828  AVFrame *frame)
829 {
830  AVCodecContext *enc = ost->enc_ctx;
831  AVPacket pkt;
832  int got_packet = 0;
833 
834  av_init_packet(&pkt);
835  pkt.data = NULL;
836  pkt.size = 0;
837 
838  if (!check_recording_time(ost))
839  return;
840 
841  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
842  frame->pts = ost->sync_opts;
843  ost->sync_opts = frame->pts + frame->nb_samples;
844  ost->samples_encoded += frame->nb_samples;
845  ost->frames_encoded++;
846 
847  av_assert0(pkt.size || !pkt.data);
849  if (debug_ts) {
850  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
851  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
852  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
853  enc->time_base.num, enc->time_base.den);
854  }
855 
856  if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
857  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
858  exit_program(1);
859  }
860  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
861 
862  if (got_packet) {
/* Encoder timestamps are in enc->time_base; mux in st->time_base. */
863  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
864 
865  if (debug_ts) {
866  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
867  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
868  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
869  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
870  }
871 
872  output_packet(s, &pkt, ost);
873  }
874 }
875 
/* Encode one decoded subtitle and mux it. DVB subtitles are encoded twice
   (one packet to draw, one with num_rects=0 to clear); other codecs once.
   Timestamps are shifted by the output file's start time so -ss/-t work.
   NOTE(review): the doxygen export dropped the first signature line
   (upstream: "static void do_subtitle_out(AVFormatContext *s," with the
   continuation visible below), original line 907 (the condition selecting
   nb = 2, presumably the DVB-subtitle codec-id test) and line 926.
   Restore before compiling. subtitle_out is a file-scope buffer whose
   declaration was also dropped further up. */
877  OutputStream *ost,
878  InputStream *ist,
879  AVSubtitle *sub)
880 {
881  int subtitle_out_max_size = 1024 * 1024;
882  int subtitle_out_size, nb, i;
883  AVCodecContext *enc;
884  AVPacket pkt;
885  int64_t pts;
886 
887  if (sub->pts == AV_NOPTS_VALUE) {
888  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
889  if (exit_on_error)
890  exit_program(1);
891  return;
892  }
893 
894  enc = ost->enc_ctx;
895 
896  if (!subtitle_out) {
897  subtitle_out = av_malloc(subtitle_out_max_size);
898  if (!subtitle_out) {
899  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
900  exit_program(1);
901  }
902  }
903 
904  /* Note: DVB subtitle need one packet to draw them and one other
905  packet to clear them */
906  /* XXX: signal it in the codec context ? */
908  nb = 2;
909  else
910  nb = 1;
911 
912  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
913  pts = sub->pts;
914  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
915  pts -= output_files[ost->file_index]->start_time;
916  for (i = 0; i < nb; i++) {
917  unsigned save_num_rects = sub->num_rects;
918 
919  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
920  if (!check_recording_time(ost))
921  return;
922 
923  sub->pts = pts;
924  // start_display_time is required to be 0
925  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
927  sub->start_display_time = 0;
928  if (i == 1)
929  sub->num_rects = 0;
930 
931  ost->frames_encoded++;
932 
933  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
934  subtitle_out_max_size, sub);
935  if (i == 1)
936  sub->num_rects = save_num_rects;
937  if (subtitle_out_size < 0) {
938  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
939  exit_program(1);
940  }
941 
942  av_init_packet(&pkt);
943  pkt.data = subtitle_out;
944  pkt.size = subtitle_out_size;
945  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
946  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
947  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
948  /* XXX: the pts correction is handled here. Maybe handling
949  it in the codec would be better */
/* 90 = 90kHz MPEG-TS ticks per millisecond of display time. */
950  if (i == 0)
951  pkt.pts += 90 * sub->start_display_time;
952  else
953  pkt.pts += 90 * sub->end_display_time;
954  }
955  pkt.dts = pkt.pts;
956  output_packet(s, &pkt, ost);
957  }
958 }
959 
961  OutputStream *ost,
962  AVFrame *next_picture,
963  double sync_ipts)
964 {
965  int ret, format_video_sync;
966  AVPacket pkt;
967  AVCodecContext *enc = ost->enc_ctx;
968  AVCodecParameters *mux_par = ost->st->codecpar;
969  int nb_frames, nb0_frames, i;
970  double delta, delta0;
971  double duration = 0;
972  int frame_size = 0;
973  InputStream *ist = NULL;
975 
976  if (ost->source_index >= 0)
977  ist = input_streams[ost->source_index];
978 
979  if (filter->inputs[0]->frame_rate.num > 0 &&
980  filter->inputs[0]->frame_rate.den > 0)
981  duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
982 
983  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
984  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
985 
986  if (!ost->filters_script &&
987  !ost->filters &&
988  next_picture &&
989  ist &&
990  lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
991  duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
992  }
993 
994  if (!next_picture) {
995  //end, flushing
996  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
997  ost->last_nb0_frames[1],
998  ost->last_nb0_frames[2]);
999  } else {
1000  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1001  delta = delta0 + duration;
1002 
1003  /* by default, we output a single frame */
1004  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1005  nb_frames = 1;
1006 
1007  format_video_sync = video_sync_method;
1008  if (format_video_sync == VSYNC_AUTO) {
1009  if(!strcmp(s->oformat->name, "avi")) {
1010  format_video_sync = VSYNC_VFR;
1011  } else
1013  if ( ist
1014  && format_video_sync == VSYNC_CFR
1015  && input_files[ist->file_index]->ctx->nb_streams == 1
1016  && input_files[ist->file_index]->input_ts_offset == 0) {
1017  format_video_sync = VSYNC_VSCFR;
1018  }
1019  if (format_video_sync == VSYNC_CFR && copy_ts) {
1020  format_video_sync = VSYNC_VSCFR;
1021  }
1022  }
1023  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1024 
1025  if (delta0 < 0 &&
1026  delta > 0 &&
1027  format_video_sync != VSYNC_PASSTHROUGH &&
1028  format_video_sync != VSYNC_DROP) {
1029  if (delta0 < -0.6) {
1030  av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1031  } else
1032  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1033  sync_ipts = ost->sync_opts;
1034  duration += delta0;
1035  delta0 = 0;
1036  }
1037 
1038  switch (format_video_sync) {
1039  case VSYNC_VSCFR:
1040  if (ost->frame_number == 0 && delta0 >= 0.5) {
1041  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1042  delta = duration;
1043  delta0 = 0;
1044  ost->sync_opts = lrint(sync_ipts);
1045  }
1046  case VSYNC_CFR:
1047  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1048  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1049  nb_frames = 0;
1050  } else if (delta < -1.1)
1051  nb_frames = 0;
1052  else if (delta > 1.1) {
1053  nb_frames = lrintf(delta);
1054  if (delta0 > 1.1)
1055  nb0_frames = lrintf(delta0 - 0.6);
1056  }
1057  break;
1058  case VSYNC_VFR:
1059  if (delta <= -0.6)
1060  nb_frames = 0;
1061  else if (delta > 0.6)
1062  ost->sync_opts = lrint(sync_ipts);
1063  break;
1064  case VSYNC_DROP:
1065  case VSYNC_PASSTHROUGH:
1066  ost->sync_opts = lrint(sync_ipts);
1067  break;
1068  default:
1069  av_assert0(0);
1070  }
1071  }
1072 
1073  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1074  nb0_frames = FFMIN(nb0_frames, nb_frames);
1075 
1076  memmove(ost->last_nb0_frames + 1,
1077  ost->last_nb0_frames,
1078  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1079  ost->last_nb0_frames[0] = nb0_frames;
1080 
1081  if (nb0_frames == 0 && ost->last_dropped) {
1082  nb_frames_drop++;
1084  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1085  ost->frame_number, ost->st->index, ost->last_frame->pts);
1086  }
1087  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1088  if (nb_frames > dts_error_threshold * 30) {
1089  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1090  nb_frames_drop++;
1091  return;
1092  }
1093  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1094  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1095  }
1096  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1097 
1098  /* duplicates frame if needed */
1099  for (i = 0; i < nb_frames; i++) {
1100  AVFrame *in_picture;
1101  av_init_packet(&pkt);
1102  pkt.data = NULL;
1103  pkt.size = 0;
1104 
1105  if (i < nb0_frames && ost->last_frame) {
1106  in_picture = ost->last_frame;
1107  } else
1108  in_picture = next_picture;
1109 
1110  if (!in_picture)
1111  return;
1112 
1113  in_picture->pts = ost->sync_opts;
1114 
1115 #if 1
1116  if (!check_recording_time(ost))
1117 #else
1118  if (ost->frame_number >= ost->max_frames)
1119 #endif
1120  return;
1121 
1122 #if FF_API_LAVF_FMT_RAWPICTURE
1123  if (s->oformat->flags & AVFMT_RAWPICTURE &&
1124  enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1125  /* raw pictures are written as AVPicture structure to
1126  avoid any copies. We support temporarily the older
1127  method. */
1128  if (in_picture->interlaced_frame)
1129  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1130  else
1131  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1132  pkt.data = (uint8_t *)in_picture;
1133  pkt.size = sizeof(AVPicture);
1134  pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1135  pkt.flags |= AV_PKT_FLAG_KEY;
1136 
1137  output_packet(s, &pkt, ost);
1138  } else
1139 #endif
1140  {
1141  int got_packet, forced_keyframe = 0;
1142  double pts_time;
1143 
1145  ost->top_field_first >= 0)
1146  in_picture->top_field_first = !!ost->top_field_first;
1147 
1148  if (in_picture->interlaced_frame) {
1149  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1150  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1151  else
1152  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1153  } else
1154  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1155 
1156  in_picture->quality = enc->global_quality;
1157  in_picture->pict_type = 0;
1158 
1159  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1160  in_picture->pts * av_q2d(enc->time_base) : NAN;
1161  if (ost->forced_kf_index < ost->forced_kf_count &&
1162  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1163  ost->forced_kf_index++;
1164  forced_keyframe = 1;
1165  } else if (ost->forced_keyframes_pexpr) {
1166  double res;
1167  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1170  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1176  res);
1177  if (res) {
1178  forced_keyframe = 1;
1184  }
1185 
1187  } else if ( ost->forced_keyframes
1188  && !strncmp(ost->forced_keyframes, "source", 6)
1189  && in_picture->key_frame==1) {
1190  forced_keyframe = 1;
1191  }
1192 
1193  if (forced_keyframe) {
1194  in_picture->pict_type = AV_PICTURE_TYPE_I;
1195  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1196  }
1197 
1199  if (debug_ts) {
1200  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1201  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1202  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1203  enc->time_base.num, enc->time_base.den);
1204  }
1205 
1206  ost->frames_encoded++;
1207 
1208  ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1209  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1210  if (ret < 0) {
1211  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1212  exit_program(1);
1213  }
1214 
1215  if (got_packet) {
1216  if (debug_ts) {
1217  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1218  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1219  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1220  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1221  }
1222 
1223  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1224  pkt.pts = ost->sync_opts;
1225 
1226  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1227 
1228  if (debug_ts) {
1229  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1230  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1231  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1232  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1233  }
1234 
1235  frame_size = pkt.size;
1236  output_packet(s, &pkt, ost);
1237 
1238  /* if two pass, output log */
1239  if (ost->logfile && enc->stats_out) {
1240  fprintf(ost->logfile, "%s", enc->stats_out);
1241  }
1242  }
1243  }
1244  ost->sync_opts++;
1245  /*
1246  * For video, number of frames in == number of packets out.
1247  * But there may be reordering, so we can't throw away frames on encoder
1248  * flush, we need to limit them here, before they go into encoder.
1249  */
1250  ost->frame_number++;
1251 
1252  if (vstats_filename && frame_size)
1253  do_video_stats(ost, frame_size);
1254  }
1255 
1256  if (!ost->last_frame)
1257  ost->last_frame = av_frame_alloc();
1258  av_frame_unref(ost->last_frame);
1259  if (next_picture && ost->last_frame)
1260  av_frame_ref(ost->last_frame, next_picture);
1261  else
1262  av_frame_free(&ost->last_frame);
1263 }
1264 
1265 static double psnr(double d)
1266 {
1267  return -10.0 * log10(d);
1268 }
1269 
/* NOTE(review): the signature line is elided from this extract; from the call
   site above this is presumably do_video_stats(OutputStream *ost, int frame_size)
   -- confirm against the upstream file. Appends one line of per-frame video
   statistics (quality, PSNR, sizes, bitrates) to the -vstats log file. */
1271 {
1272  AVCodecContext *enc;
1273  int frame_number;
1274  double ti1, bitrate, avg_bitrate;
1275 
1276  /* this is executed just the first time do_video_stats is called */
1277  if (!vstats_file) {
1278  vstats_file = fopen(vstats_filename, "w");
1279  if (!vstats_file) {
1280  perror("fopen");
1281  exit_program(1);
1282  }
1283  }
1284 
1285  enc = ost->enc_ctx;
1286  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1287  frame_number = ost->st->nb_frames;
1288  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1289  ost->quality / (float)FF_QP2LAMBDA);
1290 
/* PSNR is derived from the accumulated squared error, normalized by the
   maximum possible error for an 8-bit frame (width*height*255^2). */
1291  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1292  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1293 
1294  fprintf(vstats_file,"f_size= %6d ", frame_size);
1295  /* compute pts value */
1296  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* clamp tiny/negative durations so the average-bitrate division below is safe */
1297  if (ti1 < 0.01)
1298  ti1 = 0.01;
1299 
/* instantaneous bitrate of this frame vs. average over the whole stream so far */
1300  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1301  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1302  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1303  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1304  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1305  }
1306 }
1307 
/* NOTE(review): the signature line (and upstream line 1313, which presumably
   marks the stream itself as finished) are elided from this extract -- confirm.
   With -shortest, finishing one stream of a file finishes every stream of
   that file, for both the encoder and the muxer side. */
1309 {
1310  OutputFile *of = output_files[ost->file_index];
1311  int i;
1312 
1314 
1315  if (of->shortest) {
1316  for (i = 0; i < of->ctx->nb_streams; i++)
1317  output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1318  }
1319 }
1320 
1321 /**
1322  * Get and encode new output from any of the filtergraphs, without causing
1323  * activity.
1324  *
1325  * @return 0 for success, <0 for severe errors
1326  */
/* NOTE(review): upstream lines 1336, 1352, 1355 and 1404 are elided from this
   extract (presumably the `filter` declaration, the buffersink flag argument
   and two av_log() first halves) -- confirm against the upstream file. */
static int reap_filters(int flush)
1328 {
1329  AVFrame *filtered_frame = NULL;
1330  int i;
1331 
1332  /* Reap all buffers present in the buffer sinks */
1333  for (i = 0; i < nb_output_streams; i++) {
1334  OutputStream *ost = output_streams[i];
1335  OutputFile *of = output_files[ost->file_index];
1337  AVCodecContext *enc = ost->enc_ctx;
1338  int ret = 0;
1339 
/* streams with no filtergraph attached (e.g. streamcopy) have nothing to reap */
1340  if (!ost->filter)
1341  continue;
1342  filter = ost->filter->filter;
1343 
/* filtered_frame is allocated lazily once per stream and reused */
1344  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1345  return AVERROR(ENOMEM);
1346  }
1347  filtered_frame = ost->filtered_frame;
1348 
/* drain the buffersink until it reports EAGAIN or EOF */
1349  while (1) {
1350  double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1351  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1353  if (ret < 0) {
1354  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1356  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
/* on flush, a NULL frame is pushed to do_video_out so the encoder is drained */
1357  } else if (flush && ret == AVERROR_EOF) {
1358  if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1359  do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1360  }
1361  break;
1362  }
1363  if (ost->finished) {
1364  av_frame_unref(filtered_frame);
1365  continue;
1366  }
1367  if (filtered_frame->pts != AV_NOPTS_VALUE) {
1368  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1369  AVRational tb = enc->time_base;
/* widen the timebase denominator so float_pts keeps extra fractional
   precision relative to the integer pts computed below */
1370  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1371 
1372  tb.den <<= extra_bits;
1373  float_pts =
1374  av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1375  av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1376  float_pts /= 1 << extra_bits;
1377  // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1378  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1379 
/* integer pts in encoder timebase, shifted so output starts at start_time */
1380  filtered_frame->pts =
1381  av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1382  av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1383  }
1384  //if (ost->source_index >= 0)
1385  // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1386 
1387  switch (filter->inputs[0]->type) {
1388  case AVMEDIA_TYPE_VIDEO:
1389  if (!ost->frame_aspect_ratio.num)
1390  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1391 
1392  if (debug_ts) {
1393  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1394  av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1395  float_pts,
1396  enc->time_base.num, enc->time_base.den);
1397  }
1398 
1399  do_video_out(of->ctx, ost, filtered_frame, float_pts);
1400  break;
1401  case AVMEDIA_TYPE_AUDIO:
/* the encoder cannot follow channel-count changes unless it advertises
   AV_CODEC_CAP_PARAM_CHANGE, so mismatched frames are dropped here */
1402  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1403  enc->channels != av_frame_get_channels(filtered_frame)) {
1405  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1406  break;
1407  }
1408  do_audio_out(of->ctx, ost, filtered_frame);
1409  break;
1410  default:
1411  // TODO support subtitle filters
1412  av_assert0(0);
1413  }
1414 
1415  av_frame_unref(filtered_frame);
1416  }
1417  }
1418 
1419  return 0;
1420 }
1421 
/* Print the end-of-run summary: per-type output byte totals and muxing
   overhead at INFO level, then per-stream demux/decode and encode/mux
   statistics at VERBOSE level, plus a warning when nothing was encoded.
   NOTE(review): upstream lines 1441-1442 are elided from this extract
   (presumably the condition that clears pass1_used) -- confirm. */
static void print_final_stats(int64_t total_size)
1423 {
1424  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1425  uint64_t subtitle_size = 0;
1426  uint64_t data_size = 0;
/* percent stays negative when the muxing overhead cannot be computed */
1427  float percent = -1.0;
1428  int i, j;
1429  int pass1_used = 1;
1430 
/* accumulate written bytes per media type across all output streams */
1431  for (i = 0; i < nb_output_streams; i++) {
1432  OutputStream *ost = output_streams[i];
1433  switch (ost->enc_ctx->codec_type) {
1434  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1435  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1436  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1437  default: other_size += ost->data_size; break;
1438  }
1439  extra_size += ost->enc_ctx->extradata_size;
1440  data_size += ost->data_size;
1443  pass1_used = 0;
1444  }
1445 
/* muxing overhead = container bytes beyond the raw payload, as a percentage */
1446  if (data_size && total_size>0 && total_size >= data_size)
1447  percent = 100.0 * (total_size - data_size) / data_size;
1448 
1449  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1450  video_size / 1024.0,
1451  audio_size / 1024.0,
1452  subtitle_size / 1024.0,
1453  other_size / 1024.0,
1454  extra_size / 1024.0);
1455  if (percent >= 0.0)
1456  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1457  else
1458  av_log(NULL, AV_LOG_INFO, "unknown");
1459  av_log(NULL, AV_LOG_INFO, "\n");
1460 
1461  /* print verbose per-stream stats */
1462  for (i = 0; i < nb_input_files; i++) {
1463  InputFile *f = input_files[i];
/* note: this total_size intentionally shadows the parameter for per-file sums */
1464  uint64_t total_packets = 0, total_size = 0;
1465 
1466  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1467  i, f->ctx->filename);
1468 
1469  for (j = 0; j < f->nb_streams; j++) {
1470  InputStream *ist = input_streams[f->ist_index + j];
1471  enum AVMediaType type = ist->dec_ctx->codec_type;
1472 
1473  total_size += ist->data_size;
1474  total_packets += ist->nb_packets;
1475 
1476  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1477  i, j, media_type_string(type));
1478  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1479  ist->nb_packets, ist->data_size);
1480 
1481  if (ist->decoding_needed) {
1482  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1483  ist->frames_decoded);
1484  if (type == AVMEDIA_TYPE_AUDIO)
1485  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1486  av_log(NULL, AV_LOG_VERBOSE, "; ");
1487  }
1488 
1489  av_log(NULL, AV_LOG_VERBOSE, "\n");
1490  }
1491 
1492  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1493  total_packets, total_size);
1494  }
1495 
/* mirror of the loop above for the output side: encode and mux statistics */
1496  for (i = 0; i < nb_output_files; i++) {
1497  OutputFile *of = output_files[i];
1498  uint64_t total_packets = 0, total_size = 0;
1499 
1500  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1501  i, of->ctx->filename);
1502 
1503  for (j = 0; j < of->ctx->nb_streams; j++) {
1504  OutputStream *ost = output_streams[of->ost_index + j];
1505  enum AVMediaType type = ost->enc_ctx->codec_type;
1506 
1507  total_size += ost->data_size;
1508  total_packets += ost->packets_written;
1509 
1510  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1511  i, j, media_type_string(type));
1512  if (ost->encoding_needed) {
1513  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1514  ost->frames_encoded);
1515  if (type == AVMEDIA_TYPE_AUDIO)
1516  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1517  av_log(NULL, AV_LOG_VERBOSE, "; ");
1518  }
1519 
1520  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1521  ost->packets_written, ost->data_size);
1522 
1523  av_log(NULL, AV_LOG_VERBOSE, "\n");
1524  }
1525 
1526  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1527  total_packets, total_size);
1528  }
/* warn when absolutely nothing was written; during a first encoding pass an
   empty output is expected, so the hint is suppressed then */
1529  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1530  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1531  if (pass1_used) {
1532  av_log(NULL, AV_LOG_WARNING, "\n");
1533  } else {
1534  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1535  }
1536  }
1537 }
1538 
/* Emit the periodic one-line status ("frame= ... fps= ... bitrate= ...") and,
   in parallel, the machine-readable key=value progress report sent to
   -progress. Rate-limited to one update per 500ms unless is_last_report.
   NOTE(review): upstream lines 1648, 1689, 1691 and 1722 are elided from this
   extract (presumably a stream-validity check, the dup/drop print condition
   and its arguments, and an av_log first half) -- confirm. */
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1540 {
1541  char buf[1024];
1542  AVBPrint buf_script;
1543  OutputStream *ost;
1544  AVFormatContext *oc;
1545  int64_t total_size;
1546  AVCodecContext *enc;
1547  int frame_number, vid, i;
1548  double bitrate;
1549  double speed;
/* INT64_MIN + 1 so FFABS(pts) below cannot overflow on the initial value */
1550  int64_t pts = INT64_MIN + 1;
1551  static int64_t last_time = -1;
1552  static int qp_histogram[52];
1553  int hours, mins, secs, us;
1554  int ret;
1555  float t;
1556 
1557  if (!print_stats && !is_last_report && !progress_avio)
1558  return;
1559 
/* throttle intermediate reports to at most one every 500ms of wall clock */
1560  if (!is_last_report) {
1561  if (last_time == -1) {
1562  last_time = cur_time;
1563  return;
1564  }
1565  if ((cur_time - last_time) < 500000)
1566  return;
1567  last_time = cur_time;
1568  }
1569 
/* elapsed wall-clock time in seconds since transcoding started */
1570  t = (cur_time-timer_start) / 1000000.0;
1571 
1572 
1573  oc = output_files[0]->ctx;
1574 
1575  total_size = avio_size(oc->pb);
1576  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1577  total_size = avio_tell(oc->pb);
1578 
1579  buf[0] = '\0';
1580  vid = 0;
1581  av_bprint_init(&buf_script, 0, 1);
1582  for (i = 0; i < nb_output_streams; i++) {
1583  float q = -1;
1584  ost = output_streams[i];
1585  enc = ost->enc_ctx;
1586  if (!ost->stream_copy)
1587  q = ost->quality / (float) FF_QP2LAMBDA;
1588 
/* only the first video stream drives the frame/fps fields; later video
   streams just append their quantizer value */
1589  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1590  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1591  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1592  ost->file_index, ost->index, q);
1593  }
1594  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1595  float fps;
1596 
1597  frame_number = ost->frame_number;
1598  fps = t > 1 ? frame_number / t : 0;
/* "fps < 9.95" picks 1 decimal for small rates, 0 decimals otherwise */
1599  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1600  frame_number, fps < 9.95, fps, q);
1601  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1602  av_bprintf(&buf_script, "fps=%.1f\n", fps);
1603  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1604  ost->file_index, ost->index, q);
1605  if (is_last_report)
1606  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* -qphist: print a log-scaled histogram digit per quantizer bucket */
1607  if (qp_hist) {
1608  int j;
1609  int qp = lrintf(q);
1610  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1611  qp_histogram[qp]++;
1612  for (j = 0; j < 32; j++)
1613  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1614  }
1615 
1616  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1617  int j;
1618  double error, error_sum = 0;
1619  double scale, scale_sum = 0;
1620  double p;
1621  char type[3] = { 'Y','U','V' };
1622  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1623  for (j = 0; j < 3; j++) {
/* final report: global error over all frames; periodic: last frame only */
1624  if (is_last_report) {
1625  error = enc->error[j];
1626  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1627  } else {
1628  error = ost->error[j];
1629  scale = enc->width * enc->height * 255.0 * 255.0;
1630  }
/* chroma planes are subsampled to a quarter of the luma area here --
   presumably assumes 4:2:0; confirm against upstream */
1631  if (j)
1632  scale /= 4;
1633  error_sum += error;
1634  scale_sum += scale;
1635  p = psnr(error / scale);
1636  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1637  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1638  ost->file_index, ost->index, type[j] | 32, p);
1639  }
1640  p = psnr(error_sum / scale_sum);
1641  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1642  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1643  ost->file_index, ost->index, p);
1644  }
1645  vid = 1;
1646  }
1647  /* compute min output value */
1649  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1650  ost->st->time_base, AV_TIME_BASE_Q));
1651  if (is_last_report)
1652  nb_frames_drop += ost->last_dropped;
1653  }
1654 
/* split the furthest output timestamp into h:m:s.centiseconds for display */
1655  secs = FFABS(pts) / AV_TIME_BASE;
1656  us = FFABS(pts) % AV_TIME_BASE;
1657  mins = secs / 60;
1658  secs %= 60;
1659  hours = mins / 60;
1660  mins %= 60;
1661 
1662  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1663  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1664 
1665  if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1666  "size=N/A time=");
1667  else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1668  "size=%8.0fkB time=", total_size / 1024.0);
1669  if (pts < 0)
1670  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1671  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1672  "%02d:%02d:%02d.%02d ", hours, mins, secs,
1673  (100 * us) / AV_TIME_BASE);
1674 
1675  if (bitrate < 0) {
1676  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1677  av_bprintf(&buf_script, "bitrate=N/A\n");
1678  }else{
1679  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1680  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1681  }
1682 
1683  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1684  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1685  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1686  av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1687  hours, mins, secs, us);
1688 
1690  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1692  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1693  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1694 
1695  if (speed < 0) {
1696  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1697  av_bprintf(&buf_script, "speed=N/A\n");
1698  } else {
1699  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1700  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1701  }
1702 
/* intermediate reports end with '\r' so the line is overwritten in place */
1703  if (print_stats || is_last_report) {
1704  const char end = is_last_report ? '\n' : '\r';
1705  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1706  fprintf(stderr, "%s %c", buf, end);
1707  } else
1708  av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1709 
1710  fflush(stderr);
1711  }
1712 
1713  if (progress_avio) {
1714  av_bprintf(&buf_script, "progress=%s\n",
1715  is_last_report ? "end" : "continue");
/* size - 1 keeps the bprint NUL terminator out of the written payload */
1716  avio_write(progress_avio, buf_script.str,
1717  FFMIN(buf_script.len, buf_script.size - 1));
1718  avio_flush(progress_avio);
1719  av_bprint_finalize(&buf_script, NULL);
1720  if (is_last_report) {
1721  if ((ret = avio_closep(&progress_avio)) < 0)
1723  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1724  }
1725  }
1726 
1727  if (is_last_report)
1728  print_final_stats(total_size);
1729 }
1730 
/* Drain every active encoder at end of stream by feeding it NULL frames
   until it stops producing packets, muxing whatever comes out.
   NOTE(review): upstream lines 1776 and 1799 are elided from this extract
   (presumably an update_benchmark(NULL) call and the video/vstats condition
   guarding do_video_stats) -- confirm. */
static void flush_encoders(void)
1732 {
1733  int i, ret;
1734 
1735  for (i = 0; i < nb_output_streams; i++) {
1736  OutputStream *ost = output_streams[i];
1737  AVCodecContext *enc = ost->enc_ctx;
1738  AVFormatContext *os = output_files[ost->file_index]->ctx;
1739  int stop_encoding = 0;
1740 
/* streamcopy outputs have no encoder to flush */
1741  if (!ost->encoding_needed)
1742  continue;
1743 
/* frame_size <= 1 means the audio encoder buffers no samples, so there is
   nothing left inside it to drain */
1744  if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1745  continue;
1746 #if FF_API_LAVF_FMT_RAWPICTURE
1747  if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1748  continue;
1749 #endif
1750 
1751  for (;;) {
1752  int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1753  const char *desc;
1754 
1755  switch (enc->codec_type) {
1756  case AVMEDIA_TYPE_AUDIO:
1757  encode = avcodec_encode_audio2;
1758  desc = "audio";
1759  break;
1760  case AVMEDIA_TYPE_VIDEO:
1761  encode = avcodec_encode_video2;
1762  desc = "video";
1763  break;
1764  default:
1765  stop_encoding = 1;
1766  }
1767 
1768  if (encode) {
1769  AVPacket pkt;
1770  int pkt_size;
1771  int got_packet;
1772  av_init_packet(&pkt);
1773  pkt.data = NULL;
1774  pkt.size = 0;
1775 
/* a NULL frame signals end-of-stream to the encoder (drain mode) */
1777  ret = encode(enc, &pkt, NULL, &got_packet);
1778  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1779  if (ret < 0) {
1780  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1781  desc,
1782  av_err2str(ret));
1783  exit_program(1);
1784  }
/* two-pass stats can still be produced while draining */
1785  if (ost->logfile && enc->stats_out) {
1786  fprintf(ost->logfile, "%s", enc->stats_out);
1787  }
/* no packet out means the encoder is fully drained */
1788  if (!got_packet) {
1789  stop_encoding = 1;
1790  break;
1791  }
1792  if (ost->finished & MUXER_FINISHED) {
1793  av_packet_unref(&pkt);
1794  continue;
1795  }
1796  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1797  pkt_size = pkt.size;
1798  output_packet(os, &pkt, ost);
1800  do_video_stats(ost, pkt_size);
1801  }
1802  }
1803 
1804  if (stop_encoding)
1805  break;
1806  }
1807  }
1808 }
1809 
1810 /*
1811  * Check whether a packet from ist should be written into ost at this time
1812  */
/* NOTE(review): the signature line is elided from this extract; per the
   comment above this is presumably
   check_output_constraints(InputStream *ist, OutputStream *ost) -- confirm.
   Returns 1 when a packet from ist should be written into ost now:
   ost must be fed by exactly this input stream, must not be finished,
   and the input must have reached the output file's start time. */
1814 {
1815  OutputFile *of = output_files[ost->file_index];
1816  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1817 
1818  if (ost->source_index != ist_index)
1819  return 0;
1820 
1821  if (ost->finished)
1822  return 0;
1823 
/* drop everything before -ss on the output side */
1824  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1825  return 0;
1826 
1827  return 1;
1828 }
1829 
/* Copy one input packet to an output stream without re-encoding: rescale
   its timestamps into the output timebase, honour start/recording-time
   limits, and run the parser-change fixup for codecs that need it.
   NOTE(review): upstream lines 1842, 1887, 1891 and 1906 are elided from
   this extract (presumably the keyframe-skip condition tail, the duration
   declaration, an av_rescale_delta argument line, and the keyframe flag for
   av_parser_change) -- confirm. */
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1831 {
1832  OutputFile *of = output_files[ost->file_index];
1833  InputFile *f = input_files [ist->file_index];
1834  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1835  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1836  AVPicture pict;
1837  AVPacket opkt;
1838 
1839  av_init_packet(&opkt);
1840 
/* don't start a copied stream on a non-keyframe */
1841  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1843  return;
1844 
/* unless -copypriorss is set, drop packets before the comparison start time */
1845  if (!ost->frame_number && !ost->copy_prior_start) {
1846  int64_t comp_start = start_time;
1847  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1848  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1849  if (pkt->pts == AV_NOPTS_VALUE ?
1850  ist->pts < comp_start :
1851  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
1852  return;
1853  }
1854 
/* output-side -t limit reached: finish this stream */
1855  if (of->recording_time != INT64_MAX &&
1856  ist->pts >= of->recording_time + start_time) {
1857  close_output_stream(ost);
1858  return;
1859  }
1860 
/* input-side -t limit, measured from the input file's own start time */
1861  if (f->recording_time != INT64_MAX) {
1862  start_time = f->ctx->start_time;
1863  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
1864  start_time += f->start_time;
1865  if (ist->pts >= f->recording_time + start_time) {
1866  close_output_stream(ost);
1867  return;
1868  }
1869  }
1870 
1871  /* force the input stream PTS */
1872  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1873  ost->sync_opts++;
1874 
1875  if (pkt->pts != AV_NOPTS_VALUE)
1876  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1877  else
1878  opkt.pts = AV_NOPTS_VALUE;
1879 
/* fall back to the tracked input dts when the packet carries none */
1880  if (pkt->dts == AV_NOPTS_VALUE)
1881  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1882  else
1883  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1884  opkt.dts -= ost_tb_start_time;
1885 
/* audio: rescale via av_rescale_delta to keep sample-accurate timestamps */
1886  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1888  if(!duration)
1889  duration = ist->dec_ctx->frame_size;
1890  opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1892  ost->st->time_base) - ost_tb_start_time;
1893  }
1894 
1895  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1896  opkt.flags = pkt->flags;
1897  // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1898  if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
1899  && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
1900  && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
1901  && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
1902  ) {
1903  int ret = av_parser_change(ost->parser, ost->parser_avctx,
1904  &opkt.data, &opkt.size,
1905  pkt->data, pkt->size,
1907  if (ret < 0) {
1908  av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
1909  av_err2str(ret));
1910  exit_program(1);
1911  }
/* a nonzero return means av_parser_change allocated a new buffer; wrap it
   so the packet owns (and later frees) that memory */
1912  if (ret) {
1913  opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1914  if (!opkt.buf)
1915  exit_program(1);
1916  }
1917  } else {
1918  opkt.data = pkt->data;
1919  opkt.size = pkt->size;
1920  }
1921  av_copy_packet_side_data(&opkt, pkt);
1922 
1923 #if FF_API_LAVF_FMT_RAWPICTURE
1924  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
1925  ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
1926  (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1927  /* store AVPicture in AVPacket, as expected by the output format */
1928  int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
1929  if (ret < 0) {
1930  av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1931  av_err2str(ret));
1932  exit_program(1);
1933  }
1934  opkt.data = (uint8_t *)&pict;
1935  opkt.size = sizeof(AVPicture);
1936  opkt.flags |= AV_PKT_FLAG_KEY;
1937  }
1938 #endif
1939 
1940  output_packet(of->ctx, &opkt, ost);
1941 }
1942 
/* NOTE(review): the signature line is elided from this extract; from the call
   site in decode_audio this is presumably
   guess_input_channel_layout(InputStream *ist) -- confirm. Upstream line 1952
   (presumably the call that fills dec->channel_layout from the channel count)
   is also elided. Returns 1 when a layout is present or could be guessed,
   0 when none could be determined. */
1944 {
1945  AVCodecContext *dec = ist->dec_ctx;
1946 
1947  if (!dec->channel_layout) {
1948  char layout_name[256];
1949 
/* -guess_layout_max caps how many channels we are willing to guess for */
1950  if (dec->channels > ist->guess_layout_max)
1951  return 0;
1953  if (!dec->channel_layout)
1954  return 0;
1955  av_get_channel_layout_string(layout_name, sizeof(layout_name),
1956  dec->channels, dec->channel_layout);
/* guessing is noisy on purpose: the user should know the layout was inferred */
1957  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1958  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
1959  }
1960  return 1;
1961 }
1962 
/* Bookkeeping after every decode call: count successes/failures in
   decode_error_stat[] and, with -xerror, abort on decode errors or on
   corrupt decoded frames.
   NOTE(review): upstream line 1972 is elided from this extract (presumably
   the inner check of the decoded frame's corruption/error flags that the
   dangling brace at line 1976 closes) -- confirm. */
static void check_decode_result(InputStream *ist, int *got_output, int ret)
1964 {
/* decode_error_stat[0] counts successful decodes, [1] counts failures */
1965  if (*got_output || ret<0)
1966  decode_error_stat[ret<0] ++;
1967 
1968  if (ret < 0 && exit_on_error)
1969  exit_program(1);
1970 
1971  if (exit_on_error && *got_output && ist) {
1973  av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
1974  exit_program(1);
1975  }
1976  }
1977 }
1978 
/* Decode one audio packet, reconcile its timestamp from the best available
   source, reconfigure the filtergraphs when the sample format/rate/layout
   changes mid-stream, and push the decoded frame into every input filter.
   Returns the decoder's return value, or a negative error from the filter
   pushes. NOTE(review): upstream lines 1992, 2034, 2038, 2041 and 2091 are
   elided from this extract (presumably an update_benchmark(NULL) call,
   layout/log argument lines and the buffersrc flag argument) -- confirm. */
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1980 {
1981  AVFrame *decoded_frame, *f;
1982  AVCodecContext *avctx = ist->dec_ctx;
1983  int i, ret, err = 0, resample_changed;
1984  AVRational decoded_frame_tb;
1985 
/* frames are allocated lazily once per input stream and reused */
1986  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1987  return AVERROR(ENOMEM);
1988  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1989  return AVERROR(ENOMEM);
1990  decoded_frame = ist->decoded_frame;
1991 
1993  ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1994  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
1995 
1996  if (ret >= 0 && avctx->sample_rate <= 0) {
1997  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1998  ret = AVERROR_INVALIDDATA;
1999  }
2000 
2001  check_decode_result(ist, got_output, ret);
2002 
2003  if (!*got_output || ret < 0)
2004  return ret;
2005 
2006  ist->samples_decoded += decoded_frame->nb_samples;
2007  ist->frames_decoded++;
2008 
#if 1
2010  /* increment next_dts to use for the case where the input stream does not
2011  have timestamps or there are multiple frames in the packet */
2012  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2013  avctx->sample_rate;
2014  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2015  avctx->sample_rate;
#endif
2017 
/* any change in format/channels/layout/rate forces a filtergraph rebuild */
2018  resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
2019  ist->resample_channels != avctx->channels ||
2020  ist->resample_channel_layout != decoded_frame->channel_layout ||
2021  ist->resample_sample_rate != decoded_frame->sample_rate;
2022  if (resample_changed) {
2023  char layout1[64], layout2[64];
2024 
2025  if (!guess_input_channel_layout(ist)) {
2026  av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
2027  "layout for Input Stream #%d.%d\n", ist->file_index,
2028  ist->st->index);
2029  exit_program(1);
2030  }
2031  decoded_frame->channel_layout = avctx->channel_layout;
2032 
2033  av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
2035  av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
2036  decoded_frame->channel_layout);
2037 
2039  "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
2040  ist->file_index, ist->st->index,
2042  ist->resample_channels, layout1,
2043  decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2044  avctx->channels, layout2);
2045 
/* remember the new parameters so the next change is detected */
2046  ist->resample_sample_fmt = decoded_frame->format;
2047  ist->resample_sample_rate = decoded_frame->sample_rate;
2048  ist->resample_channel_layout = decoded_frame->channel_layout;
2049  ist->resample_channels = avctx->channels;
2050 
2051  for (i = 0; i < nb_filtergraphs; i++)
2052  if (ist_in_filtergraph(filtergraphs[i], ist)) {
2053  FilterGraph *fg = filtergraphs[i];
2054  if (configure_filtergraph(fg) < 0) {
2055  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2056  exit_program(1);
2057  }
2058  }
2059  }
2060 
2061  /* if the decoder provides a pts, use it instead of the last packet pts.
2062  the decoder could be delaying output by a packet or more. */
2063  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2064  ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
2065  decoded_frame_tb = avctx->time_base;
2066  } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
2067  decoded_frame->pts = decoded_frame->pkt_pts;
2068  decoded_frame_tb = ist->st->time_base;
2069  } else if (pkt->pts != AV_NOPTS_VALUE) {
2070  decoded_frame->pts = pkt->pts;
2071  decoded_frame_tb = ist->st->time_base;
2072  }else {
/* last resort: reuse the tracked input dts */
2073  decoded_frame->pts = ist->dts;
2074  decoded_frame_tb = AV_TIME_BASE_Q;
2075  }
/* consume the packet pts so a second frame from this packet can't reuse it */
2076  pkt->pts = AV_NOPTS_VALUE;
2077  if (decoded_frame->pts != AV_NOPTS_VALUE)
2078  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2079  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2080  (AVRational){1, avctx->sample_rate});
2081  ist->nb_samples = decoded_frame->nb_samples;
/* every filter but the last gets a reference; the last consumes the frame */
2082  for (i = 0; i < ist->nb_filters; i++) {
2083  if (i < ist->nb_filters - 1) {
2084  f = ist->filter_frame;
2085  err = av_frame_ref(f, decoded_frame);
2086  if (err < 0)
2087  break;
2088  } else
2089  f = decoded_frame;
2090  err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2092  if (err == AVERROR_EOF)
2093  err = 0; /* ignore */
2094  if (err < 0)
2095  break;
2096  }
2097  decoded_frame->pts = AV_NOPTS_VALUE;
2098 
2099  av_frame_unref(ist->filter_frame);
2100  av_frame_unref(decoded_frame);
2101  return err < 0 ? err : ret;
2102 }
2103 
/* Decode one video packet into ist->decoded_frame and push the resulting
 * frame into every filtergraph input attached to this stream. On success
 * *got_output is set when a frame was produced; returns <0 on error.
 * NOTE(review): several source lines are missing from this extracted view
 * (e.g. the av_buffersrc_add_frame_flags() call feeding the filters) —
 * verify against the upstream file before editing. */
 2104 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
 2105 {
 2106  AVFrame *decoded_frame, *f;
 2107  int i, ret = 0, err = 0, resample_changed;
 2108  int64_t best_effort_timestamp;
 2109  AVRational *frame_sample_aspect;
 2110 
 /* Lazily allocate the per-stream frame buffers on first use. */
 2111  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
 2112  return AVERROR(ENOMEM);
 2113  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
 2114  return AVERROR(ENOMEM);
 2115  decoded_frame = ist->decoded_frame;
 /* Feed the decoder our tracked DTS (rescaled to the stream time base). */
 2116  pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
 2117 
 2119  ret = avcodec_decode_video2(ist->dec_ctx,
 2120  decoded_frame, got_output, pkt);
 2121  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
 2122 
 2123  // The following line may be required in some cases where there is no parser
 2124  // or the parser does not has_b_frames correctly
 2125  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
 2126  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
 2127  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
 2128  } else
 2130  "video_delay is larger in decoder than demuxer %d > %d.\n"
 2131  "If you want to help, upload a sample "
 2132  "of this file to ftp://upload.ffmpeg.org/incoming/ "
 2133  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
 2134  ist->dec_ctx->has_b_frames,
 2135  ist->st->codecpar->video_delay);
 2136  }
 2137 
 2138  check_decode_result(ist, got_output, ret);
 2139 
 /* Debug aid: report when the decoder context parameters disagree with the
  * frame actually produced. */
 2140  if (*got_output && ret >= 0) {
 2141  if (ist->dec_ctx->width != decoded_frame->width ||
 2142  ist->dec_ctx->height != decoded_frame->height ||
 2143  ist->dec_ctx->pix_fmt != decoded_frame->format) {
 2144  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
 2145  decoded_frame->width,
 2146  decoded_frame->height,
 2147  decoded_frame->format,
 2148  ist->dec_ctx->width,
 2149  ist->dec_ctx->height,
 2150  ist->dec_ctx->pix_fmt);
 2151  }
 2152  }
 2153 
 2154  if (!*got_output || ret < 0)
 2155  return ret;
 2156 
 /* Honor an explicit -top field order override from the user. */
 2157  if(ist->top_field_first>=0)
 2158  decoded_frame->top_field_first = ist->top_field_first;
 2159 
 2160  ist->frames_decoded++;
 2161 
 /* Download the frame from hardware memory if a hwaccel produced it. */
 2162  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
 2163  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
 2164  if (err < 0)
 2165  goto fail;
 2166  }
 2167  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
 2168 
 /* Prefer the decoder's best-effort timestamp for pts tracking. */
 2169  best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
 2170  if(best_effort_timestamp != AV_NOPTS_VALUE) {
 2171  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
 2172 
 2173  if (ts != AV_NOPTS_VALUE)
 2174  ist->next_pts = ist->pts = ts;
 2175  }
 2176 
 2177  if (debug_ts) {
 2178  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
 2179  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
 2180  ist->st->index, av_ts2str(decoded_frame->pts),
 2181  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
 2182  best_effort_timestamp,
 2183  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
 2184  decoded_frame->key_frame, decoded_frame->pict_type,
 2185  ist->st->time_base.num, ist->st->time_base.den);
 2186  }
 2187 
 2188  pkt->size = 0;
 2189 
 /* Container-level aspect ratio overrides the codec-level one. */
 2190  if (ist->st->sample_aspect_ratio.num)
 2191  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
 2192 
 /* Reconfigure the filtergraphs when frame geometry/format changed mid-stream. */
 2193  resample_changed = ist->resample_width != decoded_frame->width ||
 2194  ist->resample_height != decoded_frame->height ||
 2195  ist->resample_pix_fmt != decoded_frame->format;
 2196  if (resample_changed) {
 2198  "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
 2199  ist->file_index, ist->st->index,
 2201  decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
 2202 
 2203  ist->resample_width = decoded_frame->width;
 2204  ist->resample_height = decoded_frame->height;
 2205  ist->resample_pix_fmt = decoded_frame->format;
 2206 
 2207  for (i = 0; i < nb_filtergraphs; i++) {
 2208  if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
 2209  configure_filtergraph(filtergraphs[i]) < 0) {
 2210  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
 2211  exit_program(1);
 2212  }
 2213  }
 2214  }
 2215 
 2216  frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
 /* Fan the frame out to all filter inputs; all but the last get a ref so
  * the frame itself is consumed only once. */
 2217  for (i = 0; i < ist->nb_filters; i++) {
 2218  if (!frame_sample_aspect->num)
 2219  *frame_sample_aspect = ist->st->sample_aspect_ratio;
 2220 
 2221  if (i < ist->nb_filters - 1) {
 2222  f = ist->filter_frame;
 2223  err = av_frame_ref(f, decoded_frame);
 2224  if (err < 0)
 2225  break;
 2226  } else
 2227  f = decoded_frame;
 2229  if (ret == AVERROR_EOF) {
 2230  ret = 0; /* ignore */
 2231  } else if (ret < 0) {
 2233  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
 2234  exit_program(1);
 2235  }
 2236  }
 2237 
 2238 fail:
 2240  av_frame_unref(decoded_frame);
 2241  return err < 0 ? err : ret;
 2242 }
2243 
/* Decode one subtitle packet, optionally fix its duration against the
 * previous subtitle (-fix_sub_duration), render it for sub2video, and hand
 * it to every subtitle encoder that consumes this input stream. */
 2244 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
 2245 {
 2246  AVSubtitle subtitle;
 2247  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
 2248  &subtitle, got_output, pkt);
 2249 
 /* NOTE(review): passes NULL rather than ist, so decode-error accounting is
  * not attributed to this stream — confirm this is intentional. */
 2250  check_decode_result(NULL, got_output, ret);
 2251 
 2252  if (ret < 0 || !*got_output) {
 /* An empty packet means EOF: flush any pending sub2video state. */
 2253  if (!pkt->size)
 2254  sub2video_flush(ist);
 2255  return ret;
 2256  }
 2257 
 /* Duration fixing works one subtitle behind: the current subtitle's pts
  * bounds the display time of the previously decoded one, then the two are
  * swapped so the previous subtitle is the one processed below. */
 2258  if (ist->fix_sub_duration) {
 2259  int end = 1;
 2260  if (ist->prev_sub.got_output) {
 2261  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
 2262  1000, AV_TIME_BASE);
 2263  if (end < ist->prev_sub.subtitle.end_display_time) {
 2264  av_log(ist->dec_ctx, AV_LOG_DEBUG,
 2265  "Subtitle duration reduced from %d to %d%s\n",
 2267  end <= 0 ? ", dropping it" : "");
 2269  }
 2270  }
 2271  FFSWAP(int, *got_output, ist->prev_sub.got_output);
 2272  FFSWAP(int, ret, ist->prev_sub.ret);
 2273  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
 2274  if (end <= 0)
 2275  goto out;
 2276  }
 2277 
 2278  if (!*got_output)
 2279  return ret;
 2280 
 2281  sub2video_update(ist, &subtitle);
 2282 
 2283  if (!subtitle.num_rects)
 2284  goto out;
 2285 
 2286  ist->frames_decoded++;
 2287 
 /* Encode the subtitle into every eligible output stream. */
 2288  for (i = 0; i < nb_output_streams; i++) {
 2289  OutputStream *ost = output_streams[i];
 2290 
 2291  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
 2292  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
 2293  continue;
 2294 
 2295  do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
 2296  }
 2297 
 2298 out:
 2299  avsubtitle_free(&subtitle);
 2300  return ret;
 2301 }
2302 
/* send_filter_eof(InputStream *ist) — signature line is not visible in this
 * chunk. Marks EOF on every filtergraph input fed by this stream by pushing
 * a NULL frame into each buffersrc; returns the first error, else 0. */
 2304 {
 2305  int i, ret;
 2306  for (i = 0; i < ist->nb_filters; i++) {
 /* A NULL frame is the buffersrc EOF marker. */
 2307  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
 2308  if (ret < 0)
 2309  return ret;
 2310  }
 2311  return 0;
 2312 }
2313 
 2314 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Central per-packet dispatcher: maintains the stream's dts/pts estimates,
 * runs the appropriate decoder (audio/video/subtitle) while the packet has
 * data or the flushing decoder still produces output, signals filter EOF
 * after a final flush, and performs timestamp bookkeeping plus streamcopy
 * for non-decoded streams. Returns whether the decoder produced output.
 * NOTE(review): a few source lines are missing from this extracted view
 * (multi-frame warning condition, ticks computation) — verify upstream. */
 2315 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
 2316 {
 2317  int ret = 0, i;
 2318  int got_output = 0;
 2319 
 2320  AVPacket avpkt;
 /* Initialize dts/pts from the very first packet seen on this stream. */
 2321  if (!ist->saw_first_ts) {
 2322  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
 2323  ist->pts = 0;
 2324  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
 2325  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
 2326  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
 2327  }
 2328  ist->saw_first_ts = 1;
 2329  }
 2330 
 2331  if (ist->next_dts == AV_NOPTS_VALUE)
 2332  ist->next_dts = ist->dts;
 2333  if (ist->next_pts == AV_NOPTS_VALUE)
 2334  ist->next_pts = ist->pts;
 2335 
 2336  if (!pkt) {
 2337  /* EOF handling */
 2338  av_init_packet(&avpkt);
 2339  avpkt.data = NULL;
 2340  avpkt.size = 0;
 2341  goto handle_eof;
 2342  } else {
 2343  avpkt = *pkt;
 2344  }
 2345 
 /* Adopt the demuxed dts; for non-video (or copied) streams pts follows. */
 2346  if (pkt->dts != AV_NOPTS_VALUE) {
 2347  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
 2348  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
 2349  ist->next_pts = ist->pts = ist->dts;
 2350  }
 2351 
 2352  // while we have more to decode or while the decoder did output something on EOF
 2353  while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
 2354  int duration;
 2355  handle_eof:
 2356 
 2357  ist->pts = ist->next_pts;
 2358  ist->dts = ist->next_dts;
 2359 
 2360  if (avpkt.size && avpkt.size != pkt->size &&
 2363  "Multiple frames in a packet from stream %d\n", pkt->stream_index);
 2364  ist->showed_multi_packet_warning = 1;
 2365  }
 2366 
 2367  switch (ist->dec_ctx->codec_type) {
 2368  case AVMEDIA_TYPE_AUDIO:
 2369  ret = decode_audio (ist, &avpkt, &got_output);
 2370  break;
 2371  case AVMEDIA_TYPE_VIDEO:
 2372  ret = decode_video (ist, &avpkt, &got_output);
 /* Estimate the frame duration: packet duration first, then the
  * decoder's framerate, else 0 (unknown). */
 2373  if (avpkt.duration) {
 2374  duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
 2375  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
 2377  duration = ((int64_t)AV_TIME_BASE *
 2378  ist->dec_ctx->framerate.den * ticks) /
 2380  } else
 2381  duration = 0;
 2382 
 2383  if(ist->dts != AV_NOPTS_VALUE && duration) {
 2384  ist->next_dts += duration;
 2385  }else
 2386  ist->next_dts = AV_NOPTS_VALUE;
 2387 
 2388  if (got_output)
 2389  ist->next_pts += duration; //FIXME the duration is not correct in some cases
 2390  break;
 2391  case AVMEDIA_TYPE_SUBTITLE:
 2392  ret = transcode_subtitles(ist, &avpkt, &got_output);
 2393  break;
 2394  default:
 2395  return -1;
 2396  }
 2397 
 2398  if (ret < 0) {
 2399  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
 2400  ist->file_index, ist->st->index, av_err2str(ret));
 2401  if (exit_on_error)
 2402  exit_program(1);
 2403  break;
 2404  }
 2405 
 /* Only the first decode call in a packet may use the packet timestamps. */
 2406  avpkt.dts=
 2407  avpkt.pts= AV_NOPTS_VALUE;
 2408 
 2409  // touch data and size only if not EOF
 2410  if (pkt) {
 2412  ret = avpkt.size;
 2413  avpkt.data += ret;
 2414  avpkt.size -= ret;
 2415  }
 2416  if (!got_output) {
 2417  continue;
 2418  }
 2419  if (got_output && !pkt)
 2420  break;
 2421  }
 2422 
 2423  /* after flushing, send an EOF on all the filter inputs attached to the stream */
 2424  /* except when looping we need to flush but not to send an EOF */
 2425  if (!pkt && ist->decoding_needed && !got_output && !no_eof) {
 2426  int ret = send_filter_eof(ist);
 2427  if (ret < 0) {
 2428  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
 2429  exit_program(1);
 2430  }
 2431  }
 2432 
 2433  /* handle stream copy */
 2434  if (!ist->decoding_needed) {
 2435  ist->dts = ist->next_dts;
 2436  switch (ist->dec_ctx->codec_type) {
 2437  case AVMEDIA_TYPE_AUDIO:
 2438  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
 2439  ist->dec_ctx->sample_rate;
 2440  break;
 2441  case AVMEDIA_TYPE_VIDEO:
 2442  if (ist->framerate.num) {
 2443  // TODO: Remove work-around for c99-to-c89 issue 7
 2444  AVRational time_base_q = AV_TIME_BASE_Q;
 2445  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
 2446  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
 2447  } else if (pkt->duration) {
 2448  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
 2449  } else if(ist->dec_ctx->framerate.num != 0) {
 2450  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
 2451  ist->next_dts += ((int64_t)AV_TIME_BASE *
 2452  ist->dec_ctx->framerate.den * ticks) /
 2454  }
 2455  break;
 2456  }
 2457  ist->pts = ist->dts;
 2458  ist->next_pts = ist->next_dts;
 2459  }
 /* Forward the original packet verbatim to all streamcopy outputs. */
 2460  for (i = 0; pkt && i < nb_output_streams; i++) {
 2461  OutputStream *ost = output_streams[i];
 2462 
 2463  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
 2464  continue;
 2465 
 2466  do_streamcopy(ist, ost, pkt);
 2467  }
 2468 
 2469  return got_output;
 2470 }
2471 
/* Generate an SDP description covering all RTP output files and either print
 * it to stdout or write it to -sdp_file. Exits the program on allocation
 * failure; silently returns when no RTP outputs exist. */
 2472 static void print_sdp(void)
 2473 {
 2474  char sdp[16384];
 2475  int i;
 2476  int j;
 2477  AVIOContext *sdp_pb;
 2478  AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
 2479 
 2480  if (!avc)
 2481  exit_program(1);
 /* Collect only the muxers that actually speak RTP. */
 2482  for (i = 0, j = 0; i < nb_output_files; i++) {
 2483  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
 2484  avc[j] = output_files[i]->ctx;
 2485  j++;
 2486  }
 2487  }
 2488 
 2489  if (!j)
 2490  goto fail;
 2491 
 2492  av_sdp_create(avc, j, sdp, sizeof(sdp));
 2493 
 2494  if (!sdp_filename) {
 2495  printf("SDP:\n%s\n", sdp);
 2496  fflush(stdout);
 2497  } else {
 2498  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
 2499  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
 2500  } else {
 2501  avio_printf(sdp_pb, "SDP:\n%s", sdp);
 2502  avio_closep(&sdp_pb);
 2504  }
 2505  }
 2506 
 2507 fail:
 2508  av_freep(&avc);
 2509 }
2510 
/* get_hwaccel(pix_fmt) — signature line is not visible in this chunk.
 * Linear scan of the global hwaccels[] table for an entry whose pixel
 * format matches; returns NULL when no hwaccel handles this format. */
 2512 {
 2513  int i;
 2514  for (i = 0; hwaccels[i].name; i++)
 2515  if (hwaccels[i].pix_fmt == pix_fmt)
 2516  return &hwaccels[i];
 2517  return NULL;
 2518 }
2519 
/* get_format() callback installed on the decoder context — signature line is
 * not visible in this chunk. Walks the decoder's proposed pixel formats in
 * preference order, initializing the first usable hwaccel; falls back to the
 * first non-hwaccel format when none applies. */
 2521 {
 2522  InputStream *ist = s->opaque;
 2523  const enum AVPixelFormat *p;
 2524  int ret;
 2525 
 2526  for (p = pix_fmts; *p != -1; p++) {
 2528  const HWAccel *hwaccel;
 2529 
 /* Formats after the hwaccel entries are software formats: stop here and
  * accept *p as the software fallback. */
 2530  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
 2531  break;
 2532 
 /* Skip hwaccels that don't match the format, an already-active hwaccel,
  * or the user's explicitly requested hwaccel. */
 2533  hwaccel = get_hwaccel(*p);
 2534  if (!hwaccel ||
 2535  (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
 2536  (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
 2537  continue;
 2538 
 2539  ret = hwaccel->init(s);
 2540  if (ret < 0) {
 /* Init failure is fatal only when the user asked for this hwaccel
  * by name; in auto mode we just try the next candidate. */
 2541  if (ist->hwaccel_id == hwaccel->id) {
 2543  "%s hwaccel requested for input stream #%d:%d, "
 2544  "but cannot be initialized.\n", hwaccel->name,
 2545  ist->file_index, ist->st->index);
 2546  return AV_PIX_FMT_NONE;
 2547  }
 2548  continue;
 2549  }
 2550 
 2551  if (ist->hw_frames_ctx) {
 2553  if (!s->hw_frames_ctx)
 2554  return AV_PIX_FMT_NONE;
 2555  }
 2556 
 2557  ist->active_hwaccel_id = hwaccel->id;
 2558  ist->hwaccel_pix_fmt = *p;
 2559  break;
 2560  }
 2561 
 2562  return *p;
 2563 }
2564 
/* get_buffer2() callback installed on the decoder context — signature line is
 * not visible in this chunk. Delegates frame allocation to the active
 * hwaccel when the frame uses its pixel format, otherwise falls back to the
 * default allocator. */
 2566 {
 2567  InputStream *ist = s->opaque;
 2568 
 2569  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
 2570  return ist->hwaccel_get_buffer(s, frame, flags);
 2571 
 2572  return avcodec_default_get_buffer2(s, frame, flags);
 2573 }
2574 
/* Open the decoder for one input stream (when decoding is needed), wiring up
 * the hwaccel callbacks and default decoder options. On failure, writes a
 * human-readable message into 'error' and returns a negative error code. */
 2575 static int init_input_stream(int ist_index, char *error, int error_len)
 2576 {
 2577  int ret;
 2578  InputStream *ist = input_streams[ist_index];
 2579 
 2580  if (ist->decoding_needed) {
 2581  AVCodec *codec = ist->dec;
 2582  if (!codec) {
 2583  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
 2584  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
 2585  return AVERROR(EINVAL);
 2586  }
 2587 
 /* Hook the hwaccel-aware format/buffer callbacks into the decoder. */
 2588  ist->dec_ctx->opaque = ist;
 2589  ist->dec_ctx->get_format = get_format;
 2590  ist->dec_ctx->get_buffer2 = get_buffer;
 2591  ist->dec_ctx->thread_safe_callbacks = 1;
 2592 
 2593  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
 2594  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
 2595  (ist->decoding_needed & DECODING_FOR_OST)) {
 2596  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
 2598  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
 2599  }
 2600 
 2601  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
 2602 
 2603  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
 2604  * audio, and video decoders such as cuvid or mediacodec */
 2606 
 /* Default to automatic thread count unless the user chose one. */
 2607  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
 2608  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
 2609  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
 2610  if (ret == AVERROR_EXPERIMENTAL)
 2611  abort_codec_experimental(codec, 0);
 2612 
 2613  snprintf(error, error_len,
 2614  "Error while opening decoder for input stream "
 2615  "#%d:%d : %s",
 2616  ist->file_index, ist->st->index, av_err2str(ret));
 2617  return ret;
 2618  }
 2620  }
 2621 
 2622  ist->next_pts = AV_NOPTS_VALUE;
 2623  ist->next_dts = AV_NOPTS_VALUE;
 2624 
 2625  return 0;
 2626 }
2627 
/* get_input_stream(OutputStream *ost) — signature line is not visible in this
 * chunk. Returns the input stream feeding this output, or NULL when the
 * output has no direct source (e.g. it is fed by a complex filtergraph). */
 2629 {
 2630  if (ost->source_index >= 0)
 2631  return input_streams[ost->source_index];
 2632  return NULL;
 2633 }
2634 
/**
 * qsort()-compatible comparator for int64_t values.
 *
 * @param a pointer to the first int64_t
 * @param b pointer to the second int64_t
 * @return a negative value, zero, or a positive value when *a is less than,
 *         equal to, or greater than *b, respectively
 */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    /* Three-way comparison without subtraction, so the result cannot
     * overflow for any pair of int64_t inputs. */
    if (lhs < rhs)
        return -1;
    if (lhs > rhs)
        return 1;
    return 0;
}
2639 
/* init_output_bsfs(OutputStream *ost) — signature line is not visible in this
 * chunk. Chains the output stream's bitstream filters: each filter's input
 * parameters/time base come from the previous filter's output (the first
 * from the stream itself), and the final filter's output parameters are
 * copied back onto the stream. */
 2641 {
 2642  AVBSFContext *ctx;
 2643  int i, ret;
 2644 
 2645  if (!ost->nb_bitstream_filters)
 2646  return 0;
 2647 
 2648  for (i = 0; i < ost->nb_bitstream_filters; i++) {
 2649  ctx = ost->bsf_ctx[i];
 2650 
 /* Filter 0 consumes the stream's codec parameters; filter i consumes
  * the output of filter i-1. */
 2651  ret = avcodec_parameters_copy(ctx->par_in,
 2652  i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
 2653  if (ret < 0)
 2654  return ret;
 2655 
 2656  ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
 2657 
 2658  ret = av_bsf_init(ctx);
 2659  if (ret < 0) {
 2660  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
 2661  ost->bsf_ctx[i]->filter->name);
 2662  return ret;
 2663  }
 2664  }
 2665 
 /* Propagate the last filter's output parameters back to the stream. */
 2666  ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
 2667  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
 2668  if (ret < 0)
 2669  return ret;
 2670 
 2671  ost->st->time_base = ctx->time_base_out;
 2672 
 2673  return 0;
 2674 }
2675 
/* init_output_stream_streamcopy(OutputStream *ost) — signature line is not
 * visible in this chunk. Copies codec parameters, extradata, side data and
 * per-media-type fields from the source stream into the output stream for
 * -c copy, applying container-specific fixups (codec tag, MP3/AC3 block
 * align, aspect-ratio override).
 * NOTE(review): several source lines are missing from this extracted view
 * (e.g. the avcodec_parameters_copy into ost->ref_par) — verify upstream. */
 2677 {
 2678  OutputFile *of = output_files[ost->file_index];
 2679  InputStream *ist = get_input_stream(ost);
 2680  AVCodecParameters *par_dst = ost->st->codecpar;
 2681  AVCodecParameters *par_src = ost->ref_par;
 2682  AVRational sar;
 2683  int i, ret;
 2684  uint64_t extra_size;
 2685 
 /* Streamcopy requires a direct input stream and no filtergraph. */
 2686  av_assert0(ist && !ost->filter);
 2687 
 2689  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
 2690  if (ret < 0) {
 2692  "Error setting up codec context options.\n");
 2693  return ret;
 2694  }
 2696 
 /* Guard against extradata_size + padding overflowing an int. */
 2697  extra_size = (uint64_t)par_src->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
 2698 
 2699  if (extra_size > INT_MAX) {
 2700  return AVERROR(EINVAL);
 2701  }
 2702 
 2703  /* if stream_copy is selected, no need to decode or encode */
 2704  par_dst->codec_id = par_src->codec_id;
 2705  par_dst->codec_type = par_src->codec_type;
 2706 
 /* Keep the source codec tag only when the target container can map it
  * back to the same codec id (or has no tag table at all). */
 2707  if (!par_dst->codec_tag) {
 2708  unsigned int codec_tag;
 2709  if (!of->ctx->oformat->codec_tag ||
 2710  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_dst->codec_id ||
 2711  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag))
 2712  par_dst->codec_tag = par_src->codec_tag;
 2713  }
 2714 
 2715  par_dst->bit_rate = par_src->bit_rate;
 2716  par_dst->field_order = par_src->field_order;
 2717  par_dst->chroma_location = par_src->chroma_location;
 2718 
 2719  if (par_src->extradata_size) {
 2720  par_dst->extradata = av_mallocz(extra_size);
 2721  if (!par_dst->extradata) {
 2722  return AVERROR(ENOMEM);
 2723  }
 2724  memcpy(par_dst->extradata, par_src->extradata, par_src->extradata_size);
 2725  par_dst->extradata_size = par_src->extradata_size;
 2726  }
 2727  par_dst->bits_per_coded_sample = par_src->bits_per_coded_sample;
 2728  par_dst->bits_per_raw_sample = par_src->bits_per_raw_sample;
 2729 
 2730  if (!ost->frame_rate.num)
 2731  ost->frame_rate = ist->framerate;
 2732  ost->st->avg_frame_rate = ost->frame_rate;
 2733 
 2735  if (ret < 0)
 2736  return ret;
 2737 
 2738  // copy timebase while removing common factors
 2740 
 /* Deep-copy stream-level side data, skipping the display matrix when the
  * user overrode rotation on the command line. */
 2741  if (ist->st->nb_side_data) {
 2742  ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
 2743  sizeof(*ist->st->side_data));
 2744  if (!ost->st->side_data)
 2745  return AVERROR(ENOMEM);
 2746 
 2747  ost->st->nb_side_data = 0;
 2748  for (i = 0; i < ist->st->nb_side_data; i++) {
 2749  const AVPacketSideData *sd_src = &ist->st->side_data[i];
 2750  AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
 2751 
 2752  if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
 2753  continue;
 2754 
 2755  sd_dst->data = av_malloc(sd_src->size);
 2756  if (!sd_dst->data)
 2757  return AVERROR(ENOMEM);
 2758  memcpy(sd_dst->data, sd_src->data, sd_src->size);
 2759  sd_dst->size = sd_src->size;
 2760  sd_dst->type = sd_src->type;
 2761  ost->st->nb_side_data++;
 2762  }
 2763  }
 2764 
 2765  ost->parser = av_parser_init(par_dst->codec_id);
 2766  ost->parser_avctx = avcodec_alloc_context3(NULL);
 2767  if (!ost->parser_avctx)
 2768  return AVERROR(ENOMEM);
 2769 
 2770  switch (par_dst->codec_type) {
 2771  case AVMEDIA_TYPE_AUDIO:
 2772  if (audio_volume != 256) {
 2773  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
 2774  exit_program(1);
 2775  }
 2776  par_dst->channel_layout = par_src->channel_layout;
 2777  par_dst->sample_rate = par_src->sample_rate;
 2778  par_dst->channels = par_src->channels;
 2779  par_dst->frame_size = par_src->frame_size;
 2780  par_dst->block_align = par_src->block_align;
 2781  par_dst->initial_padding = par_src->initial_padding;
 2782  par_dst->trailing_padding = par_src->trailing_padding;
 2783  par_dst->profile = par_src->profile;
 /* These codecs store framing internally; a copied block_align would be
  * misleading, so drop it. */
 2784  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
 2785  par_dst->block_align= 0;
 2786  if(par_dst->codec_id == AV_CODEC_ID_AC3)
 2787  par_dst->block_align= 0;
 2788  break;
 2789  case AVMEDIA_TYPE_VIDEO:
 2790  par_dst->format = par_src->format;
 2791  par_dst->color_space = par_src->color_space;
 2792  par_dst->color_range = par_src->color_range;
 2793  par_dst->color_primaries = par_src->color_primaries;
 2794  par_dst->color_trc = par_src->color_trc;
 2795  par_dst->width = par_src->width;
 2796  par_dst->height = par_src->height;
 2797  par_dst->video_delay = par_src->video_delay;
 2798  par_dst->profile = par_src->profile;
 /* SAR priority: -aspect override > container SAR > codec SAR. */
 2799  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
 2800  sar =
 2801  av_mul_q(ost->frame_aspect_ratio,
 2802  (AVRational){ par_dst->height, par_dst->width });
 2803  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
 2804  "with stream copy may produce invalid files\n");
 2805  }
 2806  else if (ist->st->sample_aspect_ratio.num)
 2807  sar = ist->st->sample_aspect_ratio;
 2808  else
 2809  sar = par_src->sample_aspect_ratio;
 2810  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
 2811  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
 2812  ost->st->r_frame_rate = ist->st->r_frame_rate;
 2813  break;
 2814  case AVMEDIA_TYPE_SUBTITLE:
 2815  par_dst->width = par_src->width;
 2816  par_dst->height = par_src->height;
 2817  break;
 2818  case AVMEDIA_TYPE_UNKNOWN:
 2819  case AVMEDIA_TYPE_DATA:
 2821  break;
 2822  default:
 2823  abort();
 2824  }
 2825 
 2826  return 0;
 2827 }
2828 
/* Initialize one output stream: open the encoder (propagating the subtitle
 * header from the decoder, choosing sane defaults for threads/audio bitrate),
 * or perform streamcopy setup, then initialize the output bitstream filters.
 * On encoder-open failure writes a message into 'error' and returns <0.
 * NOTE(review): a few source lines are missing from this extracted view
 * (subtitle_header allocation, hw_frames_ctx ref) — verify upstream. */
 2829 static int init_output_stream(OutputStream *ost, char *error, int error_len)
 2830 {
 2831  int ret = 0;
 2832 
 2833  if (ost->encoding_needed) {
 2834  AVCodec *codec = ost->enc;
 2835  AVCodecContext *dec = NULL;
 2836  InputStream *ist;
 2837 
 2838  if ((ist = get_input_stream(ost)))
 2839  dec = ist->dec_ctx;
 2840  if (dec && dec->subtitle_header) {
 2841  /* ASS code assumes this buffer is null terminated so add extra byte. */
 2843  if (!ost->enc_ctx->subtitle_header)
 2844  return AVERROR(ENOMEM);
 2845  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
 2847  }
 2848  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
 2849  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
 /* Audio encoders with no codec defaults get a 128 kb/s fallback unless
  * the user set a bitrate. */
 2850  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
 2851  !codec->defaults &&
 2852  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
 2853  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
 2854  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
 2855 
 2856  if (ost->filter && ost->filter->filter->inputs[0]->hw_frames_ctx) {
 2858  if (!ost->enc_ctx->hw_frames_ctx)
 2859  return AVERROR(ENOMEM);
 2860  }
 2861 
 2862  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
 2863  if (ret == AVERROR_EXPERIMENTAL)
 2864  abort_codec_experimental(codec, 1);
 2865  snprintf(error, error_len,
 2866  "Error while opening encoder for output stream #%d:%d - "
 2867  "maybe incorrect parameters such as bit_rate, rate, width or height",
 2868  ost->file_index, ost->index);
 2869  return ret;
 2870  }
 2871  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
 2872  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
 2874  ost->enc_ctx->frame_size);
 /* Heuristic: bit rates below 1000 almost certainly meant kbit/s. */
 2876  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
 2877  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
 2878  " It takes bits/s as argument, not kbits/s\n");
 2879 
 2881  if (ret < 0) {
 2883  "Error initializing the output stream codec context.\n");
 2884  exit_program(1);
 2885  }
 2886  /*
 2887  * FIXME: ost->st->codec should't be needed here anymore.
 2888  */
 2889  ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
 2890  if (ret < 0)
 2891  return ret;
 2892 
 /* Deep-copy the encoder's coded side data onto the muxed stream. */
 2893  if (ost->enc_ctx->nb_coded_side_data) {
 2894  int i;
 2895 
 2897  sizeof(*ost->st->side_data));
 2898  if (!ost->st->side_data)
 2899  return AVERROR(ENOMEM);
 2900 
 2901  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
 2902  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
 2903  AVPacketSideData *sd_dst = &ost->st->side_data[i];
 2904 
 2905  sd_dst->data = av_malloc(sd_src->size);
 2906  if (!sd_dst->data)
 2907  return AVERROR(ENOMEM);
 2908  memcpy(sd_dst->data, sd_src->data, sd_src->size);
 2909  sd_dst->size = sd_src->size;
 2910  sd_dst->type = sd_src->type;
 2911  ost->st->nb_side_data++;
 2912  }
 2913  }
 2914 
 2915  // copy timebase while removing common factors
 2916  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
 2917  ost->st->codec->codec= ost->enc_ctx->codec;
 2918  } else if (ost->stream_copy) {
 2919  ret = init_output_stream_streamcopy(ost);
 2920  if (ret < 0)
 2921  return ret;
 2922 
 2923  /*
 2924  * FIXME: will the codec context used by the parser during streamcopy
 2925  * This should go away with the new parser API.
 2926  */
 2927  ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
 2928  if (ret < 0)
 2929  return ret;
 2930  }
 2931 
 2932  /* initialize bitstream filters for the output stream
 2933  * needs to be done here, because the codec id for streamcopy is not
 2934  * known until now */
 2935  ret = init_output_bsfs(ost);
 2936  if (ret < 0)
 2937  return ret;
 2938 
 2939  return ret;
 2940 }
2941 
/* Parse the -force_key_frames specification 'kf' (comma-separated times,
 * where an entry starting with "chapters" expands to all chapter start
 * times plus an optional offset) into a sorted array of timestamps in
 * avctx->time_base, stored on ost. Exits the program on allocation failure. */
 2942 static void parse_forced_key_frames(char *kf, OutputStream *ost,
 2943  AVCodecContext *avctx)
 2944 {
 2945  char *p;
 2946  int n = 1, i, size, index = 0;
 2947  int64_t t, *pts;
 2948 
 /* One entry per comma-separated field. */
 2949  for (p = kf; *p; p++)
 2950  if (*p == ',')
 2951  n++;
 2952  size = n;
 2953  pts = av_malloc_array(size, sizeof(*pts));
 2954  if (!pts) {
 2955  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
 2956  exit_program(1);
 2957  }
 2958 
 2959  p = kf;
 2960  for (i = 0; i < n; i++) {
 2961  char *next = strchr(p, ',');
 2962 
 2963  if (next)
 2964  *next++ = 0;
 2965 
 /* NOTE(review): memcmp() with a fixed length of 8 — assumes each field
  * is at least 8 bytes or the comparison fails before reading past the
  * terminator; upstream behavior, verify before changing. */
 2966  if (!memcmp(p, "chapters", 8)) {
 2967 
 2968  AVFormatContext *avf = output_files[ost->file_index]->ctx;
 2969  int j;
 2970 
 /* Grow the array: one slot per chapter replaces this single entry. */
 2971  if (avf->nb_chapters > INT_MAX - size ||
 2972  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
 2973  sizeof(*pts)))) {
 2975  "Could not allocate forced key frames array.\n");
 2976  exit_program(1);
 2977  }
 /* "chapters+<offset>" shifts every chapter time by <offset>. */
 2978  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
 2979  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
 2980 
 2981  for (j = 0; j < avf->nb_chapters; j++) {
 2982  AVChapter *c = avf->chapters[j];
 2983  av_assert1(index < size);
 2984  pts[index++] = av_rescale_q(c->start, c->time_base,
 2985  avctx->time_base) + t;
 2986  }
 2987 
 2988  } else {
 2989 
 2990  t = parse_time_or_die("force_key_frames", p, 1);
 2991  av_assert1(index < size);
 2992  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
 2993 
 2994  }
 2995 
 2996  p = next;
 2997  }
 2998 
 2999  av_assert0(index == size);
 3000  qsort(pts, size, sizeof(*pts), compare_int64);
 3001  ost->forced_kf_count = size;
 3002  ost->forced_kf_pts = pts;
 3003 }
3004 
/* Warn (once per stream) when a packet arrives for a stream that appeared
 * after the initial probe; nb_streams_warn tracks the highest stream index
 * already reported so each new stream is announced only once. */
 3005 static void report_new_stream(int input_index, AVPacket *pkt)
 3006 {
 3007  InputFile *file = input_files[input_index];
 3008  AVStream *st = file->ctx->streams[pkt->stream_index];
 3009 
 3010  if (pkt->stream_index < file->nb_streams_warn)
 3011  return;
 3012  av_log(file->ctx, AV_LOG_WARNING,
 3013  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
 3015  input_index, pkt->stream_index,
 3016  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
 3017  file->nb_streams_warn = pkt->stream_index + 1;
 3018 }
3019 
/* set_encoder_id(OutputFile *of, OutputStream *ost) — signature line is not
 * visible in this chunk. Writes an "encoder" metadata tag identifying the
 * encoder used, prefixed with the libavcodec version string unless bitexact
 * output was requested (via format fflags or codec flags). A user-provided
 * "encoder" tag is left untouched. */
 3021 {
 3022  AVDictionaryEntry *e;
 3023 
 3024  uint8_t *encoder_string;
 3025  int encoder_string_len;
 3026  int format_flags = 0;
 3027  int codec_flags = 0;
 3028 
 3029  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
 3030  return;
 3031 
 /* Evaluate the muxer's -fflags string to detect +bitexact. */
 3032  e = av_dict_get(of->opts, "fflags", NULL, 0);
 3033  if (e) {
 3034  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
 3035  if (!o)
 3036  return;
 3037  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
 3038  }
 /* Likewise for the encoder's -flags string. */
 3039  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
 3040  if (e) {
 3041  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
 3042  if (!o)
 3043  return;
 3044  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
 3045  }
 3046 
 3047  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
 3048  encoder_string = av_mallocz(encoder_string_len);
 3049  if (!encoder_string)
 3050  exit_program(1);
 3051 
 /* Bitexact mode must not embed the version string in the output. */
 3052  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
 3053  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
 3054  else
 3055  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
 3056  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
 3057  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
 3059 }
3060 
/*
 * One-time setup before the main transcode loop: binds complex-filtergraph
 * outputs to a source input stream, seeds framerate emulation start times,
 * derives encoder parameters for every non-copy output stream, parses
 * -disposition flags, opens decoders and encoders, discards unused programs,
 * writes all output file headers, and finally dumps the stream mapping.
 * Returns 0 on success or a negative AVERROR (the collected `error` text is
 * logged before returning).
 *
 * NOTE(review): this listing was extracted with several original source lines
 * dropped (e.g. 3112-3113, 3118, 3140, 3150, 3157, 3192, 3198, 3206,
 * 3209-3210, 3213, 3216-3217, 3220, 3232, 3237-3240, 3244-3247, 3252) —
 * several statements below are visibly truncated. Consult the complete file
 * before modifying this function.
 */
3061 static int transcode_init(void)
3062 {
3063  int ret = 0, i, j, k;
3064  AVFormatContext *oc;
3065  OutputStream *ost;
3066  InputStream *ist;
3067  char error[1024] = {0};
3068  int want_sdp = 1;
3069 
/* Give each complex-filtergraph output without a source stream the last
 * input stream that feeds its (single-input) graph, for mapping output. */
3070  for (i = 0; i < nb_filtergraphs; i++) {
3071  FilterGraph *fg = filtergraphs[i];
3072  for (j = 0; j < fg->nb_outputs; j++) {
3073  OutputFilter *ofilter = fg->outputs[j];
3074  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3075  continue;
3076  if (fg->nb_inputs != 1)
3077  continue;
3078  for (k = nb_input_streams-1; k >= 0 ; k--)
3079  if (fg->inputs[0]->ist == input_streams[k])
3080  break;
3081  ofilter->ost->source_index = k;
3082  }
3083  }
3084 
3085  /* init framerate emulation */
3086  for (i = 0; i < nb_input_files; i++) {
3087  InputFile *ifile = input_files[i];
3088  if (ifile->rate_emu)
3089  for (j = 0; j < ifile->nb_streams; j++)
3090  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3091  }
3092 
3093  /* for each output stream, we compute the right encoding parameters */
3094  for (i = 0; i < nb_output_streams; i++) {
3095  ost = output_streams[i];
3096  oc = output_files[ost->file_index]->ctx;
3097  ist = get_input_stream(ost);
3098 
3099  if (ost->attachment_filename)
3100  continue;
3101 
/* Inherit disposition from the mapped input stream; otherwise (filter-only
 * outputs) the truncated branch below apparently defaults it when this is
 * the first stream of its media type — TODO confirm against the full file. */
3102  if (ist) {
3103  ost->st->disposition = ist->st->disposition;
3104  } else {
3105  for (j=0; j<oc->nb_streams; j++) {
3106  AVStream *st = oc->streams[j];
3107  if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3108  break;
3109  }
3110  if (j == oc->nb_streams)
3111  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3114  }
3115 
/* Everything below only applies when re-encoding (not stream copy). */
3116  if (!ost->stream_copy) {
3117  AVCodecContext *enc_ctx = ost->enc_ctx;
3119 
3120  set_encoder_id(output_files[ost->file_index], ost);
3121 
3122  if (ist) {
3123  dec_ctx = ist->dec_ctx;
3124 
3125  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3126  }
3127 
3128 #if CONFIG_LIBMFX
3129  if (qsv_transcode_init(ost))
3130  exit_program(1);
3131 #endif
3132 
3133 #if CONFIG_CUVID
3134  if (cuvid_transcode_init(ost))
3135  exit_program(1);
3136 #endif
3137 
/* Filtergraph must be configured before we can read link parameters
 * (format, dimensions, sample rate, ...) off ost->filter below. */
3138  if ((enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3139  enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3141  FilterGraph *fg = ost->filter->graph;
3142  if (configure_filtergraph(fg)) {
3143  av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
3144  exit_program(1);
3145  }
3146  }
3147 
/* Output frame rate fallback chain: (truncated source) -> -r/input
 * framerate -> r_frame_rate -> hardcoded 25 fps with a warning. */
3148  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3149  if (!ost->frame_rate.num)
3151  if (ist && !ost->frame_rate.num)
3152  ost->frame_rate = ist->framerate;
3153  if (ist && !ost->frame_rate.num)
3154  ost->frame_rate = ist->st->r_frame_rate;
3155  if (ist && !ost->frame_rate.num) {
3156  ost->frame_rate = (AVRational){25, 1};
3158  "No information "
3159  "about the input framerate is available. Falling "
3160  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3161  "if you want a different framerate.\n",
3162  ost->file_index, ost->index);
3163  }
3164 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3165  if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3166  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3167  ost->frame_rate = ost->enc->supported_framerates[idx];
3168  }
3169  // reduce frame rate for mpeg4 to be within the spec limits
3170  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3171  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3172  ost->frame_rate.num, ost->frame_rate.den, 65535);
3173  }
3174  }
3175 
/* Per-media-type encoder parameters, pulled from the configured
 * filtergraph output link. */
3176  switch (enc_ctx->codec_type) {
3177  case AVMEDIA_TYPE_AUDIO:
3178  enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3179  if (dec_ctx)
3180  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3181  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3182  enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3183  enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3184  enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3185  enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3186  break;
3187  case AVMEDIA_TYPE_VIDEO:
3188  enc_ctx->time_base = av_inv_q(ost->frame_rate);
3189  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3190  enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3191  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3193  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3194  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3195  }
3196  for (j = 0; j < ost->forced_kf_count; j++)
3197  ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3199  enc_ctx->time_base);
3200 
3201  enc_ctx->width = ost->filter->filter->inputs[0]->w;
3202  enc_ctx->height = ost->filter->filter->inputs[0]->h;
3203  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3204  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3205  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3207  if (!strncmp(ost->enc->name, "libx264", 7) &&
3208  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3211  "No pixel format specified, %s for H.264 encoding chosen.\n"
3212  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3214  if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3215  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3218  "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3219  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3221  enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3222  if (dec_ctx)
3223  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3224  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3225 
3226  ost->st->avg_frame_rate = ost->frame_rate;
3227 
/* Truncated body: presumably warns/acts when encoder geometry differs
 * from the decoder's — TODO confirm against the full file. */
3228  if (!dec_ctx ||
3229  enc_ctx->width != dec_ctx->width ||
3230  enc_ctx->height != dec_ctx->height ||
3231  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3233  }
3234 
3235  if (ost->forced_keyframes) {
3236  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3239  if (ret < 0) {
3241  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3242  return ret;
3243  }
3248 
3249  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3250  // parse it only for static kf timings
3251  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3253  }
3254  }
3255  break;
3256  case AVMEDIA_TYPE_SUBTITLE:
3257  enc_ctx->time_base = (AVRational){1, 1000};
3258  if (!enc_ctx->width) {
3259  enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3260  enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3261  }
3262  break;
3263  case AVMEDIA_TYPE_DATA:
3264  break;
3265  default:
3266  abort();
3267  break;
3268  }
3269  }
3270 
/* Parse the -disposition option value into AV_DISPOSITION_* flags via a
 * local one-off AVOption flags table. */
3271  if (ost->disposition) {
3272  static const AVOption opts[] = {
3273  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3274  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3275  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3276  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3277  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3278  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3279  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3280  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3281  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3282  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3283  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3284  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3285  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3286  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3287  { NULL },
3288  };
3289  static const AVClass class = {
3290  .class_name = "",
3291  .item_name = av_default_item_name,
3292  .option = opts,
3293  .version = LIBAVUTIL_VERSION_INT,
3294  };
3295  const AVClass *pclass = &class;
3296 
3297  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3298  if (ret < 0)
3299  goto dump_format;
3300  }
3301  }
3302 
3303  /* init input streams */
3304  for (i = 0; i < nb_input_streams; i++)
3305  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3306  for (i = 0; i < nb_output_streams; i++) {
3307  ost = output_streams[i];
3308  avcodec_close(ost->enc_ctx);
3309  }
3310  goto dump_format;
3311  }
3312 
3313  /* open each encoder */
3314  for (i = 0; i < nb_output_streams; i++) {
3315  ret = init_output_stream(output_streams[i], error, sizeof(error));
3316  if (ret < 0)
3317  goto dump_format;
3318  }
3319 
3320  /* discard unused programs */
3321  for (i = 0; i < nb_input_files; i++) {
3322  InputFile *ifile = input_files[i];
3323  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3324  AVProgram *p = ifile->ctx->programs[j];
3325  int discard = AVDISCARD_ALL;
3326 
3327  for (k = 0; k < p->nb_stream_indexes; k++)
3328  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3329  discard = AVDISCARD_DEFAULT;
3330  break;
3331  }
3332  p->discard = discard;
3333  }
3334  }
3335 
3336  /* open files and write file headers */
3337  for (i = 0; i < nb_output_files; i++) {
3338  oc = output_files[i]->ctx;
3339  oc->interrupt_callback = int_cb;
3340  if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3341  snprintf(error, sizeof(error),
3342  "Could not write header for output file #%d "
3343  "(incorrect codec parameters ?): %s",
3344  i, av_err2str(ret));
3345  ret = AVERROR(EINVAL);
3346  goto dump_format;
3347  }
3348 // assert_avoptions(output_files[i]->opts);
/* SDP is only printed when every output muxer is rtp. */
3349  if (strcmp(oc->oformat->name, "rtp")) {
3350  want_sdp = 0;
3351  }
3352  }
3353 
3354  dump_format:
3355  /* dump the file output parameters - cannot be done before in case
3356  of stream copy */
3357  for (i = 0; i < nb_output_files; i++) {
3358  av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3359  }
3360 
3361  /* dump the stream mapping */
3362  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3363  for (i = 0; i < nb_input_streams; i++) {
3364  ist = input_streams[i];
3365 
3366  for (j = 0; j < ist->nb_filters; j++) {
3367  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3368  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3369  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3370  ist->filters[j]->name);
3371  if (nb_filtergraphs > 1)
3372  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3373  av_log(NULL, AV_LOG_INFO, "\n");
3374  }
3375  }
3376  }
3377 
3378  for (i = 0; i < nb_output_streams; i++) {
3379  ost = output_streams[i];
3380 
3381  if (ost->attachment_filename) {
3382  /* an attached file */
3383  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3384  ost->attachment_filename, ost->file_index, ost->index);
3385  continue;
3386  }
3387 
3388  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3389  /* output from a complex graph */
3390  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3391  if (nb_filtergraphs > 1)
3392  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3393 
3394  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3395  ost->index, ost->enc ? ost->enc->name : "?");
3396  continue;
3397  }
3398 
3399  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3400  input_streams[ost->source_index]->file_index,
3401  input_streams[ost->source_index]->st->index,
3402  ost->file_index,
3403  ost->index);
3404  if (ost->sync_ist != input_streams[ost->source_index])
3405  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3406  ost->sync_ist->file_index,
3407  ost->sync_ist->st->index);
3408  if (ost->stream_copy)
3409  av_log(NULL, AV_LOG_INFO, " (copy)");
3410  else {
/* "native" marks the case where the codec implementation name equals
 * the codec descriptor name (i.e. the built-in decoder/encoder). */
3411  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3412  const AVCodec *out_codec = ost->enc;
3413  const char *decoder_name = "?";
3414  const char *in_codec_name = "?";
3415  const char *encoder_name = "?";
3416  const char *out_codec_name = "?";
3417  const AVCodecDescriptor *desc;
3418 
3419  if (in_codec) {
3420  decoder_name = in_codec->name;
3421  desc = avcodec_descriptor_get(in_codec->id);
3422  if (desc)
3423  in_codec_name = desc->name;
3424  if (!strcmp(decoder_name, in_codec_name))
3425  decoder_name = "native";
3426  }
3427 
3428  if (out_codec) {
3429  encoder_name = out_codec->name;
3430  desc = avcodec_descriptor_get(out_codec->id);
3431  if (desc)
3432  out_codec_name = desc->name;
3433  if (!strcmp(encoder_name, out_codec_name))
3434  encoder_name = "native";
3435  }
3436 
3437  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3438  in_codec_name, decoder_name,
3439  out_codec_name, encoder_name);
3440  }
3441  av_log(NULL, AV_LOG_INFO, "\n");
3442  }
3443 
3444  if (ret) {
3445  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3446  return ret;
3447  }
3448 
3449  if (sdp_filename || want_sdp) {
3450  print_sdp();
3451  }
3452 
3453  transcode_init_done = 1;
3454 
3455  return 0;
3456 }
3457 
3458 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3459 static int need_output(void)
3460 {
3461  int i;
3462 
3463  for (i = 0; i < nb_output_streams; i++) {
3464  OutputStream *ost = output_streams[i];
3465  OutputFile *of = output_files[ost->file_index];
3466  AVFormatContext *os = output_files[ost->file_index]->ctx;
3467 
3468  if (ost->finished ||
3469  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3470  continue;
3471  if (ost->frame_number >= ost->max_frames) {
3472  int j;
3473  for (j = 0; j < of->ctx->nb_streams; j++)
3474  close_output_stream(output_streams[of->ost_index + j]);
3475  continue;
3476  }
3477 
3478  return 1;
3479  }
3480 
3481  return 0;
3482 }
3483 
3484 /**
3485  * Select the output stream to process.
3486  *
3487  * @return selected output stream, or NULL if none available
3488  */
/* NOTE(review): the function signature line (original line 3489) was dropped
 * by the extraction — presumably `static OutputStream *choose_output(void)`;
 * confirm against the complete file. Picks the non-finished stream with the
 * smallest current dts (rescaled to AV_TIME_BASE); a stream with no dts yet
 * sorts first (INT64_MIN). Returns NULL if the winner is flagged unavailable. */
3490 {
3491  int i;
3492  int64_t opts_min = INT64_MAX;
3493  OutputStream *ost_min = NULL;
3494 
3495  for (i = 0; i < nb_output_streams; i++) {
3496  OutputStream *ost = output_streams[i];
3497  int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3498  av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3499  AV_TIME_BASE_Q);
3500  if (ost->st->cur_dts == AV_NOPTS_VALUE)
3501  av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3502 
3503  if (!ost->finished && opts < opts_min) {
3504  opts_min = opts;
3505  ost_min = ost->unavailable ? NULL : ost;
3506  }
3507  }
3508  return ost_min;
3509 }
3510 
/* Enable (on != 0) or disable terminal echo on stdin; a silent no-op where
 * termios is unavailable or tcgetattr() fails. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios term;

    if (tcgetattr(0, &term) != 0)
        return;

    if (on)
        term.c_lflag |= ECHO;
    else
        term.c_lflag &= ~ECHO;

    tcsetattr(0, TCSANOW, &term);
#endif
}
3522 
/* Interactive keyboard handler polled from the main transcode loop.
 * Handles: 'q' quit, '+'/'-' verbosity, 's' QP histogram toggle, 'h' packet
 * dump cycling, 'c'/'C' filtergraph commands, 'd'/'D' debug modes, '?' help.
 * Returns AVERROR_EXIT to request termination, 0 otherwise.
 *
 * NOTE(review): the extraction dropped the signature line (original 3523,
 * presumably `static int check_keyboard_interaction(int64_t cur_time)`) and
 * lines 3547 and 3584 (an av_log/fprintf opener) — confirm against the
 * complete file before editing. */
3524 {
3525  int i, ret, key;
3526  static int64_t last_time;
3527  if (received_nb_signals)
3528  return AVERROR_EXIT;
3529  /* read_key() returns 0 on EOF */
/* Poll the keyboard at most every 100 ms, and never when daemonized. */
3530  if(cur_time - last_time >= 100000 && !run_as_daemon){
3531  key = read_key();
3532  last_time = cur_time;
3533  }else
3534  key = -1;
3535  if (key == 'q')
3536  return AVERROR_EXIT;
3537  if (key == '+') av_log_set_level(av_log_get_level()+10);
3538  if (key == '-') av_log_set_level(av_log_get_level()-10);
3539  if (key == 's') qp_hist ^= 1;
/* 'h' cycles: off -> packet dump -> packet+hex dump -> off. */
3540  if (key == 'h'){
3541  if (do_hex_dump){
3542  do_hex_dump = do_pkt_dump = 0;
3543  } else if(do_pkt_dump){
3544  do_hex_dump = 1;
3545  } else
3546  do_pkt_dump = 1;
3548  }
/* 'c' sends a command to the first matching filter, 'C' to all. */
3549  if (key == 'c' || key == 'C'){
3550  char buf[4096], target[64], command[256], arg[256] = {0};
3551  double time;
3552  int k, n = 0;
3553  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3554  i = 0;
3555  set_tty_echo(1);
3556  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3557  if (k > 0)
3558  buf[i++] = k;
3559  buf[i] = 0;
3560  set_tty_echo(0);
3561  fprintf(stderr, "\n");
3562  if (k > 0 &&
3563  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3564  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3565  target, time, command, arg);
3566  for (i = 0; i < nb_filtergraphs; i++) {
3567  FilterGraph *fg = filtergraphs[i];
3568  if (fg->graph) {
/* time < 0 means "send now"; otherwise the command is queued for later. */
3569  if (time < 0) {
3570  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3571  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3572  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3573  } else if (key == 'c') {
3574  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3575  ret = AVERROR_PATCHWELCOME;
3576  } else {
3577  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3578  if (ret < 0)
3579  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3580  }
3581  }
3582  }
3583  } else {
3585  "Parse error, at least 3 arguments were expected, "
3586  "only %d given in string '%s'\n", n, buf);
3587  }
3588  }
/* 'D' cycles the codec debug bitmask; 'd' prompts for an explicit value. */
3589  if (key == 'd' || key == 'D'){
3590  int debug=0;
3591  if(key == 'D') {
3592  debug = input_streams[0]->st->codec->debug<<1;
3593  if(!debug) debug = 1;
3594  while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3595  debug += debug;
3596  }else{
3597  char buf[32];
3598  int k = 0;
3599  i = 0;
3600  set_tty_echo(1);
3601  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3602  if (k > 0)
3603  buf[i++] = k;
3604  buf[i] = 0;
3605  set_tty_echo(0);
3606  fprintf(stderr, "\n");
3607  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3608  fprintf(stderr,"error parsing debug value\n");
3609  }
3610  for(i=0;i<nb_input_streams;i++) {
3611  input_streams[i]->st->codec->debug = debug;
3612  }
3613  for(i=0;i<nb_output_streams;i++) {
3614  OutputStream *ost = output_streams[i];
3615  ost->enc_ctx->debug = debug;
3616  }
3617  if(debug) av_log_set_level(AV_LOG_DEBUG);
3618  fprintf(stderr,"debug=%d\n", debug);
3619  }
3620  if (key == '?'){
3621  fprintf(stderr, "key function\n"
3622  "? show this help\n"
3623  "+ increase verbosity\n"
3624  "- decrease verbosity\n"
3625  "c Send command to first matching filter supporting it\n"
3626  "C Send/Que command to all matching filters\n"
3627  "D cycle through available debug modes\n"
3628  "h dump packets/hex press to cycle through the 3 states\n"
3629  "q quit\n"
3630  "s Show QP histogram\n"
3631  );
3632  }
3633  return 0;
3634 }
3635 
3636 #if HAVE_PTHREADS
/* Demuxer thread body (one per input file when several inputs are used):
 * reads packets from the file and forwards them to the main thread through
 * f->in_thread_queue. On any read/send error the error is stored in the
 * queue's receive side so the main thread sees it.
 *
 * NOTE(review): original line 3659 (the av_log opener for the warning text
 * below) was dropped by the extraction. */
3637 static void *input_thread(void *arg)
3638 {
3639  InputFile *f = arg;
3640  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3641  int ret = 0;
3642 
3643  while (1) {
3644  AVPacket pkt;
3645  ret = av_read_frame(f->ctx, &pkt);
3646 
3647  if (ret == AVERROR(EAGAIN)) {
3648  av_usleep(10000);
3649  continue;
3650  }
3651  if (ret < 0) {
3652  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3653  break;
3654  }
3655  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* Queue full in non-blocking mode: fall back to blocking sends once and
 * warn that thread_queue_size is too small. */
3656  if (flags && ret == AVERROR(EAGAIN)) {
3657  flags = 0;
3658  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3660  "Thread message queue blocking; consider raising the "
3661  "thread_queue_size option (current value: %d)\n",
3662  f->thread_queue_size);
3663  }
3664  if (ret < 0) {
3665  if (ret != AVERROR_EOF)
3666  av_log(f->ctx, AV_LOG_ERROR,
3667  "Unable to send packet to main thread: %s\n",
3668  av_err2str(ret));
/* Send failed: the packet was not consumed by the queue, free it here. */
3669  av_packet_unref(&pkt);
3670  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3671  break;
3672  }
3673  }
3674 
3675  return NULL;
3676 }
3677 
/* Tear down all demuxer threads: drain any packets still queued, join each
 * thread and free its message queue.
 *
 * NOTE(review): original line 3688 was dropped by the extraction —
 * presumably `av_thread_message_queue_set_err_send(f->in_thread_queue,
 * AVERROR_EOF);` to unblock the sender; confirm against the complete file. */
3678 static void free_input_threads(void)
3679 {
3680  int i;
3681 
3682  for (i = 0; i < nb_input_files; i++) {
3683  InputFile *f = input_files[i];
3684  AVPacket pkt;
3685 
3686  if (!f || !f->in_thread_queue)
3687  continue;
/* Drain queued packets so the input thread can exit its send loop. */
3689  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3690  av_packet_unref(&pkt);
3691 
3692  pthread_join(f->thread, NULL);
3693  f->joined = 1;
3694  av_thread_message_queue_free(&f->in_thread_queue);
3695  }
3696 }
3697 
3698 static int init_input_threads(void)
3699 {
3700  int i, ret;
3701 
3702  if (nb_input_files == 1)
3703  return 0;
3704 
3705  for (i = 0; i < nb_input_files; i++) {
3706  InputFile *f = input_files[i];
3707 
3708  if (f->ctx->pb ? !f->ctx->pb->seekable :
3709  strcmp(f->ctx->iformat->name, "lavfi"))
3710  f->non_blocking = 1;
3711  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3712  f->thread_queue_size, sizeof(AVPacket));
3713  if (ret < 0)
3714  return ret;
3715 
3716  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3717  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3718  av_thread_message_queue_free(&f->in_thread_queue);
3719  return AVERROR(ret);
3720  }
3721  }
3722  return 0;
3723 }
3724 
/* Fetch the next packet for this input file from its demuxer thread's queue.
 * NOTE(review): original line 3729 was dropped by the extraction — the
 * ternary presumably completes as `AV_THREAD_MESSAGE_NONBLOCK : 0);`;
 * confirm against the complete file. */
3725 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3726 {
3727  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3728  f->non_blocking ?
3730 }
3731 #endif
3732 
/* Read the next packet for this input file, honoring -re (rate emulation):
 * returns AVERROR(EAGAIN) while any stream of the file is ahead of wall-clock
 * time. With multiple inputs the packet comes from the demuxer thread queue,
 * otherwise directly from av_read_frame().
 *
 * NOTE(review): the signature line (original 3733, presumably
 * `static int get_input_packet(InputFile *f, AVPacket *pkt)`) was dropped by
 * the extraction — confirm against the complete file. */
3734 {
3735  if (f->rate_emu) {
3736  int i;
3737  for (i = 0; i < f->nb_streams; i++) {
3738  InputStream *ist = input_streams[f->ist_index + i];
/* Compare the stream's dts (in microseconds) against elapsed wall time. */
3739  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3740  int64_t now = av_gettime_relative() - ist->start;
3741  if (pts > now)
3742  return AVERROR(EAGAIN);
3743  }
3744  }
3745 
3746 #if HAVE_PTHREADS
3747  if (nb_input_files > 1)
3748  return get_input_packet_mt(f, pkt);
3749 #endif
3750  return av_read_frame(f->ctx, pkt);
3751 }
3752 
3753 static int got_eagain(void)
3754 {
3755  int i;
3756  for (i = 0; i < nb_output_streams; i++)
3757  if (output_streams[i]->unavailable)
3758  return 1;
3759  return 0;
3760 }
3761 
3762 static void reset_eagain(void)
3763 {
3764  int i;
3765  for (i = 0; i < nb_input_files; i++)
3766  input_files[i]->eagain = 0;
3767  for (i = 0; i < nb_output_streams; i++)
3768  output_streams[i]->unavailable = 0;
3769 }
3770 
3771 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
3772 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3773  AVRational time_base)
3774 {
3775  int ret;
3776 
3777  if (!*duration) {
3778  *duration = tmp;
3779  return tmp_time_base;
3780  }
3781 
3782  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3783  if (ret < 0) {
3784  *duration = tmp;
3785  return tmp_time_base;
3786  }
3787 
3788  return time_base;
3789 }
3790 
/* Rewind an input file to its start for -stream_loop: seek back to
 * start_time, flush all active decoders, then extend ifile->duration by the
 * length of the iteration just played (derived from audio sample counts when
 * audio is present, otherwise from one video frame duration plus
 * max_pts - min_pts) so that timestamps keep increasing across loops.
 * Returns the av_seek_frame() result. */
3791 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3792 {
3793  InputStream *ist;
3794  AVCodecContext *avctx;
3795  int i, ret, has_audio = 0;
3796  int64_t duration = 0;
3797 
3798  ret = av_seek_frame(is, -1, is->start_time, 0);
3799  if (ret < 0)
3800  return ret;
3801 
/* First pass: flush decoders and detect whether usable audio exists. */
3802  for (i = 0; i < ifile->nb_streams; i++) {
3803  ist = input_streams[ifile->ist_index + i];
3804  avctx = ist->dec_ctx;
3805 
3806  // flush decoders
3807  if (ist->decoding_needed) {
3808  process_input_packet(ist, NULL, 1);
3809  avcodec_flush_buffers(avctx);
3810  }
3811 
3812  /* duration is the length of the last frame in a stream
3813  * when audio stream is present we don't care about
3814  * last video frame length because it's not defined exactly */
3815  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
3816  has_audio = 1;
3817  }
3818 
/* Second pass: compute this iteration's duration per stream and fold the
 * maximum into ifile->duration / ifile->time_base. */
3819  for (i = 0; i < ifile->nb_streams; i++) {
3820  ist = input_streams[ifile->ist_index + i];
3821  avctx = ist->dec_ctx;
3822 
3823  if (has_audio) {
3824  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3825  AVRational sample_rate = {1, avctx->sample_rate};
3826 
/* Last-frame length from the decoded sample count of that frame. */
3827  duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
3828  } else
3829  continue;
3830  } else {
3831  if (ist->framerate.num) {
3832  duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3833  } else if (ist->st->avg_frame_rate.num) {
3834  duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3835  } else duration = 1;
3836  }
3837  if (!ifile->duration)
3838  ifile->time_base = ist->st->time_base;
3839  /* the total duration of the stream, max_pts - min_pts is
3840  * the duration of the stream without the last frame */
3841  duration += ist->max_pts - ist->min_pts;
3842  ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
3843  ifile->time_base);
3844  }
3845 
/* -stream_loop N: count down one completed loop (negative means forever). */
3846  if (ifile->loop > 0)
3847  ifile->loop--;
3848 
3849  return ret;
3850 }
3851 
3852 /*
3853  * Return
3854  * - 0 -- one packet was read and processed
3855  * - AVERROR(EAGAIN) -- no packets were available for selected file,
3856  * this function should be called again
3857  * - AVERROR_EOF -- this function should not be called again
3858  */
/* NOTE(review): this listing was extracted with several original source lines
 * dropped (3917, 3943-3945, 3990, 4018, 4020, 4027, 4046, 4048, 4057, 4087) —
 * several statements below are visibly truncated (e.g. the assignments of
 * pkt_dts at original lines 4018/4046 and two av_log openers). Consult the
 * complete file before modifying this function. */
3859 static int process_input(int file_index)
3860 {
3861  InputFile *ifile = input_files[file_index];
3862  AVFormatContext *is;
3863  InputStream *ist;
3864  AVPacket pkt;
3865  int ret, i, j;
3866  int64_t duration;
3867  int64_t pkt_dts;
3868 
3869  is = ifile->ctx;
3870  ret = get_input_packet(ifile, &pkt);
3871 
3872  if (ret == AVERROR(EAGAIN)) {
3873  ifile->eagain = 1;
3874  return ret;
3875  }
/* EOF with -stream_loop pending: rewind and retry the read once. */
3876  if (ret < 0 && ifile->loop) {
3877  if ((ret = seek_to_start(ifile, is)) < 0)
3878  return ret;
3879  ret = get_input_packet(ifile, &pkt);
3880  if (ret == AVERROR(EAGAIN)) {
3881  ifile->eagain = 1;
3882  return ret;
3883  }
3884  }
/* Definitive end of this input: flush decoders, finish copy-only outputs,
 * and report EAGAIN so other inputs keep being serviced. */
3885  if (ret < 0) {
3886  if (ret != AVERROR_EOF) {
3887  print_error(is->filename, ret);
3888  if (exit_on_error)
3889  exit_program(1);
3890  }
3891 
3892  for (i = 0; i < ifile->nb_streams; i++) {
3893  ist = input_streams[ifile->ist_index + i];
3894  if (ist->decoding_needed) {
3895  ret = process_input_packet(ist, NULL, 0);
3896  if (ret>0)
3897  return 0;
3898  }
3899 
3900  /* mark all outputs that don't go through lavfi as finished */
3901  for (j = 0; j < nb_output_streams; j++) {
3902  OutputStream *ost = output_streams[j];
3903 
3904  if (ost->source_index == ifile->ist_index + i &&
3905  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3906  finish_output_stream(ost);
3907  }
3908  }
3909 
3910  ifile->eof_reached = 1;
3911  return AVERROR(EAGAIN);
3912  }
3913 
3914  reset_eagain();
3915 
3916  if (do_pkt_dump) {
3918  is->streams[pkt.stream_index]);
3919  }
3920  /* the following test is needed in case new streams appear
3921  dynamically in stream : we ignore them */
3922  if (pkt.stream_index >= ifile->nb_streams) {
3923  report_new_stream(file_index, &pkt);
3924  goto discard_packet;
3925  }
3926 
3927  ist = input_streams[ifile->ist_index + pkt.stream_index];
3928 
3929  ist->data_size += pkt.size;
3930  ist->nb_packets++;
3931 
3932  if (ist->discard)
3933  goto discard_packet;
3934 
3935  if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
3936  av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
3937  exit_program(1);
3938  }
3939 
3940  if (debug_ts) {
3941  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3942  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3946  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3947  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3948  av_ts2str(input_files[ist->file_index]->ts_offset),
3949  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
3950  }
3951 
/* Timestamp wraparound correction for containers with < 64 pts_wrap_bits. */
3952  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3953  int64_t stime, stime2;
3954  // Correcting starttime based on the enabled streams
3955  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
3956  // so we instead do it here as part of discontinuity handling
3957  if ( ist->next_dts == AV_NOPTS_VALUE
3958  && ifile->ts_offset == -is->start_time
3959  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3960  int64_t new_start_time = INT64_MAX;
3961  for (i=0; i<is->nb_streams; i++) {
3962  AVStream *st = is->streams[i];
3963  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3964  continue;
3965  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3966  }
3967  if (new_start_time > is->start_time) {
3968  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3969  ifile->ts_offset = -new_start_time;
3970  }
3971  }
3972 
3973  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3974  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3975  ist->wrap_correction_done = 1;
3976 
3977  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3978  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3979  ist->wrap_correction_done = 0;
3980  }
3981  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3982  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3983  ist->wrap_correction_done = 0;
3984  }
3985  }
3986 
3987  /* add the stream-global side data to the first packet */
3988  if (ist->nb_packets == 1) {
3989  if (ist->st->nb_side_data)
3991  for (i = 0; i < ist->st->nb_side_data; i++) {
3992  AVPacketSideData *src_sd = &ist->st->side_data[i];
3993  uint8_t *dst_data;
3994 
3995  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
3996  continue;
/* The display matrix is handled by the autorotate filter instead. */
3997  if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3998  continue;
3999 
4000  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4001  if (!dst_data)
4002  exit_program(1);
4003 
4004  memcpy(dst_data, src_sd->data, src_sd->size);
4005  }
4006  }
4007 
/* Apply the per-file timestamp offset and the -itsscale factor. */
4008  if (pkt.dts != AV_NOPTS_VALUE)
4009  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4010  if (pkt.pts != AV_NOPTS_VALUE)
4011  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4012 
4013  if (pkt.pts != AV_NOPTS_VALUE)
4014  pkt.pts *= ist->ts_scale;
4015  if (pkt.dts != AV_NOPTS_VALUE)
4016  pkt.dts *= ist->ts_scale;
4017 
/* Inter-stream discontinuity check against the file's last seen dts
 * (truncated: the pkt_dts assignment at original line 4018 is missing). */
4019  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4021  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4022  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4023  int64_t delta = pkt_dts - ifile->last_ts;
4024  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4025  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4026  ifile->ts_offset -= delta;
4028  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4029  delta, ifile->ts_offset);
4030  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4031  if (pkt.pts != AV_NOPTS_VALUE)
4032  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4033  }
4034  }
4035 
/* Shift timestamps by the accumulated loop duration (-stream_loop). */
4036  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4037  if (pkt.pts != AV_NOPTS_VALUE) {
4038  pkt.pts += duration;
4039  ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4040  ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4041  }
4042 
4043  if (pkt.dts != AV_NOPTS_VALUE)
4044  pkt.dts += duration;
4045 
/* Intra-stream discontinuity check against the stream's predicted dts
 * (truncated: the pkt_dts assignment at original line 4046 is missing). */
4047  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4049  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4050  !copy_ts) {
4051  int64_t delta = pkt_dts - ist->next_dts;
4052  if (is->iformat->flags & AVFMT_TS_DISCONT) {
4053  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4054  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4055  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4056  ifile->ts_offset -= delta;
4058  "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4059  delta, ifile->ts_offset);
4060  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4061  if (pkt.pts != AV_NOPTS_VALUE)
4062  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4063  }
4064  } else {
/* Non-discontinuous container: drop wildly out-of-range timestamps. */
4065  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4066  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4067  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4068  pkt.dts = AV_NOPTS_VALUE;
4069  }
4070  if (pkt.pts != AV_NOPTS_VALUE){
4071  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4072  delta = pkt_pts - ist->next_dts;
4073  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4074  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4075  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4076  pkt.pts = AV_NOPTS_VALUE;
4077  }
4078  }
4079  }
4080  }
4081 
4082  if (pkt.dts != AV_NOPTS_VALUE)
4083  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4084 
4085  if (debug_ts) {
4086  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4088  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4089  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4090  av_ts2str(input_files[ist->file_index]->ts_offset),
4091  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4092  }
4093 
4094  sub2video_heartbeat(ist, pkt.pts);
4095 
4096  process_input_packet(ist, &pkt, 0);
4097 
4098 discard_packet:
4099  av_packet_unref(&pkt);
4100 
4101  return 0;
4102 }
4103 
4104 /**
4105  * Perform a step of transcoding for the specified filter graph.
4106  *
4107  * @param[in] graph filter graph to consider
4108  * @param[out] best_ist input stream where a frame would allow to continue
4109  * @return 0 for success, <0 for error
4110  */
4111 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4112 {
4113  int i, ret;
4114  int nb_requests, nb_requests_max = 0;
4115  InputFilter *ifilter;
4116  InputStream *ist;
4117 
4118  *best_ist = NULL;
4119  ret = avfilter_graph_request_oldest(graph->graph);
4120  if (ret >= 0)
4121  return reap_filters(0);
4122 
4123  if (ret == AVERROR_EOF) {
4124  ret = reap_filters(1);
4125  for (i = 0; i < graph->nb_outputs; i++)
4126  close_output_stream(graph->outputs[i]->ost);
4127  return ret;
4128  }
4129  if (ret != AVERROR(EAGAIN))
4130  return ret;
4131 
4132  for (i = 0; i < graph->nb_inputs; i++) {
4133  ifilter = graph->inputs[i];
4134  ist = ifilter->ist;
4135  if (input_files[ist->file_index]->eagain ||
4136  input_files[ist->file_index]->eof_reached)
4137  continue;
4138  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4139  if (nb_requests > nb_requests_max) {
4140  nb_requests_max = nb_requests;
4141  *best_ist = ist;
4142  }
4143  }
4144 
4145  if (!*best_ist)
4146  for (i = 0; i < graph->nb_outputs; i++)
4147  graph->outputs[i]->ost->unavailable = 1;
4148 
4149  return 0;
4150 }
4151 
4152 /**
4153  * Run a single step of transcoding.
4154  *
4155  * @return 0 for success, <0 for error
4156  */
4157 static int transcode_step(void)
4158 {
4159  OutputStream *ost;
4160  InputStream *ist;
4161  int ret;
4162 
4163  ost = choose_output();
4164  if (!ost) {
4165  if (got_eagain()) {
4166  reset_eagain();
4167  av_usleep(10000);
4168  return 0;
4169  }
4170  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4171  return AVERROR_EOF;
4172  }
4173 
4174  if (ost->filter) {
4175  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4176  return ret;
4177  if (!ist)
4178  return 0;
4179  } else {
4180  av_assert0(ost->source_index >= 0);
4181  ist = input_streams[ost->source_index];
4182  }
4183 
4184  ret = process_input(ist->file_index);
4185  if (ret == AVERROR(EAGAIN)) {
4186  if (input_files[ist->file_index]->eagain)
4187  ost->unavailable = 1;
4188  return 0;
4189  }
4190 
4191  if (ret < 0)
4192  return ret == AVERROR_EOF ? 0 : ret;
4193 
4194  return reap_filters(0);
4195 }
4196 
4197 /*
4198  * The following code is the main loop of the file converter
4199  */
4200 static int transcode(void)
4201 {
4202  int ret, i;
4203  AVFormatContext *os;
4204  OutputStream *ost;
4205  InputStream *ist;
4206  int64_t timer_start;
4207  int64_t total_packets_written = 0;
4208 
4209  ret = transcode_init();
4210  if (ret < 0)
4211  goto fail;
4212 
4213  if (stdin_interaction) {
4214  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4215  }
4216 
4217  timer_start = av_gettime_relative();
4218 
4219 #if HAVE_PTHREADS
4220  if ((ret = init_input_threads()) < 0)
4221  goto fail;
4222 #endif
4223 
4224  while (!received_sigterm) {
4225  int64_t cur_time= av_gettime_relative();
4226 
4227  /* if 'q' pressed, exits */
4228  if (stdin_interaction)
4229  if (check_keyboard_interaction(cur_time) < 0)
4230  break;
4231 
4232  /* check if there's any stream where output is still needed */
4233  if (!need_output()) {
4234  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4235  break;
4236  }
4237 
4238  ret = transcode_step();
4239  if (ret < 0 && ret != AVERROR_EOF) {
4240  char errbuf[128];
4241  av_strerror(ret, errbuf, sizeof(errbuf));
4242 
4243  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4244  break;
4245  }
4246 
4247  /* dump report by using the output first video and audio streams */
4248  print_report(0, timer_start, cur_time);
4249  }
4250 #if HAVE_PTHREADS
4251  free_input_threads();
4252 #endif
4253 
4254  /* at the end of stream, we must flush the decoder buffers */
4255  for (i = 0; i < nb_input_streams; i++) {
4256  ist = input_streams[i];
4257  if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4258  process_input_packet(ist, NULL, 0);
4259  }
4260  }
4261  flush_encoders();
4262 
4263  term_exit();
4264 
4265  /* write the trailer if needed and close file */
4266  for (i = 0; i < nb_output_files; i++) {
4267  os = output_files[i]->ctx;
4268  if ((ret = av_write_trailer(os)) < 0) {
4269  av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4270  if (exit_on_error)
4271  exit_program(1);
4272  }
4273  }
4274 
4275  /* dump report by using the first video and audio streams */
4276  print_report(1, timer_start, av_gettime_relative());
4277 
4278  /* close each encoder */
4279  for (i = 0; i < nb_output_streams; i++) {
4280  ost = output_streams[i];
4281  if (ost->encoding_needed) {
4282  av_freep(&ost->enc_ctx->stats_in);
4283  }
4284  total_packets_written += ost->packets_written;
4285  }
4286 
4287  if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4288  av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4289  exit_program(1);
4290  }
4291 
4292  /* close each decoder */
4293  for (i = 0; i < nb_input_streams; i++) {
4294  ist = input_streams[i];
4295  if (ist->decoding_needed) {
4296  avcodec_close(ist->dec_ctx);
4297  if (ist->hwaccel_uninit)
4298  ist->hwaccel_uninit(ist->dec_ctx);
4299  }
4300  }
4301 
4303 
4304  /* finished ! */
4305  ret = 0;
4306 
4307  fail:
4308 #if HAVE_PTHREADS
4309  free_input_threads();
4310 #endif
4311 
4312  if (output_streams) {
4313  for (i = 0; i < nb_output_streams; i++) {
4314  ost = output_streams[i];
4315  if (ost) {
4316  if (ost->logfile) {
4317  if (fclose(ost->logfile))
4319  "Error closing logfile, loss of information possible: %s\n",
4320  av_err2str(AVERROR(errno)));
4321  ost->logfile = NULL;
4322  }
4323  av_freep(&ost->forced_kf_pts);
4324  av_freep(&ost->apad);
4325  av_freep(&ost->disposition);
4326  av_dict_free(&ost->encoder_opts);
4327  av_dict_free(&ost->sws_dict);
4328  av_dict_free(&ost->swr_opts);
4329  av_dict_free(&ost->resample_opts);
4330  }
4331  }
4332  }
4333  return ret;
4334 }
4335 
4336 
4337 static int64_t getutime(void)
4338 {
4339 #if HAVE_GETRUSAGE
4340  struct rusage rusage;
4341 
4342  getrusage(RUSAGE_SELF, &rusage);
4343  return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4344 #elif HAVE_GETPROCESSTIMES
4345  HANDLE proc;
4346  FILETIME c, e, k, u;
4347  proc = GetCurrentProcess();
4348  GetProcessTimes(proc, &c, &e, &k, &u);
4349  return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4350 #else
4351  return av_gettime_relative();
4352 #endif
4353 }
4354 
4355 static int64_t getmaxrss(void)
4356 {
4357 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4358  struct rusage rusage;
4359  getrusage(RUSAGE_SELF, &rusage);
4360  return (int64_t)rusage.ru_maxrss * 1024;
4361 #elif HAVE_GETPROCESSMEMORYINFO
4362  HANDLE proc;
4363  PROCESS_MEMORY_COUNTERS memcounters;
4364  proc = GetCurrentProcess();
4365  memcounters.cb = sizeof(memcounters);
4366  GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4367  return memcounters.PeakPagefileUsage;
4368 #else
4369  return 0;
4370 #endif
4371 }
4372 
/* Log callback that discards every message — presumably installed via
 * av_log_set_callback() to silence library logging; confirm at call site. */
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}
4376 
4377 int main(int argc, char **argv)
4378 {
4379  int ret;
4380  int64_t ti;
4381 
4382  init_dynload();
4383 
4385 
4386  setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4387 
4389  parse_loglevel(argc, argv, options);
4390 
4391  if(argc>1 && !strcmp(argv[1], "-d")){
4392  run_as_daemon=1;
4394  argc--;
4395  argv++;
4396  }
4397 
4399 #if CONFIG_AVDEVICE
4401 #endif
4403  av_register_all();
4405 
4406  show_banner(argc, argv, options);
4407 
4408  term_init();
4409 
4410  /* parse options and open all input/output files */
4411  ret = ffmpeg_parse_options(argc, argv);
4412  if (ret < 0)
4413  exit_program(1);
4414 
4415  if (nb_output_files <= 0 && nb_input_files == 0) {
4416  show_usage();
4417  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4418  exit_program(1);
4419  }
4420 
4421  /* file converter / grab */
4422  if (nb_output_files <= 0) {
4423  av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4424  exit_program(1);
4425  }
4426 
4427 // if (nb_input_files == 0) {
4428 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4429 // exit_program(1);
4430 // }
4431 
4432  current_time = ti = getutime();
4433  if (transcode() < 0)
4434  exit_program(1);
4435  ti = getutime() - ti;
4436  if (do_benchmark) {
4437  av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4438  }
4439  av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4442  exit_program(69);
4443 
4445  return main_return_code;
4446 }
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1535
int64_t pts
current pts of the decoded frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:279
int nb_bitstream_filters
Definition: ffmpeg.h:420
#define extra_bits(eb)
Definition: intrax8.c:159
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:894
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:113
uint32_t BOOL
#define FF_DEBUG_VIS_MB_TYPE
only access through AVOptions from outside libavcodec
Definition: avcodec.h:2938
enum AVChromaLocation chroma_location
Definition: avcodec.h:4069
int got_output
Definition: ffmpeg.h:308
#define AV_DISPOSITION_METADATA
Definition: avformat.h:865
void av_bsf_free(AVBSFContext **ctx)
Free a bitstream filter context and everything associated with it; write NULL into the supplied point...
Definition: bsf.c:36
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
Definition: ffmpeg.c:1830
int64_t first_dts
Timestamp corresponding to the last dts sync point.
Definition: avformat.h:1052
int guess_input_channel_layout(InputStream *ist)
Definition: ffmpeg.c:1943
#define NULL
Definition: coverity.c:32
int frame_number
Definition: ffmpeg.h:409
const struct AVCodec * codec
Definition: avcodec.h:1684
Definition: ffmpeg.h:386
AVRational framerate
Definition: avcodec.h:3374
enum AVFieldOrder field_order
Video only.
Definition: avcodec.h:4060
enum HWAccelID active_hwaccel_id
Definition: ffmpeg.h:335
const char * s
Definition: avisynth_c.h:768
Bytestream IO Context.
Definition: avio.h:147
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static enum AVPixelFormat pix_fmt
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:505
void term_init(void)
Definition: ffmpeg.c:366
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:309
AVCodecParameters * par_out
Parameters of the output stream.
Definition: avcodec.h:5768
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
int64_t av_frame_get_pkt_duration(const AVFrame *frame)
uint8_t * name
Definition: ffmpeg.h:238
int nb_outputs
Definition: ffmpeg.h:255
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:124
AVDictionary * swr_opts
Definition: ffmpeg.h:466
#define DECODING_FOR_FILTER
Definition: ffmpeg.h:265
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2266
int resample_channels
Definition: ffmpeg.h:303
#define av_realloc_f(p, o, n)
This structure describes decoded (raw) audio or video data.
Definition: frame.h:184
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
static int init_output_stream_streamcopy(OutputStream *ost)
Definition: ffmpeg.c:2676
void term_exit(void)
Definition: ffmpeg.c:308
int stream_copy
Definition: ffmpeg.h:471
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
Write a packet to an output media file ensuring correct interleaving.
Definition: mux.c:1189
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3921
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1584
AVOption.
Definition: opt.h:245
AVRational frame_rate
Definition: ffmpeg.h:436
int64_t * forced_kf_pts
Definition: ffmpeg.h:445
void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq, int err)
Set the receiving error code.
static void flush(AVCodecContext *avctx)
static void sub2video_flush(InputStream *ist)
Definition: ffmpeg.c:288
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:3014
AVBufferRef * hw_frames_ctx
Definition: ffmpeg.h:342
char * filters
filtergraph associated to the -filter option
Definition: ffmpeg.h:461
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:873
#define CODEC_FLAG_PASS2
Definition: avcodec.h:1089
static int process_input(int file_index)
Definition: ffmpeg.c:3859
int exit_on_error
Definition: ffmpeg_opt.c:115
const char * fmt
Definition: avisynth_c.h:769
static int init_output_stream(OutputStream *ost, char *error, int error_len)
Definition: ffmpeg.c:2829
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
int64_t bit_rate
the average bitrate
Definition: avcodec.h:1740
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
Main libavfilter public API header.
int64_t pos
byte position in stream, -1 if unknown
Definition: avcodec.h:1620
uint8_t * bsf_extradata_updated
Definition: ffmpeg.h:421
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
Definition: dict.h:79
static int run_as_daemon
Definition: ffmpeg.c:127
Memory buffer source API.
const char * desc
Definition: nvenc.c:101
void av_log_set_level(int level)
Set the log level.
Definition: log.c:391
AVRational framerate
Definition: ffmpeg.h:292
Immediately push the frame to the output.
Definition: buffersrc.h:47
AVCodecParserContext * parser
Definition: ffmpeg.h:479
static int64_t cur_time
Definition: ffserver.c:262
int64_t max_pts
Definition: ffmpeg.h:285
int decoding_needed
Definition: ffmpeg.h:263
enum AVCodecID codec_id
Specific type of the encoded data (the codec used).
Definition: avcodec.h:3979
const struct AVBitStreamFilter * filter
The bitstream filter this context is an instance of.
Definition: avcodec.h:5746
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown)
Definition: avformat.h:948
void av_codec_set_pkt_timebase(AVCodecContext *avctx, AVRational val)
int num
Numerator.
Definition: rational.h:59
The bitstream filter state.
Definition: avcodec.h:5737
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
Definition: ffmpeg.c:1539
#define vsnprintf
Definition: snprintf.h:36
int index
stream index in AVFormatContext
Definition: avformat.h:882
int size
Definition: avcodec.h:1601
static int64_t getmaxrss(void)
Definition: ffmpeg.c:4355
const char * b
Definition: vf_curves.c:113
static int nb_frames_dup
Definition: ffmpeg.c:128
int av_log2(unsigned v)
Definition: intmath.c:26
static InputStream * get_input_stream(OutputStream *ost)
Definition: ffmpeg.c:2628
#define AVIO_FLAG_WRITE
write-only
Definition: avio.h:607
#define AV_DISPOSITION_DUB
Definition: avformat.h:835
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:2086
int eagain
Definition: ffmpeg.h:357
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
attribute_deprecated int avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
Encode a frame of audio.
Definition: utils.c:1796
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: cmdutils.c:1145
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1903
attribute_deprecated int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, const AVPacket *avpkt)
Decode the audio frame of size avpkt->size from avpkt->data into frame.
Definition: utils.c:2303
static void abort_codec_experimental(AVCodec *c, int encoder)
Definition: ffmpeg.c:604
#define AV_DISPOSITION_HEARING_IMPAIRED
stream for hearing impaired audiences
Definition: avformat.h:847
int quality
Definition: ffmpeg.h:492
unsigned num_rects
Definition: avcodec.h:3959
AVFrame * filter_frame
Definition: ffmpeg.h:270
static int transcode_init(void)
Definition: ffmpeg.c:3061
static int compare_int64(const void *a, const void *b)
Definition: ffmpeg.c:2635
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
Definition: ffmpeg.c:2315
char * stats_in
pass2 encoding statistics input buffer Concatenated stuff from stats_out of pass1 should be placed he...
Definition: avcodec.h:2852
int do_benchmark_all
Definition: ffmpeg_opt.c:108
#define AV_DISPOSITION_CLEAN_EFFECTS
stream without voice
Definition: avformat.h:849
int last_dropped
Definition: ffmpeg.h:430
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition