FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdint.h>
34 
35 #if HAVE_IO_H
36 #include <io.h>
37 #endif
38 #if HAVE_UNISTD_H
39 #include <unistd.h>
40 #endif
41 
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
45 #include "libavutil/opt.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
64 
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
68 
69 #if HAVE_SYS_RESOURCE_H
70 #include <sys/time.h>
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
74 #include <windows.h>
75 #endif
76 #if HAVE_GETPROCESSMEMORYINFO
77 #include <windows.h>
78 #include <psapi.h>
79 #endif
80 #if HAVE_SETCONSOLECTRLHANDLER
81 #include <windows.h>
82 #endif
83 
84 
85 #if HAVE_SYS_SELECT_H
86 #include <sys/select.h>
87 #endif
88 
89 #if HAVE_TERMIOS_H
90 #include <fcntl.h>
91 #include <sys/ioctl.h>
92 #include <sys/time.h>
93 #include <termios.h>
94 #elif HAVE_KBHIT
95 #include <conio.h>
96 #endif
97 
98 #if HAVE_PTHREADS
99 #include <pthread.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
/* Program identification strings consumed by the shared cmdutils banner code. */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
111 
/* Handle for the -vstats log; closed (with error check) in ffmpeg_cleanup(). */
112 static FILE *vstats_file;
113 
/* NULL-terminated list of symbolic variable names; presumably the variables
 * available in the -force_key_frames expression — confirm against the
 * expression-evaluation code, which is outside this chunk. */
114 const char *const forced_keyframes_const_names[] = {
115  "n",
116  "n_forced",
117  "prev_forced_n",
118  "prev_forced_t",
119  "t",
120  NULL
121 };
122 
/* Forward declarations for helpers defined later in this file. */
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
126 
/* Global transcode-state counters.
 * NOTE(review): several declarations originally between want_sdp and the
 * termios block (extraction residue lines 136-152) were lost when this file
 * was dumped; they likely included globals such as subtitle_out and the
 * input/output stream arrays referenced later in this file — confirm against
 * upstream before relying on this span being complete. */
static int run_as_daemon = 0;
static int nb_frames_dup = 0;
static unsigned dup_warning = 1000;
static int nb_frames_drop = 0;
static int64_t decode_error_stat[2];

static int want_sdp = 1;

/* Timestamp of the previous benchmark checkpoint, in the units returned by
 * getutime() (an int64_t). Declared int64_t — the original plain `int` would
 * truncate the value assigned in update_benchmark() (`current_time = t;`
 * where t is int64_t) and corrupt the `t - current_time` delta printed with
 * %PRIu64. */
static int64_t current_time;
153 #if HAVE_TERMIOS_H
154 
155 /* init terminal so that we can grab keys */
/* Terminal state saved by term_init() and restored by term_exit_sigsafe(). */
156 static struct termios oldtty;
/* Nonzero once oldtty holds valid state that must be restored on exit. */
157 static int restore_tty;
158 #endif
159 
160 #if HAVE_PTHREADS
/* Defined later in this file; called from ffmpeg_cleanup(). */
161 static void free_input_threads(void);
162 #endif
163 
164 /* sub2video hack:
165  Convert subtitles to video with alpha to insert them in filter graphs.
166  This is a temporary solution until libavfilter gets real subtitles support.
167  */
168 
/* Prepare ist->sub2video.frame as a fully transparent canvas sized to the
 * decoder dimensions (falling back to the configured sub2video size).
 * Returns 0 on success, a negative error code on allocation failure.
 * NOTE(review): the signature line (169) was lost in extraction; from the
 * caller in sub2video_update() this is presumably
 * `static int sub2video_get_blank_frame(InputStream *ist)` — confirm.
 * NOTE(review): line 177 is also elided; it likely sets frame->format before
 * the buffer allocation — confirm against upstream. */
170 {
171  int ret;
172  AVFrame *frame = ist->sub2video.frame;
173 
174  av_frame_unref(frame);
175  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
176  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
178  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
179  return ret;
/* Only plane 0 is cleared; the frame is assumed to be a packed single-plane
 * pixel format. */
180  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
181  return 0;
182 }
183 
184 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
185  AVSubtitleRect *r)
186 {
187  uint32_t *pal, *dst2;
188  uint8_t *src, *src2;
189  int x, y;
190 
191  if (r->type != SUBTITLE_BITMAP) {
192  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
193  return;
194  }
195  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
196  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
197  r->x, r->y, r->w, r->h, w, h
198  );
199  return;
200  }
201 
202  dst += r->y * dst_linesize + r->x * 4;
203  src = r->data[0];
204  pal = (uint32_t *)r->data[1];
205  for (y = 0; y < r->h; y++) {
206  dst2 = (uint32_t *)dst;
207  src2 = src;
208  for (x = 0; x < r->w; x++)
209  *(dst2++) = pal[*(src2++)];
210  dst += dst_linesize;
211  src += r->linesize[0];
212  }
213 }
214 
/* Stamp the prepared sub2video frame with the given pts, record it as the
 * last pushed pts, and feed the frame to each of the stream's filters. */
215 static void sub2video_push_ref(InputStream *ist, int64_t pts)
216 {
217  AVFrame *frame = ist->sub2video.frame;
218  int i;
219 
/* The frame must already have a buffer (sub2video_get_blank_frame / update). */
220  av_assert1(frame->data[0]);
221  ist->sub2video.last_pts = frame->pts = pts;
222  for (i = 0; i < ist->nb_filters; i++)
/* NOTE(review): the loop body (lines 223-225) was lost in extraction;
 * presumably an av_buffersrc_add_frame_flags() call pushing `frame` into
 * ist->filters[i]->filter with a keep-ref flag — confirm against upstream. */
226 }
227 
228 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
229 {
230  AVFrame *frame = ist->sub2video.frame;
231  int8_t *dst;
232  int dst_linesize;
233  int num_rects, i;
234  int64_t pts, end_pts;
235 
236  if (!frame)
237  return;
238  if (sub) {
239  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
240  AV_TIME_BASE_Q, ist->st->time_base);
241  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
242  AV_TIME_BASE_Q, ist->st->time_base);
243  num_rects = sub->num_rects;
244  } else {
245  pts = ist->sub2video.end_pts;
246  end_pts = INT64_MAX;
247  num_rects = 0;
248  }
249  if (sub2video_get_blank_frame(ist) < 0) {
251  "Impossible to get a blank canvas.\n");
252  return;
253  }
254  dst = frame->data [0];
255  dst_linesize = frame->linesize[0];
256  for (i = 0; i < num_rects; i++)
257  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
258  sub2video_push_ref(ist, pts);
259  ist->sub2video.end_pts = end_pts;
260 }
261 
262 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
263 {
264  InputFile *infile = input_files[ist->file_index];
265  int i, j, nb_reqs;
266  int64_t pts2;
267 
268  /* When a frame is read from a file, examine all sub2video streams in
269  the same file and send the sub2video frame again. Otherwise, decoded
270  video frames could be accumulating in the filter graph while a filter
271  (possibly overlay) is desperately waiting for a subtitle frame. */
272  for (i = 0; i < infile->nb_streams; i++) {
273  InputStream *ist2 = input_streams[infile->ist_index + i];
274  if (!ist2->sub2video.frame)
275  continue;
276  /* subtitles seem to be usually muxed ahead of other streams;
277  if not, subtracting a larger time here is necessary */
278  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
279  /* do not send the heartbeat frame if the subtitle is already ahead */
280  if (pts2 <= ist2->sub2video.last_pts)
281  continue;
282  if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
283  sub2video_update(ist2, NULL);
284  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
285  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
286  if (nb_reqs)
287  sub2video_push_ref(ist2, pts2);
288  }
289 }
290 
/* End-of-stream handling for a sub2video stream: clear any still-displayed
 * subtitle, then signal EOF to each of the stream's filters. */
291 static void sub2video_flush(InputStream *ist)
292 {
293  int i;
294 
/* end_pts < INT64_MAX means a subtitle may still be on screen; erase it. */
295  if (ist->sub2video.end_pts < INT64_MAX)
296  sub2video_update(ist, NULL);
297  for (i = 0; i < ist->nb_filters; i++)
/* NOTE(review): the loop body (line 298) was lost in extraction; presumably
 * an av_buffersrc_add_frame() with a NULL frame to signal EOF to
 * ist->filters[i]->filter — confirm against upstream. */
299 }
300 
301 /* end of sub2video hack */
302 
/* Restore the saved terminal attributes. Kept minimal because it is also
 * reachable from the signal path (see term_exit() and sigterm_handler);
 * only async-signal-safe calls belong here. */
303 static void term_exit_sigsafe(void)
304 {
305 #if HAVE_TERMIOS_H
306  if(restore_tty)
307  tcsetattr (0, TCSANOW, &oldtty);
308 #endif
309 }
310 
/* Public terminal teardown: flush pending logging, then restore the tty.
 * NOTE(review): line 314 was lost in extraction; presumably a call to
 * term_exit_sigsafe() — confirm against upstream. */
311 void term_exit(void)
312 {
/* Empty QUIET-level message: forces the logger to flush/reset its state. */
313  av_log(NULL, AV_LOG_QUIET, "%s", "");
315 }
316 
/* Flags shared between the signal handler and the main loop. `volatile int`
 * follows the file's existing convention; NOTE(review): `volatile
 * sig_atomic_t` (or C11 atomics) would be the strictly portable choice for
 * handler-written flags — confirm before changing. */
317 static volatile int received_sigterm = 0;
318 static volatile int received_nb_signals = 0;
319 static volatile int transcode_init_done = 0;
320 static volatile int ffmpeg_exited = 0;
321 static int main_return_code = 0;
322 
/* Asynchronous signal handler: record which signal arrived and hard-exit if
 * the user insists (more than 3 signals).
 * NOTE(review): the parameter line (324) was lost in extraction; presumably
 * `sigterm_handler(int sig)` given the uses below — confirm.
 * NOTE(review): lines 327-328 are also elided; they likely increment
 * received_nb_signals and call term_exit_sigsafe() — confirm against
 * upstream. The write() return value is deliberately ignored (best-effort
 * message on the way to exit). */
323 static void
325 {
326  received_sigterm = sig;
329  if(received_nb_signals > 3) {
330  write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
331  strlen("Received > 3 system signals, hard exiting\n"));
332 
333  exit(123);
334  }
335 }
336 
#if HAVE_SETCONSOLECTRLHANDLER
/**
 * Windows console control handler: translate console events into the same
 * POSIX-style signal handling used on other platforms.
 *
 * Returns TRUE when the event was handled, FALSE for unknown events.
 */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
{
    /* DWORD is unsigned long, so it must be printed with %lu; the original
     * %ld was a signed/unsigned format-specifier mismatch. */
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %lu\n", fdwCtrlType);

    switch (fdwCtrlType)
    {
    case CTRL_C_EVENT:
    case CTRL_BREAK_EVENT:
        sigterm_handler(SIGINT);
        return TRUE;

    case CTRL_CLOSE_EVENT:
    case CTRL_LOGOFF_EVENT:
    case CTRL_SHUTDOWN_EVENT:
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited) {
            Sleep(0);
        }
        return TRUE;

    default:
        av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %lu\n", fdwCtrlType);
        return FALSE;
    }
}
#endif
368 
/* Install signal/console handlers and, on POSIX terminals, switch the tty to
 * raw-ish mode so single keypresses can be read by read_key().
 * NOTE(review): line 372 was lost in extraction; it must be an opening
 * `if (...) {` whose closing brace is the `}` at line 390 — upstream guards
 * this block with `if (!run_as_daemon && stdin_interaction)`; confirm. */
369 void term_init(void)
370 {
371 #if HAVE_TERMIOS_H
373  struct termios tty;
374  if (tcgetattr (0, &tty) == 0) {
/* Save current state so term_exit_sigsafe() can restore it. */
375  oldtty = tty;
376  restore_tty = 1;
377 
/* Disable input translation/flow control, echo and canonical mode;
 * force 8-bit characters; return from read() after 1 byte, no timeout. */
378  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
379  |INLCR|IGNCR|ICRNL|IXON);
380  tty.c_oflag |= OPOST;
381  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
382  tty.c_cflag &= ~(CSIZE|PARENB);
383  tty.c_cflag |= CS8;
384  tty.c_cc[VMIN] = 1;
385  tty.c_cc[VTIME] = 0;
386 
387  tcsetattr (0, TCSANOW, &tty);
388  }
389  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
390  }
391 #endif
392 
393  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
394  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
395 #ifdef SIGXCPU
396  signal(SIGXCPU, sigterm_handler);
397 #endif
398 #if HAVE_SETCONSOLECTRLHANDLER
399  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
400 #endif
401 }
402 
403 /* read a key without blocking */
/* Returns the key as an unsigned char value, or -1 (or a read() result <= 0
 * on POSIX) when no key is available. Platform-specific: select()+read() on
 * termios systems, PeekNamedPipe/kbhit on Windows. */
404 static int read_key(void)
405 {
406  unsigned char ch;
407 #if HAVE_TERMIOS_H
408  int n = 1;
409  struct timeval tv;
410  fd_set rfds;
411 
/* Poll stdin (fd 0) with a zero timeout so this never blocks. */
412  FD_ZERO(&rfds);
413  FD_SET(0, &rfds);
414  tv.tv_sec = 0;
415  tv.tv_usec = 0;
416  n = select(1, &rfds, NULL, NULL, &tv);
417  if (n > 0) {
418  n = read(0, &ch, 1);
419  if (n == 1)
420  return ch;
421 
/* Pass through read()'s 0 (EOF) or -1 (error) result. */
422  return n;
423  }
424 #elif HAVE_KBHIT
425 # if HAVE_PEEKNAMEDPIPE
/* Cached across calls: whether stdin is a pipe (GUI/redirected) vs console. */
426  static int is_pipe;
427  static HANDLE input_handle;
428  DWORD dw, nchars;
429  if(!input_handle){
430  input_handle = GetStdHandle(STD_INPUT_HANDLE);
431  is_pipe = !GetConsoleMode(input_handle, &dw);
432  }
433 
434  if (is_pipe) {
435  /* When running under a GUI, you will end here. */
436  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
437  // input pipe may have been closed by the program that ran ffmpeg
438  return -1;
439  }
440  //Read it
441  if(nchars != 0) {
442  read(0, &ch, 1);
443  return ch;
444  }else{
445  return -1;
446  }
447  }
448 # endif
449  if(kbhit())
450  return(getch());
451 #endif
452  return -1;
453 }
454 
/* AVIOInterruptCB callback: tell lavf to abort blocking I/O once the user has
 * signalled termination.
 * NOTE(review): the return statement (line 457) was lost in extraction;
 * upstream returns `received_nb_signals > transcode_init_done` — confirm. */
455 static int decode_interrupt_cb(void *ctx)
456 {
458 }
459 
461 
/* Global teardown, registered as the exit handler: free filtergraphs, close
 * and free all output/input files and streams, close the vstats file, and
 * report the exit cause. Several lines were lost in extraction (e.g. 473,
 * 475, 494, 499, 505, 519-546 gaps, 560-570 gaps, 576, 580, 589); each is
 * flagged below where its absence is visible. */
462 static void ffmpeg_cleanup(int ret)
463 {
464  int i, j;
465 
466  if (do_benchmark) {
467  int maxrss = getmaxrss() / 1024;
468  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
469  }
470 
/* Free every filtergraph and its input/output descriptions. */
471  for (i = 0; i < nb_filtergraphs; i++) {
472  FilterGraph *fg = filtergraphs[i];
474  for (j = 0; j < fg->nb_inputs; j++) {
476  av_freep(&fg->inputs[j]->name);
477  av_freep(&fg->inputs[j]);
478  }
479  av_freep(&fg->inputs);
480  for (j = 0; j < fg->nb_outputs; j++) {
481  av_freep(&fg->outputs[j]->name);
482  av_freep(&fg->outputs[j]->formats);
483  av_freep(&fg->outputs[j]->channel_layouts);
484  av_freep(&fg->outputs[j]->sample_rates);
485  av_freep(&fg->outputs[j]);
486  }
487  av_freep(&fg->outputs);
488  av_freep(&fg->graph_desc);
489 
490  av_freep(&filtergraphs[i]);
491  }
492  av_freep(&filtergraphs);
493 
495 
496  /* close files */
497  for (i = 0; i < nb_output_files; i++) {
498  OutputFile *of = output_files[i];
/* NOTE(review): line 499 (the declaration of `s`, presumably
 * `AVFormatContext *s;`) was lost in extraction — confirm. */
500  if (!of)
501  continue;
502  s = of->ctx;
503  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
504  avio_closep(&s->pb);
506  av_dict_free(&of->opts);
507 
508  av_freep(&output_files[i]);
509  }
510  for (i = 0; i < nb_output_streams; i++) {
511  OutputStream *ost = output_streams[i];
512 
513  if (!ost)
514  continue;
515 
516  for (j = 0; j < ost->nb_bitstream_filters; j++)
517  av_bsf_free(&ost->bsf_ctx[j]);
518  av_freep(&ost->bsf_ctx);
520 
522  av_frame_free(&ost->last_frame);
523  av_dict_free(&ost->encoder_opts);
524 
525  av_parser_close(ost->parser);
527 
528  av_freep(&ost->forced_keyframes);
530  av_freep(&ost->avfilter);
531  av_freep(&ost->logfile_prefix);
532 
534  ost->audio_channels_mapped = 0;
535 
536  av_dict_free(&ost->sws_dict);
537 
540 
/* Drain and unref any packets still queued before the muxer was ready. */
541  while (ost->muxing_queue && av_fifo_size(ost->muxing_queue)) {
542  AVPacket pkt;
543  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
544  av_packet_unref(&pkt);
545  }
547 
548  av_freep(&output_streams[i]);
549  }
550 #if HAVE_PTHREADS
551  free_input_threads();
552 #endif
553  for (i = 0; i < nb_input_files; i++) {
554  avformat_close_input(&input_files[i]->ctx);
555  av_freep(&input_files[i]);
556  }
557  for (i = 0; i < nb_input_streams; i++) {
558  InputStream *ist = input_streams[i];
559 
562  av_dict_free(&ist->decoder_opts);
565  av_freep(&ist->filters);
566  av_freep(&ist->hwaccel_device);
567  av_freep(&ist->dts_buffer);
568 
570 
571  av_freep(&input_streams[i]);
572  }
573 
574  if (vstats_file) {
/* fclose flushes; a failure here can mean lost stats, hence the warning. */
575  if (fclose(vstats_file))
577  "Error closing vstats file, loss of information possible: %s\n",
578  av_err2str(AVERROR(errno)));
579  }
581 
582  av_freep(&input_streams);
583  av_freep(&input_files);
584  av_freep(&output_streams);
585  av_freep(&output_files);
586 
587  uninit_opts();
588 
590 
591  if (received_sigterm) {
592  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
593  (int) received_sigterm);
594  } else if (ret && transcode_init_done) {
595  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
596  }
597  term_exit();
/* Lets the Windows CtrlHandler stop spinning and allow process teardown. */
598  ffmpeg_exited = 1;
599 }
600 
/* Iterate every entry of dictionary `b`.
 * NOTE(review): the signature line (601) was lost in extraction; upstream
 * this is `static void remove_avoptions(AVDictionary **a, AVDictionary *b)`,
 * and the elided loop body (line 606) deletes each matching key from *a —
 * confirm before relying on this. */
602 {
603  AVDictionaryEntry *t = NULL;
604 
605  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
607  }
608 }
609 
/* Abort with a fatal error if dictionary `m` still contains any entry, i.e.
 * an option that no component consumed.
 * NOTE(review): the signature line (610, presumably
 * `static void assert_avoptions(AVDictionary *m)`) and the declaration of
 * `t` (line 612) were lost in extraction — confirm against upstream. */
611 {
613  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
614  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
615  exit_program(1);
616  }
617 }
618 
/* Terminate after an experimental codec was requested without the required
 * opt-in. Both parameters are unused here; the caller is expected to have
 * logged the details before calling. */
619 static void abort_codec_experimental(AVCodec *c, int encoder)
620 {
621  exit_program(1);
622 }
623 
624 static void update_benchmark(const char *fmt, ...)
625 {
626  if (do_benchmark_all) {
627  int64_t t = getutime();
628  va_list va;
629  char buf[1024];
630 
631  if (fmt) {
632  va_start(va, fmt);
633  vsnprintf(buf, sizeof(buf), fmt, va);
634  va_end(va);
635  av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
636  }
637  current_time = t;
638  }
639 }
640 
641 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
642 {
643  int i;
644  for (i = 0; i < nb_output_streams; i++) {
645  OutputStream *ost2 = output_streams[i];
646  ost2->finished |= ost == ost2 ? this_stream : others;
647  }
648 }
649 
/* Hand a finished packet to the muxer: buffer it while the muxer header is
 * unwritten, enforce -frames limits, collect video quality/error side data,
 * sanitize non-monotonic timestamps, and finally interleave-write it. The
 * packet is always consumed (moved into the queue or unreferenced).
 * NOTE(review): the signature line (650) was lost in extraction; from the
 * calls in output_packet() it is presumably
 * `static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)`
 * — confirm. Other elided lines are flagged inline. */
651 {
652  AVFormatContext *s = of->ctx;
653  AVStream *st = ost->st;
654  int ret;
655 
656  if (!of->header_written) {
657  AVPacket tmp_pkt;
658  /* the muxer is not initialized yet, buffer the packet */
659  if (!av_fifo_space(ost->muxing_queue)) {
/* Grow the queue geometrically, capped at -max_muxing_queue_size. */
660  int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
661  ost->max_muxing_queue_size);
662  if (new_size <= av_fifo_size(ost->muxing_queue)) {
/* NOTE(review): line 663 (the av_log call opening, presumably
 * `av_log(NULL, AV_LOG_ERROR,`) was lost in extraction — confirm. */
664  "Too many packets buffered for output stream %d:%d.\n",
665  ost->file_index, ost->st->index);
666  exit_program(1);
667  }
668  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
669  if (ret < 0)
670  exit_program(1);
671  }
672  av_packet_move_ref(&tmp_pkt, pkt);
673  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
674  return;
675  }
676 
/* NOTE(review): lines 677-678, the condition guarding this timestamp reset,
 * were lost in extraction (upstream: VSYNC_DROP video / audio_sync_method<0
 * audio) — confirm. */
679  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
680 
681  /*
682  * Audio encoders may split the packets -- #frames in != #packets out.
683  * But there is no reordering, so we can limit the number of output packets
684  * by simply dropping them here.
685  * Counting encoded video frames needs to be done separately because of
686  * reordering, see do_video_out()
687  */
688  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
689  if (ost->frame_number >= ost->max_frames) {
690  av_packet_unref(pkt);
691  return;
692  }
693  ost->frame_number++;
694  }
695  if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
696  int i;
/* NOTE(review): line 697, the opening of the side-data lookup (presumably
 * `uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,`),
 * was lost in extraction — confirm. */
698  NULL);
699  ost->quality = sd ? AV_RL32(sd) : -1;
700  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
701 
702  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
703  if (sd && i < sd[5])
704  ost->error[i] = AV_RL64(sd + 8 + 8*i);
705  else
706  ost->error[i] = -1;
707  }
708 
/* CFR output: derive the duration directly from the frame rate. */
709  if (ost->frame_rate.num && ost->is_cfr) {
710  if (pkt->duration > 0)
711  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
712  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
713  ost->st->time_base);
714  }
715  }
716 
717  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
/* dts must never exceed pts; replace both by a median-style guess. */
718  if (pkt->dts != AV_NOPTS_VALUE &&
719  pkt->pts != AV_NOPTS_VALUE &&
720  pkt->dts > pkt->pts) {
721  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
722  pkt->dts, pkt->pts,
723  ost->file_index, ost->st->index);
/* Middle value of {pts, dts, last_mux_dts+1}: sum minus min minus max. */
724  pkt->pts =
725  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
726  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
727  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
728  }
/* NOTE(review): line 729, the first clause of this condition, was lost in
 * extraction (upstream restricts this check by codec type) — confirm. */
730  pkt->dts != AV_NOPTS_VALUE &&
731  !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
732  ost->last_mux_dts != AV_NOPTS_VALUE) {
733  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
734  if (pkt->dts < max) {
735  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
736  av_log(s, loglevel, "Non-monotonous DTS in output stream "
737  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
738  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
739  if (exit_on_error) {
740  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
741  exit_program(1);
742  }
743  av_log(s, loglevel, "changing to %"PRId64". This may result "
744  "in incorrect timestamps in the output file.\n",
745  max);
746  if (pkt->pts >= pkt->dts)
747  pkt->pts = FFMAX(pkt->pts, max);
748  pkt->dts = max;
749  }
750  }
751  }
752  ost->last_mux_dts = pkt->dts;
753 
754  ost->data_size += pkt->size;
755  ost->packets_written++;
756 
757  pkt->stream_index = ost->index;
758 
759  if (debug_ts) {
760  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
761  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
/* NOTE(review): line 762 (presumably the media-type-string argument, e.g.
 * `av_get_media_type_string(ost->enc_ctx->codec_type),`) was lost in
 * extraction — confirm. */
763  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
764  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
765  pkt->size
766  );
767  }
768 
769  ret = av_interleaved_write_frame(s, pkt);
770  if (ret < 0) {
771  print_error("av_interleaved_write_frame()", ret);
772  main_return_code = 1;
/* NOTE(review): line 773 was lost in extraction; upstream calls
 * close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ...) —
 * confirm. */
774  }
775  av_packet_unref(pkt);
776 }
777 
/* Mark this output stream's encoder finished; with -shortest, also clamp the
 * file's recording time to this stream's current end position.
 * NOTE(review): the signature line (778) was lost in extraction; from the
 * call in check_recording_time() it is presumably
 * `static void close_output_stream(OutputStream *ost)` — confirm. */
779 {
780  OutputFile *of = output_files[ost->file_index];
781 
782  ost->finished |= ENCODER_FINISHED;
783  if (of->shortest) {
784  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
785  of->recording_time = FFMIN(of->recording_time, end);
786  }
787 }
788 
/* Run a packet through the stream's bitstream-filter chain (if any) and pass
 * each result to write_packet(); without filters the packet goes straight to
 * the muxer.
 * NOTE(review): the signature line (789) was lost in extraction; from the
 * callers it is presumably
 * `static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)`
 * — confirm. Line 797 is also elided (likely a comment or assertion before
 * the first send) — confirm. */
790 {
791  int ret = 0;
792 
793  /* apply the output bitstream filters, if any */
794  if (ost->nb_bitstream_filters) {
795  int idx;
796 
798  ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
799  if (ret < 0)
800  goto finish;
801 
/* idx is 1 past the filter we currently drain; 0 means the chain is empty
 * of pending output and we are done. */
802  idx = 1;
803  while (idx) {
804  /* get a packet from the previous filter up the chain */
805  ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
806  if (ret == AVERROR(EAGAIN)) {
807  ret = 0;
808  idx--;
809  continue;
810  } else if (ret < 0)
811  goto finish;
812  /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
813  * the api states this shouldn't happen after init(). Propagate it here to the
814  * muxer and to the next filters in the chain to workaround this.
815  * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
816  * par_out->extradata and adapt muxers accordingly to get rid of this. */
817  if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
818  ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
819  if (ret < 0)
820  goto finish;
821  ost->bsf_extradata_updated[idx - 1] |= 1;
822  }
823 
824  /* send it to the next filter down the chain or to the muxer */
825  if (idx < ost->nb_bitstream_filters) {
826  /* HACK/FIXME! - See above */
827  if (!(ost->bsf_extradata_updated[idx] & 2)) {
828  ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
829  if (ret < 0)
830  goto finish;
831  ost->bsf_extradata_updated[idx] |= 2;
832  }
833  ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
834  if (ret < 0)
835  goto finish;
836  idx++;
837  } else
838  write_packet(of, pkt, ost);
839  }
840  } else
841  write_packet(of, pkt, ost);
842 
843 finish:
844  if (ret < 0 && ret != AVERROR_EOF) {
845  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
846  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
847  if(exit_on_error)
848  exit_program(1);
849  }
850 }
851 
/* Return 1 while the stream is still within the output file's -t recording
 * window, otherwise close the stream and return 0.
 * NOTE(review): the signature line (852, presumably
 * `static int check_recording_time(OutputStream *ost)`) and the middle of
 * the comparison (line 857, presumably an av_compare_ts() of ost->sync_opts
 * in the encoder time base against of->recording_time) were lost in
 * extraction — confirm against upstream. */
853 {
854  OutputFile *of = output_files[ost->file_index];
855 
856  if (of->recording_time != INT64_MAX &&
858  AV_TIME_BASE_Q) >= 0) {
859  close_output_stream(ost);
860  return 0;
861  }
862  return 1;
863 }
864 
/* Encode one audio frame with the send/receive API and hand every produced
 * packet to output_packet(); any encoder error is fatal.
 * NOTE(review): line 886 was lost in extraction (between the assert and the
 * debug_ts block); likely a comment or blank line — confirm. */
865 static void do_audio_out(OutputFile *of, OutputStream *ost,
866  AVFrame *frame)
867 {
868  AVCodecContext *enc = ost->enc_ctx;
869  AVPacket pkt;
870  int ret;
871 
872  av_init_packet(&pkt);
873  pkt.data = NULL;
874  pkt.size = 0;
875 
876  if (!check_recording_time(ost))
877  return;
878 
/* Without a usable pts (or with -async < 0) continue from our own clock. */
879  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
880  frame->pts = ost->sync_opts;
881  ost->sync_opts = frame->pts + frame->nb_samples;
882  ost->samples_encoded += frame->nb_samples;
883  ost->frames_encoded++;
884 
885  av_assert0(pkt.size || !pkt.data);
887  if (debug_ts) {
888  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
889  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
890  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
891  enc->time_base.num, enc->time_base.den);
892  }
893 
894  ret = avcodec_send_frame(enc, frame);
895  if (ret < 0)
896  goto error;
897 
/* Drain every packet the encoder has ready; EAGAIN means "needs more input". */
898  while (1) {
899  ret = avcodec_receive_packet(enc, &pkt);
900  if (ret == AVERROR(EAGAIN))
901  break;
902  if (ret < 0)
903  goto error;
904 
905  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
906 
907  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
908 
909  if (debug_ts) {
910  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
911  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
912  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
913  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
914  }
915 
916  output_packet(of, &pkt, ost);
917  }
918 
919  return;
920 error:
921  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
922  exit_program(1);
923 }
924 
/* Encode one AVSubtitle and emit the resulting packet(s). DVB subtitles are
 * encoded twice: once to draw (i == 0, all rects) and once to clear
 * (i == 1, num_rects forced to 0).
 * NOTE(review): line 955 (the condition choosing nb, presumably a check for
 * AV_CODEC_ID_DVB_SUBTITLE) and line 974 (between the pts adjustment and
 * `sub->start_display_time = 0;`) were lost in extraction — confirm. */
925 static void do_subtitle_out(OutputFile *of,
926  OutputStream *ost,
927  AVSubtitle *sub)
928 {
929  int subtitle_out_max_size = 1024 * 1024;
930  int subtitle_out_size, nb, i;
931  AVCodecContext *enc;
932  AVPacket pkt;
933  int64_t pts;
934 
935  if (sub->pts == AV_NOPTS_VALUE) {
936  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
937  if (exit_on_error)
938  exit_program(1);
939  return;
940  }
941 
942  enc = ost->enc_ctx;
943 
/* Lazily allocate the shared 1 MiB encode buffer (file-scope, reused). */
944  if (!subtitle_out) {
945  subtitle_out = av_malloc(subtitle_out_max_size);
946  if (!subtitle_out) {
947  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
948  exit_program(1);
949  }
950  }
951 
952  /* Note: DVB subtitle need one packet to draw them and one other
953  packet to clear them */
954  /* XXX: signal it in the codec context ? */
956  nb = 2;
957  else
958  nb = 1;
959 
960  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
961  pts = sub->pts;
962  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
963  pts -= output_files[ost->file_index]->start_time;
964  for (i = 0; i < nb; i++) {
965  unsigned save_num_rects = sub->num_rects;
966 
967  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
968  if (!check_recording_time(ost))
969  return;
970 
971  sub->pts = pts;
972  // start_display_time is required to be 0
973  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
975  sub->start_display_time = 0;
/* Second (clear) pass encodes with zero rectangles, restored afterwards. */
976  if (i == 1)
977  sub->num_rects = 0;
978 
979  ost->frames_encoded++;
980 
981  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
982  subtitle_out_max_size, sub);
983  if (i == 1)
984  sub->num_rects = save_num_rects;
985  if (subtitle_out_size < 0) {
986  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
987  exit_program(1);
988  }
989 
990  av_init_packet(&pkt);
991  pkt.data = subtitle_out;
992  pkt.size = subtitle_out_size;
993  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
994  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
995  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
996  /* XXX: the pts correction is handled here. Maybe handling
997  it in the codec would be better */
/* 90 = 90 kHz MPEG clock ticks per millisecond of display time. */
998  if (i == 0)
999  pkt.pts += 90 * sub->start_display_time;
1000  else
1001  pkt.pts += 90 * sub->end_display_time;
1002  }
1003  pkt.dts = pkt.pts;
1004  output_packet(of, &pkt, ost);
1005  }
1006 }
1007 
1008 static void do_video_out(OutputFile *of,
1009  OutputStream *ost,
1010  AVFrame *next_picture,
1011  double sync_ipts)
1012 {
1013  int ret, format_video_sync;
1014  AVPacket pkt;
1015  AVCodecContext *enc = ost->enc_ctx;
1016  AVCodecParameters *mux_par = ost->st->codecpar;
1017  int nb_frames, nb0_frames, i;
1018  double delta, delta0;
1019  double duration = 0;
1020  int frame_size = 0;
1021  InputStream *ist = NULL;
1023 
1024  if (ost->source_index >= 0)
1025  ist = input_streams[ost->source_index];
1026 
1027  if (filter->inputs[0]->frame_rate.num > 0 &&
1028  filter->inputs[0]->frame_rate.den > 0)
1029  duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
1030 
1031  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1032  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1033 
1034  if (!ost->filters_script &&
1035  !ost->filters &&
1036  next_picture &&
1037  ist &&
1038  lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1039  duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1040  }
1041 
1042  if (!next_picture) {
1043  //end, flushing
1044  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1045  ost->last_nb0_frames[1],
1046  ost->last_nb0_frames[2]);
1047  } else {
1048  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1049  delta = delta0 + duration;
1050 
1051  /* by default, we output a single frame */
1052  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1053  nb_frames = 1;
1054 
1055  format_video_sync = video_sync_method;
1056  if (format_video_sync == VSYNC_AUTO) {
1057  if(!strcmp(of->ctx->oformat->name, "avi")) {
1058  format_video_sync = VSYNC_VFR;
1059  } else
1060  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1061  if ( ist
1062  && format_video_sync == VSYNC_CFR
1063  && input_files[ist->file_index]->ctx->nb_streams == 1
1064  && input_files[ist->file_index]->input_ts_offset == 0) {
1065  format_video_sync = VSYNC_VSCFR;
1066  }
1067  if (format_video_sync == VSYNC_CFR && copy_ts) {
1068  format_video_sync = VSYNC_VSCFR;
1069  }
1070  }
1071  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1072 
1073  if (delta0 < 0 &&
1074  delta > 0 &&
1075  format_video_sync != VSYNC_PASSTHROUGH &&
1076  format_video_sync != VSYNC_DROP) {
1077  if (delta0 < -0.6) {
1078  av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1079  } else
1080  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1081  sync_ipts = ost->sync_opts;
1082  duration += delta0;
1083  delta0 = 0;
1084  }
1085 
1086  switch (format_video_sync) {
1087  case VSYNC_VSCFR:
1088  if (ost->frame_number == 0 && delta0 >= 0.5) {
1089  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1090  delta = duration;
1091  delta0 = 0;
1092  ost->sync_opts = lrint(sync_ipts);
1093  }
1094  case VSYNC_CFR:
1095  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1096  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1097  nb_frames = 0;
1098  } else if (delta < -1.1)
1099  nb_frames = 0;
1100  else if (delta > 1.1) {
1101  nb_frames = lrintf(delta);
1102  if (delta0 > 1.1)
1103  nb0_frames = lrintf(delta0 - 0.6);
1104  }
1105  break;
1106  case VSYNC_VFR:
1107  if (delta <= -0.6)
1108  nb_frames = 0;
1109  else if (delta > 0.6)
1110  ost->sync_opts = lrint(sync_ipts);
1111  break;
1112  case VSYNC_DROP:
1113  case VSYNC_PASSTHROUGH:
1114  ost->sync_opts = lrint(sync_ipts);
1115  break;
1116  default:
1117  av_assert0(0);
1118  }
1119  }
1120 
1121  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1122  nb0_frames = FFMIN(nb0_frames, nb_frames);
1123 
1124  memmove(ost->last_nb0_frames + 1,
1125  ost->last_nb0_frames,
1126  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1127  ost->last_nb0_frames[0] = nb0_frames;
1128 
1129  if (nb0_frames == 0 && ost->last_dropped) {
1130  nb_frames_drop++;
1132  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1133  ost->frame_number, ost->st->index, ost->last_frame->pts);
1134  }
1135  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1136  if (nb_frames > dts_error_threshold * 30) {
1137  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1138  nb_frames_drop++;
1139  return;
1140  }
1141  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1142  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1143  if (nb_frames_dup > dup_warning) {
1144  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1145  dup_warning *= 10;
1146  }
1147  }
1148  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1149 
1150  /* duplicates frame if needed */
1151  for (i = 0; i < nb_frames; i++) {
1152  AVFrame *in_picture;
1153  av_init_packet(&pkt);
1154  pkt.data = NULL;
1155  pkt.size = 0;
1156 
1157  if (i < nb0_frames && ost->last_frame) {
1158  in_picture = ost->last_frame;
1159  } else
1160  in_picture = next_picture;
1161 
1162  if (!in_picture)
1163  return;
1164 
1165  in_picture->pts = ost->sync_opts;
1166 
1167 #if 1
1168  if (!check_recording_time(ost))
1169 #else
1170  if (ost->frame_number >= ost->max_frames)
1171 #endif
1172  return;
1173 
1174 #if FF_API_LAVF_FMT_RAWPICTURE
1175  if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1176  enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1177  /* raw pictures are written as AVPicture structure to
1178  avoid any copies. We support temporarily the older
1179  method. */
1180  if (in_picture->interlaced_frame)
1181  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1182  else
1183  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1184  pkt.data = (uint8_t *)in_picture;
1185  pkt.size = sizeof(AVPicture);
1186  pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1187  pkt.flags |= AV_PKT_FLAG_KEY;
1188 
1189  output_packet(of, &pkt, ost);
1190  } else
1191 #endif
1192  {
1193  int forced_keyframe = 0;
1194  double pts_time;
1195 
1197  ost->top_field_first >= 0)
1198  in_picture->top_field_first = !!ost->top_field_first;
1199 
1200  if (in_picture->interlaced_frame) {
1201  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1202  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1203  else
1204  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1205  } else
1206  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1207 
1208  in_picture->quality = enc->global_quality;
1209  in_picture->pict_type = 0;
1210 
1211  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1212  in_picture->pts * av_q2d(enc->time_base) : NAN;
1213  if (ost->forced_kf_index < ost->forced_kf_count &&
1214  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1215  ost->forced_kf_index++;
1216  forced_keyframe = 1;
1217  } else if (ost->forced_keyframes_pexpr) {
1218  double res;
1219  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1222  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1228  res);
1229  if (res) {
1230  forced_keyframe = 1;
1236  }
1237 
1239  } else if ( ost->forced_keyframes
1240  && !strncmp(ost->forced_keyframes, "source", 6)
1241  && in_picture->key_frame==1) {
1242  forced_keyframe = 1;
1243  }
1244 
1245  if (forced_keyframe) {
1246  in_picture->pict_type = AV_PICTURE_TYPE_I;
1247  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1248  }
1249 
1251  if (debug_ts) {
1252  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1253  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1254  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1255  enc->time_base.num, enc->time_base.den);
1256  }
1257 
1258  ost->frames_encoded++;
1259 
1260  ret = avcodec_send_frame(enc, in_picture);
1261  if (ret < 0)
1262  goto error;
1263 
1264  while (1) {
1265  ret = avcodec_receive_packet(enc, &pkt);
1266  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1267  if (ret == AVERROR(EAGAIN))
1268  break;
1269  if (ret < 0)
1270  goto error;
1271 
1272  if (debug_ts) {
1273  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1274  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1275  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1276  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1277  }
1278 
1279  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1280  pkt.pts = ost->sync_opts;
1281 
1282  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1283 
1284  if (debug_ts) {
1285  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1286  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1287  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1288  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1289  }
1290 
1291  frame_size = pkt.size;
1292  output_packet(of, &pkt, ost);
1293 
1294  /* if two pass, output log */
1295  if (ost->logfile && enc->stats_out) {
1296  fprintf(ost->logfile, "%s", enc->stats_out);
1297  }
1298  }
1299  }
1300  ost->sync_opts++;
1301  /*
1302  * For video, number of frames in == number of packets out.
1303  * But there may be reordering, so we can't throw away frames on encoder
1304  * flush, we need to limit them here, before they go into encoder.
1305  */
1306  ost->frame_number++;
1307 
1308  if (vstats_filename && frame_size)
1309  do_video_stats(ost, frame_size);
1310  }
1311 
1312  if (!ost->last_frame)
1313  ost->last_frame = av_frame_alloc();
1314  av_frame_unref(ost->last_frame);
1315  if (next_picture && ost->last_frame)
1316  av_frame_ref(ost->last_frame, next_picture);
1317  else
1318  av_frame_free(&ost->last_frame);
1319 
1320  return;
1321 error:
1322  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1323  exit_program(1);
1324 }
1325 
1326 static double psnr(double d)
1327 {
1328  return -10.0 * log10(d);
1329 }
1330 
/* Append one line of per-frame statistics (frame number, quantizer, PSNR,
 * frame size, instantaneous and average bitrate, picture type) to the file
 * named by -vstats_file.  NOTE(review): the function signature line (1331
 * upstream) is missing from this extraction; the parameters appear to be
 * an OutputStream *ost and an int frame_size — confirm against upstream. */
1332 {
 1333  AVCodecContext *enc;
 1334  int frame_number;
 1335  double ti1, bitrate, avg_bitrate;
 1336 
 1337  /* this is executed just the first time do_video_stats is called */
 1338  if (!vstats_file) {
 1339  vstats_file = fopen(vstats_filename, "w");
 1340  if (!vstats_file) {
 1341  perror("fopen");
 1342  exit_program(1);
 1343  }
 1344  }
 1345 
 1346  enc = ost->enc_ctx;
 1347  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
 1348  frame_number = ost->st->nb_frames;
 1349  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
 1350  ost->quality / (float)FF_QP2LAMBDA);
 1351 
 /* PSNR is only meaningful when the encoder was asked to compute error
  * values (AV_CODEC_FLAG_PSNR). */
 1352  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
 1353  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
 1354 
 1355  fprintf(vstats_file,"f_size= %6d ", frame_size);
 1356  /* compute pts value */
 1357  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
 /* clamp to avoid dividing by (nearly) zero in the average below */
 1358  if (ti1 < 0.01)
 1359  ti1 = 0.01;
 1360 
 1361  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
 1362  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
 1363  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
 1364  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
 1365  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
 1366  }
 1367 }
1368 
/* Mark an output stream as finished.  NOTE(review): the function signature
 * line (1369 upstream) and line 1374 (which upstream flags this stream
 * itself as finished) are missing from this extraction — confirm against
 * upstream before relying on the exact semantics. */
1370 {
 1371  OutputFile *of = output_files[ost->file_index];
 1372  int i;
 1373 
 1375 
 /* with -shortest, finishing any stream finishes every stream of the file */
 1376  if (of->shortest) {
 1377  for (i = 0; i < of->ctx->nb_streams; i++)
 1378  output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
 1379  }
 1380 }
1381 
1382 /**
1383  * Get and encode new output from any of the filtergraphs, without causing
1384  * activity.
1385  *
1386  * @return 0 for success, <0 for severe errors
1387  */
1388 static int reap_filters(int flush)
 1389 {
 1390  AVFrame *filtered_frame = NULL;
 1391  int i;
 1392 
 1393  /* Reap all buffers present in the buffer sinks */
 1394  for (i = 0; i < nb_output_streams; i++) {
 1395  OutputStream *ost = output_streams[i];
 1396  OutputFile *of = output_files[ost->file_index];
 /* NOTE(review): line 1397, apparently the declaration of the buffersink
  * context variable `filter` used below, is missing from this
  * extraction. */
 1398  AVCodecContext *enc = ost->enc_ctx;
 1399  int ret = 0;
 1400 
 /* streams without a filtergraph (e.g. stream copy) have nothing to reap */
 1401  if (!ost->filter)
 1402  continue;
 1403  filter = ost->filter->filter;
 1404 
 /* lazily allocate the reusable frame the sink is drained into */
 1405  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
 1406  return AVERROR(ENOMEM);
 1407  }
 1408  filtered_frame = ost->filtered_frame;
 1409 
 1410  while (1) {
 1411  double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
 /* NOTE(review): line 1413 (the flags argument of
  * av_buffersink_get_frame_flags) is missing from this extraction. */
 1412  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
 1414  if (ret < 0) {
 1415  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
 1417  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
 /* on a flush, hand the video path a NULL frame so it can emit its tail */
 1418  } else if (flush && ret == AVERROR_EOF) {
 1419  if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
 1420  do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
 1421  }
 1422  break;
 1423  }
 1424  if (ost->finished) {
 1425  av_frame_unref(filtered_frame);
 1426  continue;
 1427  }
 1428  if (filtered_frame->pts != AV_NOPTS_VALUE) {
 1429  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
 1430  AVRational tb = enc->time_base;
 /* widen the time base so float_pts retains extra fractional precision */
 1431  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
 1432 
 1433  tb.den <<= extra_bits;
 1434  float_pts =
 1435  av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
 1436  av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
 1437  float_pts /= 1 << extra_bits;
 1438  // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
 1439  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
 1440 
 1441  filtered_frame->pts =
 1442  av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
 1443  av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
 1444  }
 1445  //if (ost->source_index >= 0)
 1446  // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
 1447 
 1448  switch (filter->inputs[0]->type) {
 1449  case AVMEDIA_TYPE_VIDEO:
 1450  if (!ost->frame_aspect_ratio.num)
 1451  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
 1452 
 1453  if (debug_ts) {
 1454  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
 1455  av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
 1456  float_pts,
 1457  enc->time_base.num, enc->time_base.den);
 1458  }
 1459 
 1460  do_video_out(of, ost, filtered_frame, float_pts);
 1461  break;
 1462  case AVMEDIA_TYPE_AUDIO:
 /* NOTE(review): line 1465 (presumably the av_log level/context of the
  * message on line 1466) is missing from this extraction. */
 1463  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
 1464  enc->channels != av_frame_get_channels(filtered_frame)) {
 1466  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
 1467  break;
 1468  }
 1469  do_audio_out(of, ost, filtered_frame);
 1470  break;
 1471  default:
 1472  // TODO support subtitle filters
 1473  av_assert0(0);
 1474  }
 1475 
 1476  av_frame_unref(filtered_frame);
 1477  }
 1478  }
 1479 
 1480  return 0;
 1481 }
1482 
/* Print the end-of-run summary: per-type byte totals, muxing overhead, and
 * (at verbose level) per-input/per-output stream packet and frame counts.
 * total_size is the number of bytes written to the first output file. */
1483 static void print_final_stats(int64_t total_size)
 1484 {
 1485  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
 1486  uint64_t subtitle_size = 0;
 1487  uint64_t data_size = 0;
 1488  float percent = -1.0;
 1489  int i, j;
 1490  int pass1_used = 1;
 1491 
 /* accumulate per-media-type payload sizes across all output streams */
 1492  for (i = 0; i < nb_output_streams; i++) {
 1493  OutputStream *ost = output_streams[i];
 1494  switch (ost->enc_ctx->codec_type) {
 1495  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
 1496  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
 1497  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
 1498  default: other_size += ost->data_size; break;
 1499  }
 1500  extra_size += ost->enc_ctx->extradata_size;
 1501  data_size += ost->data_size;
 /* NOTE(review): lines 1502-1503 (the condition guarding this
  * assignment, upstream a test of the encoder pass flags) are missing
  * from this extraction. */
 1504  pass1_used = 0;
 1505  }
 1506 
 /* muxing overhead = container bytes written beyond the raw stream payload */
 1507  if (data_size && total_size>0 && total_size >= data_size)
 1508  percent = 100.0 * (total_size - data_size) / data_size;
 1509 
 1510  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
 1511  video_size / 1024.0,
 1512  audio_size / 1024.0,
 1513  subtitle_size / 1024.0,
 1514  other_size / 1024.0,
 1515  extra_size / 1024.0);
 1516  if (percent >= 0.0)
 1517  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
 1518  else
 1519  av_log(NULL, AV_LOG_INFO, "unknown");
 1520  av_log(NULL, AV_LOG_INFO, "\n");
 1521 
 1522  /* print verbose per-stream stats */
 1523  for (i = 0; i < nb_input_files; i++) {
 1524  InputFile *f = input_files[i];
 /* note: this `total_size` intentionally shadows the parameter */
 1525  uint64_t total_packets = 0, total_size = 0;
 1526 
 1527  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
 1528  i, f->ctx->filename);
 1529 
 1530  for (j = 0; j < f->nb_streams; j++) {
 1531  InputStream *ist = input_streams[f->ist_index + j];
 1532  enum AVMediaType type = ist->dec_ctx->codec_type;
 1533 
 1534  total_size += ist->data_size;
 1535  total_packets += ist->nb_packets;
 1536 
 1537  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
 1538  i, j, media_type_string(type));
 1539  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
 1540  ist->nb_packets, ist->data_size);
 1541 
 1542  if (ist->decoding_needed) {
 1543  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
 1544  ist->frames_decoded);
 1545  if (type == AVMEDIA_TYPE_AUDIO)
 1546  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
 1547  av_log(NULL, AV_LOG_VERBOSE, "; ");
 1548  }
 1549 
 1550  av_log(NULL, AV_LOG_VERBOSE, "\n");
 1551  }
 1552 
 1553  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
 1554  total_packets, total_size);
 1555  }
 1556 
 1557  for (i = 0; i < nb_output_files; i++) {
 1558  OutputFile *of = output_files[i];
 1559  uint64_t total_packets = 0, total_size = 0;
 1560 
 1561  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
 1562  i, of->ctx->filename);
 1563 
 1564  for (j = 0; j < of->ctx->nb_streams; j++) {
 1565  OutputStream *ost = output_streams[of->ost_index + j];
 1566  enum AVMediaType type = ost->enc_ctx->codec_type;
 1567 
 1568  total_size += ost->data_size;
 1569  total_packets += ost->packets_written;
 1570 
 1571  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
 1572  i, j, media_type_string(type));
 1573  if (ost->encoding_needed) {
 1574  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
 1575  ost->frames_encoded);
 1576  if (type == AVMEDIA_TYPE_AUDIO)
 1577  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
 1578  av_log(NULL, AV_LOG_VERBOSE, "; ");
 1579  }
 1580 
 1581  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
 1582  ost->packets_written, ost->data_size);
 1583 
 1584  av_log(NULL, AV_LOG_VERBOSE, "\n");
 1585  }
 1586 
 1587  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
 1588  total_packets, total_size);
 1589  }
 /* warn when nothing at all was encoded; hint at trimming options unless
  * this was a pass-1 run where an empty output can be expected */
 1590  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
 1591  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
 1592  if (pass1_used) {
 1593  av_log(NULL, AV_LOG_WARNING, "\n");
 1594  } else {
 1595  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
 1596  }
 1597  }
 1598 }
1599 
/* Emit the periodic status line (frame/fps/q/size/time/bitrate/speed) to
 * stderr and, when -progress is active, machine-readable key=value pairs to
 * progress_avio.  Non-final reports are throttled to one every 500 ms; the
 * final report additionally triggers print_final_stats(). */
1600 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
 1601 {
 1602  char buf[1024];
 1603  AVBPrint buf_script;
 1604  OutputStream *ost;
 1605  AVFormatContext *oc;
 1606  int64_t total_size;
 1607  AVCodecContext *enc;
 1608  int frame_number, vid, i;
 1609  double bitrate;
 1610  double speed;
 1611  int64_t pts = INT64_MIN + 1;
 1612  static int64_t last_time = -1;
 1613  static int qp_histogram[52];
 1614  int hours, mins, secs, us;
 1615  int ret;
 1616  float t;
 1617 
 1618  if (!print_stats && !is_last_report && !progress_avio)
 1619  return;
 1620 
 /* rate-limit intermediate reports to one per 500 ms of wall-clock time */
 1621  if (!is_last_report) {
 1622  if (last_time == -1) {
 1623  last_time = cur_time;
 1624  return;
 1625  }
 1626  if ((cur_time - last_time) < 500000)
 1627  return;
 1628  last_time = cur_time;
 1629  }
 1630 
 1631  t = (cur_time-timer_start) / 1000000.0;
 1632 
 1633 
 /* statistics are reported against the first output file only */
 1634  oc = output_files[0]->ctx;
 1635 
 1636  total_size = avio_size(oc->pb);
 1637  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
 1638  total_size = avio_tell(oc->pb);
 1639 
 1640  buf[0] = '\0';
 1641  vid = 0;
 1642  av_bprint_init(&buf_script, 0, 1);
 1643  for (i = 0; i < nb_output_streams; i++) {
 1644  float q = -1;
 1645  ost = output_streams[i];
 1646  enc = ost->enc_ctx;
 1647  if (!ost->stream_copy)
 1648  q = ost->quality / (float) FF_QP2LAMBDA;
 1649 
 /* secondary video streams only contribute their quantizer */
 1650  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
 1651  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
 1652  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
 1653  ost->file_index, ost->index, q);
 1654  }
 /* the first video stream drives frame/fps/qp-histogram/PSNR reporting */
 1655  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
 1656  float fps;
 1657 
 1658  frame_number = ost->frame_number;
 1659  fps = t > 1 ? frame_number / t : 0;
 /* "fps < 9.95" selects one decimal place only for small fps values */
 1660  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
 1661  frame_number, fps < 9.95, fps, q);
 1662  av_bprintf(&buf_script, "frame=%d\n", frame_number);
 1663  av_bprintf(&buf_script, "fps=%.1f\n", fps);
 1664  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
 1665  ost->file_index, ost->index, q);
 1666  if (is_last_report)
 1667  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
 1668  if (qp_hist) {
 1669  int j;
 1670  int qp = lrintf(q);
 1671  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
 1672  qp_histogram[qp]++;
 1673  for (j = 0; j < 32; j++)
 1674  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
 1675  }
 1676 
 1677  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
 1678  int j;
 1679  double error, error_sum = 0;
 1680  double scale, scale_sum = 0;
 1681  double p;
 1682  char type[3] = { 'Y','U','V' };
 1683  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
 1684  for (j = 0; j < 3; j++) {
 1685  if (is_last_report) {
 1686  error = enc->error[j];
 1687  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
 1688  } else {
 1689  error = ost->error[j];
 1690  scale = enc->width * enc->height * 255.0 * 255.0;
 1691  }
 /* chroma planes are subsampled: quarter the normalization area */
 1692  if (j)
 1693  scale /= 4;
 1694  error_sum += error;
 1695  scale_sum += scale;
 1696  p = psnr(error / scale);
 1697  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
 1698  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
 1699  ost->file_index, ost->index, type[j] | 32, p);
 1700  }
 1701  p = psnr(error_sum / scale_sum);
 1702  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
 1703  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
 1704  ost->file_index, ost->index, p);
 1705  }
 1706  vid = 1;
 1707  }
 1708  /* compute min output value */
 /* NOTE(review): line 1709 (the condition guarding this pts update) is
  * missing from this extraction — confirm against upstream. */
 1710  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
 1711  ost->st->time_base, AV_TIME_BASE_Q));
 1712  if (is_last_report)
 1713  nb_frames_drop += ost->last_dropped;
 1714  }
 1715 
 /* split the microsecond pts into h:m:s.us components for display */
 1716  secs = FFABS(pts) / AV_TIME_BASE;
 1717  us = FFABS(pts) % AV_TIME_BASE;
 1718  mins = secs / 60;
 1719  secs %= 60;
 1720  hours = mins / 60;
 1721  mins %= 60;
 1722 
 1723  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
 1724  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
 1725 
 1726  if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
 1727  "size=N/A time=");
 1728  else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
 1729  "size=%8.0fkB time=", total_size / 1024.0);
 1730  if (pts < 0)
 1731  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
 1732  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
 1733  "%02d:%02d:%02d.%02d ", hours, mins, secs,
 1734  (100 * us) / AV_TIME_BASE);
 1735 
 1736  if (bitrate < 0) {
 1737  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
 1738  av_bprintf(&buf_script, "bitrate=N/A\n");
 1739  }else{
 1740  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
 1741  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
 1742  }
 1743 
 1744  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
 1745  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
 1746  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
 1747  av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
 1748  hours, mins, secs, us);
 1749 
 /* NOTE(review): lines 1750 and 1752 (the condition guarding the dup/drop
  * report and the snprintf argument list) are missing from this
  * extraction — confirm against upstream. */
 1751  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
 1753  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
 1754  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
 1755 
 1756  if (speed < 0) {
 1757  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
 1758  av_bprintf(&buf_script, "speed=N/A\n");
 1759  } else {
 1760  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
 1761  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
 1762  }
 1763 
 /* '\r' keeps intermediate reports on one updating line; '\n' ends it */
 1764  if (print_stats || is_last_report) {
 1765  const char end = is_last_report ? '\n' : '\r';
 1766  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
 1767  fprintf(stderr, "%s %c", buf, end);
 1768  } else
 1769  av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
 1770 
 1771  fflush(stderr);
 1772  }
 1773 
 1774  if (progress_avio) {
 1775  av_bprintf(&buf_script, "progress=%s\n",
 1776  is_last_report ? "end" : "continue");
 1777  avio_write(progress_avio, buf_script.str,
 1778  FFMIN(buf_script.len, buf_script.size - 1));
 1779  avio_flush(progress_avio);
 1780  av_bprint_finalize(&buf_script, NULL);
 /* NOTE(review): line 1783 (presumably the av_log call prefix for the
  * message on line 1784) is missing from this extraction. */
 1781  if (is_last_report) {
 1782  if ((ret = avio_closep(&progress_avio)) < 0)
 1784  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
 1785  }
 1786  }
 1787 
 1788  if (is_last_report)
 1789  print_final_stats(total_size);
 1790 }
1791 
/* Drain every active encoder at end of input: send a NULL frame to enter
 * draining mode, then receive and mux packets until each encoder returns
 * AVERROR_EOF. */
1792 static void flush_encoders(void)
 1793 {
 1794  int i, ret;
 1795 
 1796  for (i = 0; i < nb_output_streams; i++) {
 1797  OutputStream *ost = output_streams[i];
 1798  AVCodecContext *enc = ost->enc_ctx;
 1799  OutputFile *of = output_files[ost->file_index];
 1800  int stop_encoding = 0;
 1801 
 1802  if (!ost->encoding_needed)
 1803  continue;
 1804 
 /* frame_size <= 1 audio codecs (PCM-style) buffer nothing to flush */
 1805  if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
 1806  continue;
 1807 #if FF_API_LAVF_FMT_RAWPICTURE
 1808  if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
 1809  continue;
 1810 #endif
 1811 
 /* NOTE(review): line 1812 (the condition guarding this continue) is
  * missing from this extraction — confirm against upstream. */
 1813  continue;
 1814 
 /* NULL frame switches the encoder into draining mode */
 1815  avcodec_send_frame(enc, NULL);
 1816 
 1817  for (;;) {
 1818  const char *desc = NULL;
 1819 
 1820  switch (enc->codec_type) {
 1821  case AVMEDIA_TYPE_AUDIO:
 1822  desc = "audio";
 1823  break;
 1824  case AVMEDIA_TYPE_VIDEO:
 1825  desc = "video";
 1826  break;
 1827  default:
 1828  av_assert0(0);
 1829  }
 1830 
 1831  if (1) {
 1832  AVPacket pkt;
 1833  int pkt_size;
 1834  av_init_packet(&pkt);
 1835  pkt.data = NULL;
 1836  pkt.size = 0;
 1837 
 /* NOTE(review): line 1838 is missing from this extraction. */
 1839  ret = avcodec_receive_packet(enc, &pkt);
 1840  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
 1841  if (ret < 0 && ret != AVERROR_EOF) {
 1842  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
 1843  desc,
 1844  av_err2str(ret));
 1845  exit_program(1);
 1846  }
 1847  if (ost->logfile && enc->stats_out) {
 1848  fprintf(ost->logfile, "%s", enc->stats_out);
 1849  }
 /* EOF means the encoder is fully drained for this stream */
 1850  if (ret == AVERROR_EOF) {
 1851  stop_encoding = 1;
 1852  break;
 1853  }
 /* muxer already closed: drop the packet but keep draining */
 1854  if (ost->finished & MUXER_FINISHED) {
 1855  av_packet_unref(&pkt);
 1856  continue;
 1857  }
 1858  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
 1859  pkt_size = pkt.size;
 1860  output_packet(of, &pkt, ost);
 /* NOTE(review): line 1861 (the condition guarding this call) is
  * missing from this extraction. */
 1862  do_video_stats(ost, pkt_size);
 1863  }
 1864  }
 1865 
 1866  if (stop_encoding)
 1867  break;
 1868  }
 1869  }
 1870 }
1871 
1872 /*
1873  * Check whether a packet from ist should be written into ost at this time
1874  */
/* Decide whether a packet read from InputStream ist should currently be
 * written to OutputStream ost on the stream-copy path; returns 1 to write,
 * 0 to drop.  NOTE(review): the function signature line (1875 upstream) is
 * missing from this extraction. */
1876 {
 1877  OutputFile *of = output_files[ost->file_index];
 1878  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
 1879 
 /* the packet must originate from the stream this output is mapped to */
 1880  if (ost->source_index != ist_index)
 1881  return 0;
 1882 
 1883  if (ost->finished)
 1884  return 0;
 1885 
 /* drop packets that precede the requested output start time (-ss) */
 1886  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
 1887  return 0;
 1888 
 1889  return 1;
 1890 }
1891 
/* Copy one input packet to an output stream without re-encoding: applies
 * start-time/recording-time trimming, rescales timestamps into the output
 * time base, optionally runs the packet through av_parser_change(), and
 * muxes the result via output_packet(). */
1892 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
 1893 {
 1894  OutputFile *of = output_files[ost->file_index];
 1895  InputFile *f = input_files [ist->file_index];
 1896  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
 1897  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
 1898  AVPicture pict;
 1899  AVPacket opkt;
 1900 
 1901  av_init_packet(&opkt);
 1902 
 /* NOTE(review): line 1904 (the second half of this condition) is missing
  * from this extraction — confirm against upstream. */
 1903  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
 1905  return;
 1906 
 /* before the first copied packet, drop anything preceding the requested
  * start position unless -copy_prior_start is set */
 1907  if (!ost->frame_number && !ost->copy_prior_start) {
 1908  int64_t comp_start = start_time;
 1909  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
 1910  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
 1911  if (pkt->pts == AV_NOPTS_VALUE ?
 1912  ist->pts < comp_start :
 1913  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
 1914  return;
 1915  }
 1916 
 /* stop copying once the per-output recording time (-t) is reached */
 1917  if (of->recording_time != INT64_MAX &&
 1918  ist->pts >= of->recording_time + start_time) {
 1919  close_output_stream(ost);
 1920  return;
 1921  }
 1922 
 /* likewise for a per-input-file recording time limit */
 1923  if (f->recording_time != INT64_MAX) {
 1924  start_time = f->ctx->start_time;
 1925  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
 1926  start_time += f->start_time;
 1927  if (ist->pts >= f->recording_time + start_time) {
 1928  close_output_stream(ost);
 1929  return;
 1930  }
 1931  }
 1932 
 1933  /* force the input stream PTS */
 1934  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
 1935  ost->sync_opts++;
 1936 
 /* rescale timestamps into the output stream time base, shifted so the
  * output starts at zero */
 1937  if (pkt->pts != AV_NOPTS_VALUE)
 1938  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
 1939  else
 1940  opkt.pts = AV_NOPTS_VALUE;
 1941 
 1942  if (pkt->dts == AV_NOPTS_VALUE)
 1943  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
 1944  else
 1945  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
 1946  opkt.dts -= ost_tb_start_time;
 1947 
 /* NOTE(review): lines 1949 and 1953 (the duration computation and part of
  * the av_rescale_delta() argument list) are missing from this
  * extraction — confirm against upstream. */
 1948  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
 1950  if(!duration)
 1951  duration = ist->dec_ctx->frame_size;
 1952  opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
 1954  ost->st->time_base) - ost_tb_start_time;
 1955  }
 1956 
 1957  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
 1958  opkt.flags = pkt->flags;
 1959  // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
 1960  if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
 1961  && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
 1962  && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
 1963  && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
 1964  ) {
 /* NOTE(review): line 1968 (the trailing argument of av_parser_change)
  * is missing from this extraction. */
 1965  int ret = av_parser_change(ost->parser, ost->parser_avctx,
 1966  &opkt.data, &opkt.size,
 1967  pkt->data, pkt->size,
 1969  if (ret < 0) {
 1970  av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
 1971  av_err2str(ret));
 1972  exit_program(1);
 1973  }
 /* nonzero return: the parser allocated new data we must take ownership of */
 1974  if (ret) {
 1975  opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
 1976  if (!opkt.buf)
 1977  exit_program(1);
 1978  }
 1979  } else {
 1980  opkt.data = pkt->data;
 1981  opkt.size = pkt->size;
 1982  }
 1983  av_copy_packet_side_data(&opkt, pkt);
 1984 
 1985 #if FF_API_LAVF_FMT_RAWPICTURE
 1986  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
 1987  ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
 1988  (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
 1989  /* store AVPicture in AVPacket, as expected by the output format */
 1990  int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
 1991  if (ret < 0) {
 1992  av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
 1993  av_err2str(ret));
 1994  exit_program(1);
 1995  }
 1996  opkt.data = (uint8_t *)&pict;
 1997  opkt.size = sizeof(AVPicture);
 1998  opkt.flags |= AV_PKT_FLAG_KEY;
 1999  }
 2000 #endif
 2001 
 2002  output_packet(of, &opkt, ost);
 2003 }
2004 
/* If the decoder reported no channel layout, try to pick a default one from
 * the channel count and warn the user about the guess.  Returns 1 when a
 * layout is known or was guessed, 0 when no guess was possible.
 * NOTE(review): the function signature line (2005 upstream) and line 2014
 * (which upstream assigns the guessed default layout) are missing from this
 * extraction — confirm against upstream. */
2006 {
 2007  AVCodecContext *dec = ist->dec_ctx;
 2008 
 2009  if (!dec->channel_layout) {
 2010  char layout_name[256];
 2011 
 /* refuse to guess above the -guess_layout_max channel count */
 2012  if (dec->channels > ist->guess_layout_max)
 2013  return 0;
 2015  if (!dec->channel_layout)
 2016  return 0;
 2017  av_get_channel_layout_string(layout_name, sizeof(layout_name),
 2018  dec->channels, dec->channel_layout);
 2019  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
 2020  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
 2021  }
 2022  return 1;
 2023 }
2024 
/* Record the outcome of a decode call in decode_error_stat[] and, when
 * -xerror is set, abort the program on a decode error or a corrupt decoded
 * frame. */
2025 static void check_decode_result(InputStream *ist, int *got_output, int ret)
 2026 {
 /* index 0 counts successful decodes, index 1 counts failures */
 2027  if (*got_output || ret<0)
 2028  decode_error_stat[ret<0] ++;
 2029 
 2030  if (ret < 0 && exit_on_error)
 2031  exit_program(1);
 2032 
 /* NOTE(review): line 2034 (the inner corrupt-frame test guarding the
  * fatal log below) is missing from this extraction — confirm against
  * upstream. */
 2033  if (exit_on_error && *got_output && ist) {
 2035  av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
 2036  exit_program(1);
 2037  }
 2038  }
 2039 }
2040 
2041 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2042 // There is the following difference: if you got a frame, you must call
2043 // it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
2044 // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
2045 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2046 {
2047  int ret;
2048 
2049  *got_frame = 0;
2050 
2051  if (pkt) {
2052  ret = avcodec_send_packet(avctx, pkt);
2053  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2054  // decoded frames with avcodec_receive_frame() until done.
2055  if (ret < 0 && ret != AVERROR_EOF)
2056  return ret;
2057  }
2058 
2059  ret = avcodec_receive_frame(avctx, frame);
2060  if (ret < 0 && ret != AVERROR(EAGAIN))
2061  return ret;
2062  if (ret >= 0)
2063  *got_frame = 1;
2064 
2065  return 0;
2066 }
2067 
/* Feed a decoded frame into every filtergraph input fed by this input
 * stream.  All consumers but the last receive a new reference; the last one
 * is handed the frame itself so no extra ref/unref pair is needed.
 * Returns 0 on success or a negative AVERROR. */
2068 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
 2069 {
 2070  int i, ret;
 2071  AVFrame *f;
 2072 
 2073  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
 2074  for (i = 0; i < ist->nb_filters; i++) {
 2075  if (i < ist->nb_filters - 1) {
 2076  f = ist->filter_frame;
 2077  ret = av_frame_ref(f, decoded_frame);
 2078  if (ret < 0)
 2079  break;
 2080  } else
 2081  f = decoded_frame;
 /* NOTE(review): line 2083 (the flags argument of
  * av_buffersrc_add_frame_flags) is missing from this extraction. */
 2082  ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
 /* a graph that already reached EOF simply stops accepting input */
 2084  if (ret == AVERROR_EOF)
 2085  ret = 0; /* ignore */
 /* NOTE(review): line 2087 (presumably the av_log call prefix for the
  * message on line 2088) is missing from this extraction. */
 2086  if (ret < 0) {
 2088  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
 2089  break;
 2090  }
 2091  }
 2092  return ret;
 2093 }
2094 
2095 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
2096 {
2097  AVFrame *decoded_frame;
2098  AVCodecContext *avctx = ist->dec_ctx;
2099  int i, ret, err = 0, resample_changed;
2100  AVRational decoded_frame_tb;
2101 
2102  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2103  return AVERROR(ENOMEM);
2104  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2105  return AVERROR(ENOMEM);
2106  decoded_frame = ist->decoded_frame;
2107 
2109  ret = decode(avctx, decoded_frame, got_output, pkt);
2110  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2111 
2112  if (ret >= 0 && avctx->sample_rate <= 0) {
2113  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2114  ret = AVERROR_INVALIDDATA;
2115  }
2116 
2117  if (ret != AVERROR_EOF)
2118  check_decode_result(ist, got_output, ret);
2119 
2120  if (!*got_output || ret < 0)
2121  return ret;
2122 
2123  ist->samples_decoded += decoded_frame->nb_samples;
2124  ist->frames_decoded++;
2125 
2126 #if 1
2127  /* increment next_dts to use for the case where the input stream does not
2128  have timestamps or there are multiple frames in the packet */
2129  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2130  avctx->sample_rate;
2131  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2132  avctx->sample_rate;
2133 #endif
2134 
2135  resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
2136  ist->resample_channels != avctx->channels ||
2137  ist->resample_channel_layout != decoded_frame->channel_layout ||
2138  ist->resample_sample_rate != decoded_frame->sample_rate;
2139  if (resample_changed) {
2140  char layout1[64], layout2[64];
2141 
2142  if (!guess_input_channel_layout(ist)) {
2143  av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
2144  "layout for Input Stream #%d.%d\n", ist->file_index,
2145  ist->st->index);
2146  exit_program(1);
2147  }
2148  decoded_frame->channel_layout = avctx->channel_layout;
2149 
2150  av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
2152  av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
2153  decoded_frame->channel_layout);
2154 
2156  "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
2157  ist->file_index, ist->st->index,
2159  ist->resample_channels, layout1,
2160  decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2161  avctx->channels, layout2);
2162 
2163  ist->resample_sample_fmt = decoded_frame->format;
2164  ist->resample_sample_rate = decoded_frame->sample_rate;
2165  ist->resample_channel_layout = decoded_frame->channel_layout;
2166  ist->resample_channels = avctx->channels;
2167 
2168  for (i = 0; i < ist->nb_filters; i++) {
2169  err = ifilter_parameters_from_frame(ist->filters[i], decoded_frame);
2170  if (err < 0) {
2172  "Error reconfiguring input stream %d:%d filter %d\n",
2173  ist->file_index, ist->st->index, i);
2174  goto fail;
2175  }
2176  }
2177 
2178  for (i = 0; i < nb_filtergraphs; i++)
2179  if (ist_in_filtergraph(filtergraphs[i], ist)) {
2180  FilterGraph *fg = filtergraphs[i];
2181  if (configure_filtergraph(fg) < 0) {
2182  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2183  exit_program(1);
2184  }
2185  }
2186  }
2187 
2188  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2189  decoded_frame_tb = ist->st->time_base;
2190  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2191  decoded_frame->pts = pkt->pts;
2192  decoded_frame_tb = ist->st->time_base;
2193  }else {
2194  decoded_frame->pts = ist->dts;
2195  decoded_frame_tb = AV_TIME_BASE_Q;
2196  }
2197  if (decoded_frame->pts != AV_NOPTS_VALUE)
2198  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2199  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2200  (AVRational){1, avctx->sample_rate});
2201  ist->nb_samples = decoded_frame->nb_samples;
2202  err = send_frame_to_filters(ist, decoded_frame);
2203  decoded_frame->pts = AV_NOPTS_VALUE;
2204 
2205 fail:
2206  av_frame_unref(ist->filter_frame);
2207  av_frame_unref(decoded_frame);
2208  return err < 0 ? err : ret;
2209 }
2210 
2211 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof)
2212 {
2213  AVFrame *decoded_frame;
2214  int i, ret = 0, err = 0, resample_changed;
2215  int64_t best_effort_timestamp;
2216  int64_t dts = AV_NOPTS_VALUE;
2217  AVPacket avpkt;
2218 
2219  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2220  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2221  // skip the packet.
2222  if (!eof && pkt && pkt->size == 0)
2223  return 0;
2224 
2225  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2226  return AVERROR(ENOMEM);
2227  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2228  return AVERROR(ENOMEM);
2229  decoded_frame = ist->decoded_frame;
2230  if (ist->dts != AV_NOPTS_VALUE)
2231  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2232  if (pkt) {
2233  avpkt = *pkt;
2234  avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2235  }
2236 
2237  // The old code used to set dts on the drain packet, which does not work
2238  // with the new API anymore.
2239  if (eof) {
2240  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2241  if (!new)
2242  return AVERROR(ENOMEM);
2243  ist->dts_buffer = new;
2244  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2245  }
2246 
2248  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2249  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2250 
2251  // The following line may be required in some cases where there is no parser
2252  // or the parser does not has_b_frames correctly
2253  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2254  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2255  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2256  } else
2258  "video_delay is larger in decoder than demuxer %d > %d.\n"
2259  "If you want to help, upload a sample "
2260  "of this file to ftp://upload.ffmpeg.org/incoming/ "
2261  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2262  ist->dec_ctx->has_b_frames,
2263  ist->st->codecpar->video_delay);
2264  }
2265 
2266  if (ret != AVERROR_EOF)
2267  check_decode_result(ist, got_output, ret);
2268 
2269  if (*got_output && ret >= 0) {
2270  if (ist->dec_ctx->width != decoded_frame->width ||
2271  ist->dec_ctx->height != decoded_frame->height ||
2272  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2273  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2274  decoded_frame->width,
2275  decoded_frame->height,
2276  decoded_frame->format,
2277  ist->dec_ctx->width,
2278  ist->dec_ctx->height,
2279  ist->dec_ctx->pix_fmt);
2280  }
2281  }
2282 
2283  if (!*got_output || ret < 0)
2284  return ret;
2285 
2286  if(ist->top_field_first>=0)
2287  decoded_frame->top_field_first = ist->top_field_first;
2288 
2289  ist->frames_decoded++;
2290 
2291  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2292  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2293  if (err < 0)
2294  goto fail;
2295  }
2296  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2297 
2298  best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2299 
2300  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2301  best_effort_timestamp = ist->dts_buffer[0];
2302 
2303  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2304  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2305  ist->nb_dts_buffer--;
2306  }
2307 
2308  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2309  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2310 
2311  if (ts != AV_NOPTS_VALUE)
2312  ist->next_pts = ist->pts = ts;
2313  }
2314 
2315  if (debug_ts) {
2316  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2317  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2318  ist->st->index, av_ts2str(decoded_frame->pts),
2319  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2320  best_effort_timestamp,
2321  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2322  decoded_frame->key_frame, decoded_frame->pict_type,
2323  ist->st->time_base.num, ist->st->time_base.den);
2324  }
2325 
2326  if (ist->st->sample_aspect_ratio.num)
2327  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2328 
2329  resample_changed = ist->resample_width != decoded_frame->width ||
2330  ist->resample_height != decoded_frame->height ||
2331  ist->resample_pix_fmt != decoded_frame->format;
2332  if (resample_changed) {
2333  av_log(NULL, AV_LOG_INFO,
2334  "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2335  ist->file_index, ist->st->index,
2337  decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2338 
2339  ist->resample_width = decoded_frame->width;
2340  ist->resample_height = decoded_frame->height;
2341  ist->resample_pix_fmt = decoded_frame->format;
2342 
2343  for (i = 0; i < ist->nb_filters; i++) {
2344  err = ifilter_parameters_from_frame(ist->filters[i], decoded_frame);
2345  if (err < 0) {
2346  av_log(NULL, AV_LOG_ERROR,
2347  "Error reconfiguring input stream %d:%d filter %d\n",
2348  ist->file_index, ist->st->index, i);
2349  goto fail;
2350  }
2351  }
2352 
2353  for (i = 0; i < nb_filtergraphs; i++) {
2354  if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2355  configure_filtergraph(filtergraphs[i]) < 0) {
2356  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2357  exit_program(1);
2358  }
2359  }
2360  }
2361 
2362  err = send_frame_to_filters(ist, decoded_frame);
2363 
2364 fail:
2366  av_frame_unref(decoded_frame);
2367  return err < 0 ? err : ret;
2368 }
2369 
2370 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2371 {
2372  AVSubtitle subtitle;
2373  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2374  &subtitle, got_output, pkt);
2375 
2376  check_decode_result(NULL, got_output, ret);
2377 
2378  if (ret < 0 || !*got_output) {
2379  if (!pkt->size)
2380  sub2video_flush(ist);
2381  return ret;
2382  }
2383 
2384  if (ist->fix_sub_duration) {
2385  int end = 1;
2386  if (ist->prev_sub.got_output) {
2387  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2388  1000, AV_TIME_BASE);
2389  if (end < ist->prev_sub.subtitle.end_display_time) {
2390  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2391  "Subtitle duration reduced from %d to %d%s\n",
2393  end <= 0 ? ", dropping it" : "");
2395  }
2396  }
2397  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2398  FFSWAP(int, ret, ist->prev_sub.ret);
2399  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2400  if (end <= 0)
2401  goto out;
2402  }
2403 
2404  if (!*got_output)
2405  return ret;
2406 
2407  sub2video_update(ist, &subtitle);
2408 
2409  if (!subtitle.num_rects)
2410  goto out;
2411 
2412  ist->frames_decoded++;
2413 
2414  for (i = 0; i < nb_output_streams; i++) {
2415  OutputStream *ost = output_streams[i];
2416 
2417  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2418  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2419  continue;
2420 
2421  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2422  }
2423 
2424 out:
2425  avsubtitle_free(&subtitle);
2426  return ret;
2427 }
2428 
2430 {
2431  int i, ret;
2432  for (i = 0; i < ist->nb_filters; i++) {
2433  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2434  if (ret < 0)
2435  return ret;
2436  }
2437  return 0;
2438 }
2439 
2440 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2441 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2442 {
2443  int ret = 0, i;
2444  int repeating = 0;
2445  int eof_reached = 0;
2446 
2447  AVPacket avpkt;
2448  if (!ist->saw_first_ts) {
2449  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2450  ist->pts = 0;
2451  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2452  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2453  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2454  }
2455  ist->saw_first_ts = 1;
2456  }
2457 
2458  if (ist->next_dts == AV_NOPTS_VALUE)
2459  ist->next_dts = ist->dts;
2460  if (ist->next_pts == AV_NOPTS_VALUE)
2461  ist->next_pts = ist->pts;
2462 
2463  if (!pkt) {
2464  /* EOF handling */
2465  av_init_packet(&avpkt);
2466  avpkt.data = NULL;
2467  avpkt.size = 0;
2468  } else {
2469  avpkt = *pkt;
2470  }
2471 
2472  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2473  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2474  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2475  ist->next_pts = ist->pts = ist->dts;
2476  }
2477 
2478  // while we have more to decode or while the decoder did output something on EOF
2479  while (ist->decoding_needed) {
2480  int duration = 0;
2481  int got_output = 0;
2482 
2483  ist->pts = ist->next_pts;
2484  ist->dts = ist->next_dts;
2485 
2486  switch (ist->dec_ctx->codec_type) {
2487  case AVMEDIA_TYPE_AUDIO:
2488  ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output);
2489  break;
2490  case AVMEDIA_TYPE_VIDEO:
2491  ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, !pkt);
2492  if (!repeating || !pkt || got_output) {
2493  if (pkt && pkt->duration) {
2494  duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2495  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2497  duration = ((int64_t)AV_TIME_BASE *
2498  ist->dec_ctx->framerate.den * ticks) /
2500  }
2501 
2502  if(ist->dts != AV_NOPTS_VALUE && duration) {
2503  ist->next_dts += duration;
2504  }else
2505  ist->next_dts = AV_NOPTS_VALUE;
2506  }
2507 
2508  if (got_output)
2509  ist->next_pts += duration; //FIXME the duration is not correct in some cases
2510  break;
2511  case AVMEDIA_TYPE_SUBTITLE:
2512  if (repeating)
2513  break;
2514  ret = transcode_subtitles(ist, &avpkt, &got_output);
2515  if (!pkt && ret >= 0)
2516  ret = AVERROR_EOF;
2517  break;
2518  default:
2519  return -1;
2520  }
2521 
2522  if (ret == AVERROR_EOF) {
2523  eof_reached = 1;
2524  break;
2525  }
2526 
2527  if (ret < 0) {
2528  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2529  ist->file_index, ist->st->index, av_err2str(ret));
2530  if (exit_on_error)
2531  exit_program(1);
2532  // Decoding might not terminate if we're draining the decoder, and
2533  // the decoder keeps returning an error.
2534  // This should probably be considered a libavcodec issue.
2535  // Sample: fate-vsynth1-dnxhd-720p-hr-lb
2536  if (!pkt)
2537  eof_reached = 1;
2538  break;
2539  }
2540 
2541  if (!got_output)
2542  break;
2543 
2544  // During draining, we might get multiple output frames in this loop.
2545  // ffmpeg.c does not drain the filter chain on configuration changes,
2546  // which means if we send multiple frames at once to the filters, and
2547  // one of those frames changes configuration, the buffered frames will
2548  // be lost. This can upset certain FATE tests.
2549  // Decode only 1 frame per call on EOF to appease these FATE tests.
2550  // The ideal solution would be to rewrite decoding to use the new
2551  // decoding API in a better way.
2552  if (!pkt)
2553  break;
2554 
2555  repeating = 1;
2556  }
2557 
2558  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2559  /* except when looping we need to flush but not to send an EOF */
2560  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2561  int ret = send_filter_eof(ist);
2562  if (ret < 0) {
2563  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2564  exit_program(1);
2565  }
2566  }
2567 
2568  /* handle stream copy */
2569  if (!ist->decoding_needed) {
2570  ist->dts = ist->next_dts;
2571  switch (ist->dec_ctx->codec_type) {
2572  case AVMEDIA_TYPE_AUDIO:
2573  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2574  ist->dec_ctx->sample_rate;
2575  break;
2576  case AVMEDIA_TYPE_VIDEO:
2577  if (ist->framerate.num) {
2578  // TODO: Remove work-around for c99-to-c89 issue 7
2579  AVRational time_base_q = AV_TIME_BASE_Q;
2580  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2581  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2582  } else if (pkt->duration) {
2583  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2584  } else if(ist->dec_ctx->framerate.num != 0) {
2585  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2586  ist->next_dts += ((int64_t)AV_TIME_BASE *
2587  ist->dec_ctx->framerate.den * ticks) /
2589  }
2590  break;
2591  }
2592  ist->pts = ist->dts;
2593  ist->next_pts = ist->next_dts;
2594  }
2595  for (i = 0; pkt && i < nb_output_streams; i++) {
2596  OutputStream *ost = output_streams[i];
2597 
2598  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2599  continue;
2600 
2601  do_streamcopy(ist, ost, pkt);
2602  }
2603 
2604  return !eof_reached;
2605 }
2606 
2607 static void print_sdp(void)
2608 {
2609  char sdp[16384];
2610  int i;
2611  int j;
2612  AVIOContext *sdp_pb;
2613  AVFormatContext **avc;
2614 
2615  for (i = 0; i < nb_output_files; i++) {
2616  if (!output_files[i]->header_written)
2617  return;
2618  }
2619 
2620  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2621  if (!avc)
2622  exit_program(1);
2623  for (i = 0, j = 0; i < nb_output_files; i++) {
2624  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2625  avc[j] = output_files[i]->ctx;
2626  j++;
2627  }
2628  }
2629 
2630  if (!j)
2631  goto fail;
2632 
2633  av_sdp_create(avc, j, sdp, sizeof(sdp));
2634 
2635  if (!sdp_filename) {
2636  printf("SDP:\n%s\n", sdp);
2637  fflush(stdout);
2638  } else {
2639  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2640  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2641  } else {
2642  avio_printf(sdp_pb, "SDP:\n%s", sdp);
2643  avio_closep(&sdp_pb);
2645  }
2646  }
2647 
2648 fail:
2649  av_freep(&avc);
2650 }
2651 
2653 {
2654  int i;
2655  for (i = 0; hwaccels[i].name; i++)
2656  if (hwaccels[i].pix_fmt == pix_fmt)
2657  return &hwaccels[i];
2658  return NULL;
2659 }
2660 
2662 {
2663  InputStream *ist = s->opaque;
2664  const enum AVPixelFormat *p;
2665  int ret;
2666 
2667  for (p = pix_fmts; *p != -1; p++) {
2669  const HWAccel *hwaccel;
2670 
2671  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2672  break;
2673 
2674  hwaccel = get_hwaccel(*p);
2675  if (!hwaccel ||
2676  (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2677  (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2678  continue;
2679 
2680  ret = hwaccel->init(s);
2681  if (ret < 0) {
2682  if (ist->hwaccel_id == hwaccel->id) {
2684  "%s hwaccel requested for input stream #%d:%d, "
2685  "but cannot be initialized.\n", hwaccel->name,
2686  ist->file_index, ist->st->index);
2687  return AV_PIX_FMT_NONE;
2688  }
2689  continue;
2690  }
2691 
2692  if (ist->hw_frames_ctx) {
2694  if (!s->hw_frames_ctx)
2695  return AV_PIX_FMT_NONE;
2696  }
2697 
2698  ist->active_hwaccel_id = hwaccel->id;
2699  ist->hwaccel_pix_fmt = *p;
2700  break;
2701  }
2702 
2703  return *p;
2704 }
2705 
2707 {
2708  InputStream *ist = s->opaque;
2709 
2710  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2711  return ist->hwaccel_get_buffer(s, frame, flags);
2712 
2713  return avcodec_default_get_buffer2(s, frame, flags);
2714 }
2715 
2716 static int init_input_stream(int ist_index, char *error, int error_len)
2717 {
2718  int i, ret;
2719  InputStream *ist = input_streams[ist_index];
2720 
2721  for (i = 0; i < ist->nb_filters; i++) {
2722  ret = ifilter_parameters_from_decoder(ist->filters[i], ist->dec_ctx);
2723  if (ret < 0) {
2724  av_log(NULL, AV_LOG_FATAL, "Error initializing filter input\n");
2725  return ret;
2726  }
2727  }
2728 
2729  if (ist->decoding_needed) {
2730  AVCodec *codec = ist->dec;
2731  if (!codec) {
2732  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2733  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2734  return AVERROR(EINVAL);
2735  }
2736 
2737  ist->dec_ctx->opaque = ist;
2738  ist->dec_ctx->get_format = get_format;
2739  ist->dec_ctx->get_buffer2 = get_buffer;
2740  ist->dec_ctx->thread_safe_callbacks = 1;
2741 
2742  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2743  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2744  (ist->decoding_needed & DECODING_FOR_OST)) {
2745  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2747  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2748  }
2749 
2750  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2751 
2752  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2753  * audio, and video decoders such as cuvid or mediacodec */
2755 
2756  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2757  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2758  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2759  if (ret == AVERROR_EXPERIMENTAL)
2760  abort_codec_experimental(codec, 0);
2761 
2762  snprintf(error, error_len,
2763  "Error while opening decoder for input stream "
2764  "#%d:%d : %s",
2765  ist->file_index, ist->st->index, av_err2str(ret));
2766  return ret;
2767  }
2769  }
2770 
2771  ist->next_pts = AV_NOPTS_VALUE;
2772  ist->next_dts = AV_NOPTS_VALUE;
2773 
2774  return 0;
2775 }
2776 
2778 {
2779  if (ost->source_index >= 0)
2780  return input_streams[ost->source_index];
2781  return NULL;
2782 }
2783 
/* qsort() comparator ordering int64_t values ascending; returns the sign
 * of (*a - *b) without risking overflow. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t va = *(const int64_t *)a;
    const int64_t vb = *(const int64_t *)b;
    return (va > vb) - (va < vb);
}
2788 
2789 /* open the muxer when all the streams are initialized */
2790 static int check_init_output_file(OutputFile *of, int file_index)
2791 {
2792  int ret, i;
2793 
2794  for (i = 0; i < of->ctx->nb_streams; i++) {
2795  OutputStream *ost = output_streams[of->ost_index + i];
2796  if (!ost->initialized)
2797  return 0;
2798  }
2799 
2800  of->ctx->interrupt_callback = int_cb;
2801 
2802  ret = avformat_write_header(of->ctx, &of->opts);
2803  if (ret < 0) {
2805  "Could not write header for output file #%d "
2806  "(incorrect codec parameters ?): %s\n",
2807  file_index, av_err2str(ret));
2808  return ret;
2809  }
2810  //assert_avoptions(of->opts);
2811  of->header_written = 1;
2812 
2813  av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2814 
2815  if (sdp_filename || want_sdp)
2816  print_sdp();
2817 
2818  /* flush the muxing queues */
2819  for (i = 0; i < of->ctx->nb_streams; i++) {
2820  OutputStream *ost = output_streams[of->ost_index + i];
2821 
2822  while (av_fifo_size(ost->muxing_queue)) {
2823  AVPacket pkt;
2824  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2825  write_packet(of, &pkt, ost);
2826  }
2827  }
2828 
2829  return 0;
2830 }
2831 
2833 {
2834  AVBSFContext *ctx;
2835  int i, ret;
2836 
2837  if (!ost->nb_bitstream_filters)
2838  return 0;
2839 
2840  for (i = 0; i < ost->nb_bitstream_filters; i++) {
2841  ctx = ost->bsf_ctx[i];
2842 
2843  ret = avcodec_parameters_copy(ctx->par_in,
2844  i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
2845  if (ret < 0)
2846  return ret;
2847 
2848  ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
2849 
2850  ret = av_bsf_init(ctx);
2851  if (ret < 0) {
2852  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2853  ost->bsf_ctx[i]->filter->name);
2854  return ret;
2855  }
2856  }
2857 
2858  ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
2859  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
2860  if (ret < 0)
2861  return ret;
2862 
2863  ost->st->time_base = ctx->time_base_out;
2864 
2865  return 0;
2866 }
2867 
2869 {
2870  OutputFile *of = output_files[ost->file_index];
2871  InputStream *ist = get_input_stream(ost);
2872  AVCodecParameters *par_dst = ost->st->codecpar;
2873  AVCodecParameters *par_src = ost->ref_par;
2874  AVRational sar;
2875  int i, ret;
2876  uint32_t codec_tag = par_dst->codec_tag;
2877 
2878  av_assert0(ist && !ost->filter);
2879 
2881  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2882  if (ret < 0) {
2884  "Error setting up codec context options.\n");
2885  return ret;
2886  }
2888 
2889  if (!codec_tag) {
2890  unsigned int codec_tag_tmp;
2891  if (!of->ctx->oformat->codec_tag ||
2892  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
2893  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
2894  codec_tag = par_src->codec_tag;
2895  }
2896 
2897  ret = avcodec_parameters_copy(par_dst, par_src);
2898  if (ret < 0)
2899  return ret;
2900 
2901  par_dst->codec_tag = codec_tag;
2902 
2903  if (!ost->frame_rate.num)
2904  ost->frame_rate = ist->framerate;
2905  ost->st->avg_frame_rate = ost->frame_rate;
2906 
2908  if (ret < 0)
2909  return ret;
2910 
2911  // copy timebase while removing common factors
2913 
2914  // copy disposition
2915  ost->st->disposition = ist->st->disposition;
2916 
2917  if (ist->st->nb_side_data) {
2918  ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2919  sizeof(*ist->st->side_data));
2920  if (!ost->st->side_data)
2921  return AVERROR(ENOMEM);
2922 
2923  ost->st->nb_side_data = 0;
2924  for (i = 0; i < ist->st->nb_side_data; i++) {
2925  const AVPacketSideData *sd_src = &ist->st->side_data[i];
2926  AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2927 
2928  if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2929  continue;
2930 
2931  sd_dst->data = av_malloc(sd_src->size);
2932  if (!sd_dst->data)
2933  return AVERROR(ENOMEM);
2934  memcpy(sd_dst->data, sd_src->data, sd_src->size);
2935  sd_dst->size = sd_src->size;
2936  sd_dst->type = sd_src->type;
2937  ost->st->nb_side_data++;
2938  }
2939  }
2940 
2941  ost->parser = av_parser_init(par_dst->codec_id);
2942  ost->parser_avctx = avcodec_alloc_context3(NULL);
2943  if (!ost->parser_avctx)
2944  return AVERROR(ENOMEM);
2945 
2946  switch (par_dst->codec_type) {
2947  case AVMEDIA_TYPE_AUDIO:
2948  if (audio_volume != 256) {
2949  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2950  exit_program(1);
2951  }
2952  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
2953  par_dst->block_align= 0;
2954  if(par_dst->codec_id == AV_CODEC_ID_AC3)
2955  par_dst->block_align= 0;
2956  break;
2957  case AVMEDIA_TYPE_VIDEO:
2958  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2959  sar =
2960  av_mul_q(ost->frame_aspect_ratio,
2961  (AVRational){ par_dst->height, par_dst->width });
2962  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2963  "with stream copy may produce invalid files\n");
2964  }
2965  else if (ist->st->sample_aspect_ratio.num)
2966  sar = ist->st->sample_aspect_ratio;
2967  else
2968  sar = par_src->sample_aspect_ratio;
2969  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
2970  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2971  ost->st->r_frame_rate = ist->st->r_frame_rate;
2972  break;
2973  }
2974 
2975  return 0;
2976 }
2977 
2979 {
2980  AVDictionaryEntry *e;
2981 
2982  uint8_t *encoder_string;
2983  int encoder_string_len;
2984  int format_flags = 0;
2985  int codec_flags = 0;
2986 
2987  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
2988  return;
2989 
2990  e = av_dict_get(of->opts, "fflags", NULL, 0);
2991  if (e) {
2992  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2993  if (!o)
2994  return;
2995  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
2996  }
2997  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2998  if (e) {
2999  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3000  if (!o)
3001  return;
3002  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3003  }
3004 
3005  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3006  encoder_string = av_mallocz(encoder_string_len);
3007  if (!encoder_string)
3008  exit_program(1);
3009 
3010  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3011  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3012  else
3013  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3014  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3015  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3017 }
3018 
3019 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3020  AVCodecContext *avctx)
3021 {
3022  char *p;
3023  int n = 1, i, size, index = 0;
3024  int64_t t, *pts;
3025 
3026  for (p = kf; *p; p++)
3027  if (*p == ',')
3028  n++;
3029  size = n;
3030  pts = av_malloc_array(size, sizeof(*pts));
3031  if (!pts) {
3032  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3033  exit_program(1);
3034  }
3035 
3036  p = kf;
3037  for (i = 0; i < n; i++) {
3038  char *next = strchr(p, ',');
3039 
3040  if (next)
3041  *next++ = 0;
3042 
3043  if (!memcmp(p, "chapters", 8)) {
3044 
3045  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3046  int j;
3047 
3048  if (avf->nb_chapters > INT_MAX - size ||
3049  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3050  sizeof(*pts)))) {
3052  "Could not allocate forced key frames array.\n");
3053  exit_program(1);
3054  }
3055  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3056  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3057 
3058  for (j = 0; j < avf->nb_chapters; j++) {
3059  AVChapter *c = avf->chapters[j];
3060  av_assert1(index < size);
3061  pts[index++] = av_rescale_q(c->start, c->time_base,
3062  avctx->time_base) + t;
3063  }
3064 
3065  } else {
3066 
3067  t = parse_time_or_die("force_key_frames", p, 1);
3068  av_assert1(index < size);
3069  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3070 
3071  }
3072 
3073  p = next;
3074  }
3075 
3076  av_assert0(index == size);
3077  qsort(pts, size, sizeof(*pts), compare_int64);
3078  ost->forced_kf_count = size;
3079  ost->forced_kf_pts = pts;
3080 }
3081 
// init_output_stream_encode(ost): configure ost->enc_ctx (frame rate, time
// base, sample/pixel format, dimensions, forced keyframes, ...) from the
// input stream and the configured filtergraph, right before the encoder is
// opened.  Returns 0 on success or a negative AVERROR code.
// NOTE(review): this listing is an extraction of a cross-referenced source;
// several lines (including the signature at 3082 and e.g. 3086, 3106-3107,
// 3112, 3123, 3130, 3165, 3171, 3179, 3182-3193, 3205, 3210-3225) were
// hyperlinks and are missing here, so some statements below appear
// truncated -- consult the full file before editing.
3083 {
3084  InputStream *ist = get_input_stream(ost);
3085  AVCodecContext *enc_ctx = ost->enc_ctx;
3087  AVFormatContext *oc = output_files[ost->file_index]->ctx;
3088  int j, ret;
3089 
3090  set_encoder_id(output_files[ost->file_index], ost);
3091 
// When a real input stream feeds this output, inherit its disposition and
// chroma sample location; otherwise warn-path below (partially elided).
3092  if (ist) {
3093  ost->st->disposition = ist->st->disposition;
3094 
3095  dec_ctx = ist->dec_ctx;
3096 
3097  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3098  } else {
// Look for another stream of the same media type in the output file; if this
// is the only one, give it the default disposition (tail of branch elided).
3099  for (j = 0; j < oc->nb_streams; j++) {
3100  AVStream *st = oc->streams[j];
3101  if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3102  break;
3103  }
3104  if (j == oc->nb_streams)
3105  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3108  }
3109 
// Audio/video encoders take their parameters from the filtergraph output, so
// the graph must be configured first (condition partially elided).
3110  if ((enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3111  enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3113  FilterGraph *fg = ost->filter->graph;
3114 
3115  if (configure_filtergraph(fg)) {
3116  av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
3117  exit_program(1);
3118  }
3119  }
3120 
// Frame-rate selection cascade: explicit -r, then input -r, then the input
// stream's r_frame_rate, finally a 25 fps fallback with a warning.
3121  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3122  if (!ost->frame_rate.num)
3124  if (ist && !ost->frame_rate.num)
3125  ost->frame_rate = ist->framerate;
3126  if (ist && !ost->frame_rate.num)
3127  ost->frame_rate = ist->st->r_frame_rate;
3128  if (ist && !ost->frame_rate.num) {
3129  ost->frame_rate = (AVRational){25, 1};
3131  "No information "
3132  "about the input framerate is available. Falling "
3133  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3134  "if you want a different framerate.\n",
3135  ost->file_index, ost->index);
3136  }
3137 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
// Snap to the nearest rate the encoder supports, unless -force_fps was given.
3138  if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3139  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3140  ost->frame_rate = ost->enc->supported_framerates[idx];
3141  }
3142  // reduce frame rate for mpeg4 to be within the spec limits
3143  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3144  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3145  ost->frame_rate.num, ost->frame_rate.den, 65535);
3146  }
3147  }
3148 
// Per-media-type encoder context setup, driven by the filtergraph's output
// link properties.
3149  switch (enc_ctx->codec_type) {
3150  case AVMEDIA_TYPE_AUDIO:
3151  enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3152  if (dec_ctx)
3153  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3154  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3155  enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3156  enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3157  enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3158  enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3159  break;
3160  case AVMEDIA_TYPE_VIDEO:
// Encoder time base is 1/frame_rate; fall back to the filter link time base.
3161  enc_ctx->time_base = av_inv_q(ost->frame_rate);
3162  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3163  enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3164  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3166  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3167  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3168  }
// Rescale the pre-parsed forced-keyframe timestamps into the encoder time
// base (source time base argument on line 3171 elided).
3169  for (j = 0; j < ost->forced_kf_count; j++)
3170  ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3172  enc_ctx->time_base);
3173 
3174  enc_ctx->width = ost->filter->filter->inputs[0]->w;
3175  enc_ctx->height = ost->filter->filter->inputs[0]->h;
3176  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3177  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3178  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
// Informational warnings when no pixel format was requested and the default
// one may not play everywhere (log-call heads elided).
3180  if (!strncmp(ost->enc->name, "libx264", 7) &&
3181  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3184  "No pixel format specified, %s for H.264 encoding chosen.\n"
3185  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3187  if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3188  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3191  "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3192  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3194  enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3195  if (dec_ctx)
3196  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3197  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3198 
3199  ost->st->avg_frame_rate = ost->frame_rate;
3200 
3201  if (!dec_ctx ||
3202  enc_ctx->width != dec_ctx->width ||
3203  enc_ctx->height != dec_ctx->height ||
3204  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3206  }
3207 
// Forced keyframes: "expr:<e>" is parsed as an expression, "source" keeps the
// input keyframes, anything else is a static timestamp list (calls elided).
3208  if (ost->forced_keyframes) {
3209  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3212  if (ret < 0) {
3214  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3215  return ret;
3216  }
3221 
3222  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3223  // parse it only for static kf timings
3224  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3226  }
3227  }
3228  break;
3229  case AVMEDIA_TYPE_SUBTITLE:
3230  enc_ctx->time_base = (AVRational){1, 1000};
3231  if (!enc_ctx->width) {
3232  enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3233  enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3234  }
3235  break;
3236  case AVMEDIA_TYPE_DATA:
3237  break;
3238  default:
// Unknown media type here is a programming error, not a user error.
3239  abort();
3240  break;
3241  }
3242 
3243  return 0;
3244 }
3245 
// init_output_stream(ost, error, error_len): fully initialize one output
// stream -- open the encoder (or set up streamcopy), copy side data and
// subtitle headers, apply a user -disposition override, set up bitstream
// filters, and finally try to write the output file header.  On failure a
// human-readable message is written into 'error' and a negative AVERROR code
// is returned.
// NOTE(review): lines 3263, 3267, 3278, 3294-3296, 3301, 3303 and 3317 were
// hyperlinks in this extraction and are missing, so some statements below
// appear truncated -- consult the full file before editing.
3246 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3247 {
3248  int ret = 0;
3249 
3250  if (ost->encoding_needed) {
3251  AVCodec *codec = ost->enc;
3252  AVCodecContext *dec = NULL;
3253  InputStream *ist;
3254 
3255  ret = init_output_stream_encode(ost);
3256  if (ret < 0)
3257  return ret;
3258 
3259  if ((ist = get_input_stream(ost)))
3260  dec = ist->dec_ctx;
// Propagate the decoder's subtitle header (allocation line 3263 elided).
3261  if (dec && dec->subtitle_header) {
3262  /* ASS code assumes this buffer is null terminated so add extra byte. */
3264  if (!ost->enc_ctx->subtitle_header)
3265  return AVERROR(ENOMEM);
3266  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3268  }
// Default encoder options: automatic threading, and a 128 kb/s audio bitrate
// when the codec declares no defaults and the user set none.
3269  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3270  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3271  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3272  !codec->defaults &&
3273  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3274  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3275  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3276 
// Hand the filtergraph's hardware frames context to the encoder (the
// av_buffer_ref assignment on line 3278 is elided).
3277  if (ost->filter && ost->filter->filter->inputs[0]->hw_frames_ctx) {
3279  if (!ost->enc_ctx->hw_frames_ctx)
3280  return AVERROR(ENOMEM);
3281  }
3282 
3283  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3284  if (ret == AVERROR_EXPERIMENTAL)
3285  abort_codec_experimental(codec, 1);
3286  snprintf(error, error_len,
3287  "Error while opening encoder for output stream #%d:%d - "
3288  "maybe incorrect parameters such as bit_rate, rate, width or height",
3289  ost->file_index, ost->index);
3290  return ret;
3291  }
// Fixed-frame-size audio encoders need the FIFO-based buffering set up
// (call on lines 3294-3296 partially elided).
3292  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3293  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3295  ost->enc_ctx->frame_size);
3297  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3298  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3299  " It takes bits/s as argument, not kbits/s\n");
3300 
// Export the opened encoder parameters to the muxer stream (the call on
// line 3301 -- presumably avcodec_parameters_from_context -- is elided).
3302  if (ret < 0) {
3304  "Error initializing the output stream codec context.\n");
3305  exit_program(1);
3306  }
3307  /*
3308  * FIXME: ost->st->codec should't be needed here anymore.
3309  */
3310  ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3311  if (ret < 0)
3312  return ret;
3313 
// Deep-copy all coded side data from the encoder context onto the stream
// (the array allocation on line 3317 is elided).
3314  if (ost->enc_ctx->nb_coded_side_data) {
3315  int i;
3316 
3318  sizeof(*ost->st->side_data));
3319  if (!ost->st->side_data)
3320  return AVERROR(ENOMEM);
3321 
3322  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3323  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3324  AVPacketSideData *sd_dst = &ost->st->side_data[i];
3325 
3326  sd_dst->data = av_malloc(sd_src->size);
3327  if (!sd_dst->data)
3328  return AVERROR(ENOMEM);
3329  memcpy(sd_dst->data, sd_src->data, sd_src->size);
3330  sd_dst->size = sd_src->size;
3331  sd_dst->type = sd_src->type;
3332  ost->st->nb_side_data++;
3333  }
3334  }
3335 
3336  // copy timebase while removing common factors
3337  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3338  ost->st->codec->codec= ost->enc_ctx->codec;
3339  } else if (ost->stream_copy) {
3340  ret = init_output_stream_streamcopy(ost);
3341  if (ret < 0)
3342  return ret;
3343 
3344  /*
3345  * FIXME: will the codec context used by the parser during streamcopy
3346  * This should go away with the new parser API.
3347  */
3348  ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3349  if (ret < 0)
3350  return ret;
3351  }
3352 
3353  // parse user provided disposition, and update stream values
3354  if (ost->disposition) {
// An ad-hoc AVOption flags table so the -disposition string can be parsed
// with av_opt_eval_flags.
3355  static const AVOption opts[] = {
3356  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3357  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3358  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3359  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3360  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3361  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3362  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3363  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3364  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3365  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3366  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3367  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3368  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3369  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3370  { NULL },
3371  };
3372  static const AVClass class = {
3373  .class_name = "",
3374  .item_name = av_default_item_name,
3375  .option = opts,
3376  .version = LIBAVUTIL_VERSION_INT,
3377  };
3378  const AVClass *pclass = &class;
3379 
3380  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3381  if (ret < 0)
3382  return ret;
3383  }
3384 
3385  /* initialize bitstream filters for the output stream
3386  * needs to be done here, because the codec id for streamcopy is not
3387  * known until now */
3388  ret = init_output_bsfs(ost);
3389  if (ret < 0)
3390  return ret;
3391 
3392  ost->initialized = 1;
3393 
// With this stream ready, the containing file's header may now be writable.
3394  ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3395  if (ret < 0)
3396  return ret;
3397 
3398  return ret;
3399 }
3400 
// report_new_stream(input_index, pkt): warn once per newly appearing stream
// index in an input file; such streams showed up after stream setup and are
// ignored by the transcoder.  nb_streams_warn remembers how far we warned.
// NOTE(review): the media-type argument of the av_log call (line 3410) was a
// hyperlink in this extraction and is missing here.
3401 static void report_new_stream(int input_index, AVPacket *pkt)
3402 {
3403  InputFile *file = input_files[input_index];
3404  AVStream *st = file->ctx->streams[pkt->stream_index];
3405 
// Already warned about this stream (or a later one): stay quiet.
3406  if (pkt->stream_index < file->nb_streams_warn)
3407  return;
3408  av_log(file->ctx, AV_LOG_WARNING,
3409  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3411  input_index, pkt->stream_index,
3412  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3413  file->nb_streams_warn = pkt->stream_index + 1;
3414 }
3415 
// transcode_init(): one-time setup before the main transcode loop -- resolve
// complex-filtergraph output sources, start framerate emulation clocks, run
// hwaccel-specific init, open all decoders and encoders, discard unused
// programs, write headers for stream-less outputs, and finally print the
// stream mapping.  Returns 0 on success or a negative error code (the
// human-readable message accumulated in 'error' is logged before returning).
3416 static int transcode_init(void)
3417 {
3418  int ret = 0, i, j, k;
3419  AVFormatContext *oc;
3420  OutputStream *ost;
3421  InputStream *ist;
3422  char error[1024] = {0};
3423 
// For outputs fed by a single-input complex filtergraph, record which input
// stream ultimately feeds them (used for mapping/reporting below).
3424  for (i = 0; i < nb_filtergraphs; i++) {
3425  FilterGraph *fg = filtergraphs[i];
3426  for (j = 0; j < fg->nb_outputs; j++) {
3427  OutputFilter *ofilter = fg->outputs[j];
3428  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3429  continue;
3430  if (fg->nb_inputs != 1)
3431  continue;
3432  for (k = nb_input_streams-1; k >= 0 ; k--)
3433  if (fg->inputs[0]->ist == input_streams[k])
3434  break;
3435  ofilter->ost->source_index = k;
3436  }
3437  }
3438 
3439  /* init framerate emulation */
3440  for (i = 0; i < nb_input_files; i++) {
3441  InputFile *ifile = input_files[i];
3442  if (ifile->rate_emu)
3443  for (j = 0; j < ifile->nb_streams; j++)
3444  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3445  }
3446 
3447  /* hwaccel transcoding */
3448  for (i = 0; i < nb_output_streams; i++) {
3449  ost = output_streams[i];
3450 
3451  if (!ost->stream_copy) {
3452 #if CONFIG_LIBMFX
3453  if (qsv_transcode_init(ost))
3454  exit_program(1);
3455 #endif
3456 
3457 #if CONFIG_CUVID
3458  if (cuvid_transcode_init(ost))
3459  exit_program(1);
3460 #endif
3461  }
3462  }
3463 
// Open all decoders; on the first failure close every encoder context and
// fall through to the mapping dump so the error is still reported.
3464  /* init input streams */
3465  for (i = 0; i < nb_input_streams; i++)
3466  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3467  for (i = 0; i < nb_output_streams; i++) {
3468  ost = output_streams[i];
3469  avcodec_close(ost->enc_ctx);
3470  }
3471  goto dump_format;
3472  }
3473 
3474  /* open each encoder */
3475  for (i = 0; i < nb_output_streams; i++) {
3476  ret = init_output_stream(output_streams[i], error, sizeof(error));
3477  if (ret < 0)
3478  goto dump_format;
3479  }
3480 
// A program is kept only if at least one of its streams is actually used.
3481  /* discard unused programs */
3482  for (i = 0; i < nb_input_files; i++) {
3483  InputFile *ifile = input_files[i];
3484  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3485  AVProgram *p = ifile->ctx->programs[j];
3486  int discard = AVDISCARD_ALL;
3487 
3488  for (k = 0; k < p->nb_stream_indexes; k++)
3489  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3490  discard = AVDISCARD_DEFAULT;
3491  break;
3492  }
3493  p->discard = discard;
3494  }
3495  }
3496 
// Outputs with zero streams (allowed by AVFMT_NOSTREAMS muxers) never pass
// through init_output_stream, so write their headers here.
3497  /* write headers for files with no streams */
3498  for (i = 0; i < nb_output_files; i++) {
3499  oc = output_files[i]->ctx;
3500  if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3501  ret = check_init_output_file(output_files[i], i);
3502  if (ret < 0)
3503  goto dump_format;
3504  }
3505  }
3506 
// Reached on both success and failure: print how inputs map to outputs.
3507  dump_format:
3508  /* dump the stream mapping */
3509  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3510  for (i = 0; i < nb_input_streams; i++) {
3511  ist = input_streams[i];
3512 
3513  for (j = 0; j < ist->nb_filters; j++) {
3514  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3515  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3516  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3517  ist->filters[j]->name);
3518  if (nb_filtergraphs > 1)
3519  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3520  av_log(NULL, AV_LOG_INFO, "\n");
3521  }
3522  }
3523  }
3524 
3525  for (i = 0; i < nb_output_streams; i++) {
3526  ost = output_streams[i];
3527 
3528  if (ost->attachment_filename) {
3529  /* an attached file */
3530  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3531  ost->attachment_filename, ost->file_index, ost->index);
3532  continue;
3533  }
3534 
3535  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3536  /* output from a complex graph */
3537  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3538  if (nb_filtergraphs > 1)
3539  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3540 
3541  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3542  ost->index, ost->enc ? ost->enc->name : "?");
3543  continue;
3544  }
3545 
3546  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3547  input_streams[ost->source_index]->file_index,
3548  input_streams[ost->source_index]->st->index,
3549  ost->file_index,
3550  ost->index);
3551  if (ost->sync_ist != input_streams[ost->source_index])
3552  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3553  ost->sync_ist->file_index,
3554  ost->sync_ist->st->index);
3555  if (ost->stream_copy)
3556  av_log(NULL, AV_LOG_INFO, " (copy)");
3557  else {
// Display both the codec name and the implementation; when they coincide the
// implementation is reported as "native".
3558  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3559  const AVCodec *out_codec = ost->enc;
3560  const char *decoder_name = "?";
3561  const char *in_codec_name = "?";
3562  const char *encoder_name = "?";
3563  const char *out_codec_name = "?";
3564  const AVCodecDescriptor *desc;
3565 
3566  if (in_codec) {
3567  decoder_name = in_codec->name;
3568  desc = avcodec_descriptor_get(in_codec->id);
3569  if (desc)
3570  in_codec_name = desc->name;
3571  if (!strcmp(decoder_name, in_codec_name))
3572  decoder_name = "native";
3573  }
3574 
3575  if (out_codec) {
3576  encoder_name = out_codec->name;
3577  desc = avcodec_descriptor_get(out_codec->id);
3578  if (desc)
3579  out_codec_name = desc->name;
3580  if (!strcmp(encoder_name, out_codec_name))
3581  encoder_name = "native";
3582  }
3583 
3584  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3585  in_codec_name, decoder_name,
3586  out_codec_name, encoder_name);
3587  }
3588  av_log(NULL, AV_LOG_INFO, "\n");
3589  }
3590 
3591  if (ret) {
3592  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3593  return ret;
3594  }
3595 
3596  transcode_init_done = 1;
3597 
3598  return 0;
3599 }
3600 
3601 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3602 static int need_output(void)
3603 {
3604  int i;
3605 
3606  for (i = 0; i < nb_output_streams; i++) {
3607  OutputStream *ost = output_streams[i];
3608  OutputFile *of = output_files[ost->file_index];
3609  AVFormatContext *os = output_files[ost->file_index]->ctx;
3610 
3611  if (ost->finished ||
3612  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3613  continue;
3614  if (ost->frame_number >= ost->max_frames) {
3615  int j;
3616  for (j = 0; j < of->ctx->nb_streams; j++)
3617  close_output_stream(output_streams[of->ost_index + j]);
3618  continue;
3619  }
3620 
3621  return 1;
3622  }
3623 
3624  return 0;
3625 }
3626 
3627 /**
3628  * Select the output stream to process.
3629  *
3630  * @return selected output stream, or NULL if none available
3631  */
3633 {
3634  int i;
3635  int64_t opts_min = INT64_MAX;
3636  OutputStream *ost_min = NULL;
3637 
3638  for (i = 0; i < nb_output_streams; i++) {
3639  OutputStream *ost = output_streams[i];
3640  int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3641  av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3642  AV_TIME_BASE_Q);
3643  if (ost->st->cur_dts == AV_NOPTS_VALUE)
3644  av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3645 
3646  if (!ost->finished && opts < opts_min) {
3647  opts_min = opts;
3648  ost_min = ost->unavailable ? NULL : ost;
3649  }
3650  }
3651  return ost_min;
3652 }
3653 
/* Enable (on != 0) or disable terminal echo on stdin; a silent no-op on
 * platforms without termios or when stdin is not a tty. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios term;

    if (tcgetattr(0, &term) != 0)
        return;

    if (on)
        term.c_lflag |= ECHO;
    else
        term.c_lflag &= ~ECHO;
    tcsetattr(0, TCSANOW, &term);
#endif
}
3665 
// check_keyboard_interaction(cur_time): poll the controlling terminal at most
// every 100 ms and act on single-key commands (q/+/-/s/h/c/C/d/D/?).  Returns
// AVERROR_EXIT when the user quit or a signal was received, 0 otherwise.
// NOTE(review): the signature line (3666) was a hyperlink in this extraction
// and is missing (the parameter referenced below is 'cur_time'); lines 3690
// and 3727 (two av_log call heads) are missing as well.
3667 {
3668  int i, ret, key;
3669  static int64_t last_time;
3670  if (received_nb_signals)
3671  return AVERROR_EXIT;
3672  /* read_key() returns 0 on EOF */
// Rate-limit terminal polling to once per 100 ms; never poll in daemon mode.
3673  if(cur_time - last_time >= 100000 && !run_as_daemon){
3674  key = read_key();
3675  last_time = cur_time;
3676  }else
3677  key = -1;
3678  if (key == 'q')
3679  return AVERROR_EXIT;
3680  if (key == '+') av_log_set_level(av_log_get_level()+10);
3681  if (key == '-') av_log_set_level(av_log_get_level()-10);
3682  if (key == 's') qp_hist ^= 1;
// 'h' cycles: no dump -> packet dump -> packet+hex dump -> no dump.
3683  if (key == 'h'){
3684  if (do_hex_dump){
3685  do_hex_dump = do_pkt_dump = 0;
3686  } else if(do_pkt_dump){
3687  do_hex_dump = 1;
3688  } else
3689  do_pkt_dump = 1;
3691  }
// 'c'/'C': read an interactive command line and send ('c': first matching
// filter only) or queue ('C': all matching filters) it to the filtergraphs.
3692  if (key == 'c' || key == 'C'){
3693  char buf[4096], target[64], command[256], arg[256] = {0};
3694  double time;
3695  int k, n = 0;
3696  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3697  i = 0;
3698  set_tty_echo(1);
3699  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3700  if (k > 0)
3701  buf[i++] = k;
3702  buf[i] = 0;
3703  set_tty_echo(0);
3704  fprintf(stderr, "\n");
3705  if (k > 0 &&
3706  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3707  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3708  target, time, command, arg);
3709  for (i = 0; i < nb_filtergraphs; i++) {
3710  FilterGraph *fg = filtergraphs[i];
3711  if (fg->graph) {
// time < 0 means "now": send immediately; otherwise queue for that time.
3712  if (time < 0) {
3713  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3714  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3715  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3716  } else if (key == 'c') {
3717  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3718  ret = AVERROR_PATCHWELCOME;
3719  } else {
3720  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3721  if (ret < 0)
3722  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3723  }
3724  }
3725  }
3726  } else {
3728  "Parse error, at least 3 arguments were expected, "
3729  "only %d given in string '%s'\n", n, buf);
3730  }
3731  }
// 'D' cycles through codec debug flag values (skipping ones that would crash);
// 'd' prompts for an explicit numeric debug value.
3732  if (key == 'd' || key == 'D'){
3733  int debug=0;
3734  if(key == 'D') {
3735  debug = input_streams[0]->st->codec->debug<<1;
3736  if(!debug) debug = 1;
3737  while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3738  debug += debug;
3739  }else{
3740  char buf[32];
3741  int k = 0;
3742  i = 0;
3743  set_tty_echo(1);
3744  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3745  if (k > 0)
3746  buf[i++] = k;
3747  buf[i] = 0;
3748  set_tty_echo(0);
3749  fprintf(stderr, "\n");
3750  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3751  fprintf(stderr,"error parsing debug value\n");
3752  }
// Apply the chosen debug value to every decoder and encoder context.
3753  for(i=0;i<nb_input_streams;i++) {
3754  input_streams[i]->st->codec->debug = debug;
3755  }
3756  for(i=0;i<nb_output_streams;i++) {
3757  OutputStream *ost = output_streams[i];
3758  ost->enc_ctx->debug = debug;
3759  }
3760  if(debug) av_log_set_level(AV_LOG_DEBUG);
3761  fprintf(stderr,"debug=%d\n", debug);
3762  }
3763  if (key == '?'){
3764  fprintf(stderr, "key function\n"
3765  "? show this help\n"
3766  "+ increase verbosity\n"
3767  "- decrease verbosity\n"
3768  "c Send command to first matching filter supporting it\n"
3769  "C Send/Queue command to all matching filters\n"
3770  "D cycle through available debug modes\n"
3771  "h dump packets/hex press to cycle through the 3 states\n"
3772  "q quit\n"
3773  "s Show QP histogram\n"
3774  );
3775  }
3776  return 0;
3777 }
3778 
3779 #if HAVE_PTHREADS
// input_thread(arg): per-input-file demuxer thread (arg is the InputFile*).
// Reads packets with av_read_frame and forwards them to the main thread via
// f->in_thread_queue; propagates read errors / EOF through the queue's error
// state and exits.  Starts non-blocking when f->non_blocking is set, and
// falls back to blocking (with a warning, partially elided at line 3802)
// once the queue first fills up.
3780 static void *input_thread(void *arg)
3781 {
3782  InputFile *f = arg;
3783  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3784  int ret = 0;
3785 
3786  while (1) {
3787  AVPacket pkt;
3788  ret = av_read_frame(f->ctx, &pkt);
3789 
// Demuxer temporarily starved: back off briefly and retry.
3790  if (ret == AVERROR(EAGAIN)) {
3791  av_usleep(10000);
3792  continue;
3793  }
// Real error or EOF: make the receiver see it and stop this thread.
3794  if (ret < 0) {
3795  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3796  break;
3797  }
3798  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
// Queue full in non-blocking mode: switch to blocking sends from now on
// (the warning's av_log head on line 3802 is elided).
3799  if (flags && ret == AVERROR(EAGAIN)) {
3800  flags = 0;
3801  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3803  "Thread message queue blocking; consider raising the "
3804  "thread_queue_size option (current value: %d)\n",
3805  f->thread_queue_size);
3806  }
3807  if (ret < 0) {
3808  if (ret != AVERROR_EOF)
3809  av_log(f->ctx, AV_LOG_ERROR,
3810  "Unable to send packet to main thread: %s\n",
3811  av_err2str(ret));
// The packet could not be handed over, so this thread must release it.
3812  av_packet_unref(&pkt);
3813  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3814  break;
3815  }
3816  }
3817 
3818  return NULL;
3819 }
3820 
// free_input_threads(): shut down every input demuxer thread -- drain and
// unref all packets still queued, join the thread and free its queue.
// NOTE(review): line 3831 was a hyperlink in this extraction and is missing;
// upstream it signals the sender (av_thread_message_queue_set_err_send with
// AVERROR_EOF) so the thread stops producing -- confirm against the full file.
3821 static void free_input_threads(void)
3822 {
3823  int i;
3824 
3825  for (i = 0; i < nb_input_files; i++) {
3826  InputFile *f = input_files[i];
3827  AVPacket pkt;
3828 
// Skip files whose thread/queue were never created.
3829  if (!f || !f->in_thread_queue)
3830  continue;
// Drain leftover packets so their buffers are released before joining.
3832  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3833  av_packet_unref(&pkt);
3834 
3835  pthread_join(f->thread, NULL);
3836  f->joined = 1;
3837  av_thread_message_queue_free(&f->in_thread_queue);
3838  }
3839 }
3840 
3841 static int init_input_threads(void)
3842 {
3843  int i, ret;
3844 
3845  if (nb_input_files == 1)
3846  return 0;
3847 
3848  for (i = 0; i < nb_input_files; i++) {
3849  InputFile *f = input_files[i];
3850 
3851  if (f->ctx->pb ? !f->ctx->pb->seekable :
3852  strcmp(f->ctx->iformat->name, "lavfi"))
3853  f->non_blocking = 1;
3854  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3855  f->thread_queue_size, sizeof(AVPacket));
3856  if (ret < 0)
3857  return ret;
3858 
3859  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3860  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3861  av_thread_message_queue_free(&f->in_thread_queue);
3862  return AVERROR(ret);
3863  }
3864  }
3865  return 0;
3866 }
3867 
// get_input_packet_mt(f, pkt): receive the next packet produced by this
// input file's demuxer thread.  Non-blocking inputs pass a flag so the call
// returns EAGAIN instead of waiting (the flag constant on line 3872 --
// AV_THREAD_MESSAGE_NONBLOCK vs 0 -- is elided in this extraction).
3868 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3869 {
3870  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3871  f->non_blocking ?
3873 }
3875 
// get_input_packet(f, pkt): fetch the next demuxed packet for input file f.
// With -re (rate emulation) it first throttles: if any stream of the file is
// ahead of wall-clock time, EAGAIN is returned instead of a packet.  With
// multiple inputs the packet comes from the file's demuxer thread, otherwise
// straight from av_read_frame.
// NOTE(review): the signature line (3876) was a hyperlink in this extraction
// and is missing; the parameters referenced below are 'f' and 'pkt'.
3877 {
3878  if (f->rate_emu) {
3879  int i;
3880  for (i = 0; i < f->nb_streams; i++) {
3881  InputStream *ist = input_streams[f->ist_index + i];
// Compare the stream's decode position (in microseconds) with the elapsed
// wall-clock time since the emulation clock started.
3882  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3883  int64_t now = av_gettime_relative() - ist->start;
3884  if (pts > now)
3885  return AVERROR(EAGAIN);
3886  }
3887  }
3888 
3889 #if HAVE_PTHREADS
3890  if (nb_input_files > 1)
3891  return get_input_packet_mt(f, pkt);
3892 #endif
3893  return av_read_frame(f->ctx, pkt);
3894 }
3895 
3896 static int got_eagain(void)
3897 {
3898  int i;
3899  for (i = 0; i < nb_output_streams; i++)
3900  if (output_streams[i]->unavailable)
3901  return 1;
3902  return 0;
3903 }
3904 
3905 static void reset_eagain(void)
3906 {
3907  int i;
3908  for (i = 0; i < nb_input_files; i++)
3909  input_files[i]->eagain = 0;
3910  for (i = 0; i < nb_output_streams; i++)
3911  output_streams[i]->unavailable = 0;
3912 }
3913 
3914 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
3915 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3916  AVRational time_base)
3917 {
3918  int ret;
3919 
3920  if (!*duration) {
3921  *duration = tmp;
3922  return tmp_time_base;
3923  }
3924 
3925  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3926  if (ret < 0) {
3927  *duration = tmp;
3928  return tmp_time_base;
3929  }
3930 
3931  return time_base;
3932 }
3933 
// seek_to_start(ifile, is): rewind an input file for -stream_loop.  Seeks
// back to the start time, flushes all active decoders, then computes the
// file's total duration (including an estimate for the last frame's length,
// taken from the audio sample count when audio is present, else from the
// frame rate) so subsequent timestamps can be offset per loop iteration.
// NOTE(review): the signature line (3934) was a hyperlink in this extraction
// and is missing; the parameters referenced below are 'ifile' and 'is'.
3935 {
3936  InputStream *ist;
3937  AVCodecContext *avctx;
3938  int i, ret, has_audio = 0;
3939  int64_t duration = 0;
3940 
3941  ret = av_seek_frame(is, -1, is->start_time, 0);
3942  if (ret < 0)
3943  return ret;
3944 
// First pass: flush decoders and detect whether usable audio exists.
3945  for (i = 0; i < ifile->nb_streams; i++) {
3946  ist = input_streams[ifile->ist_index + i];
3947  avctx = ist->dec_ctx;
3948 
3949  // flush decoders
3950  if (ist->decoding_needed) {
3951  process_input_packet(ist, NULL, 1);
3952  avcodec_flush_buffers(avctx);
3953  }
3954 
3955  /* duration is the length of the last frame in a stream
3956  * when audio stream is present we don't care about
3957  * last video frame length because it's not defined exactly */
3958  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
3959  has_audio = 1;
3960  }
3961 
// Second pass: compute each stream's total duration and keep the maximum
// (via duration_max) as the file-level duration/time base.
3962  for (i = 0; i < ifile->nb_streams; i++) {
3963  ist = input_streams[ifile->ist_index + i];
3964  avctx = ist->dec_ctx;
3965 
3966  if (has_audio) {
// Audio gives an exact last-frame length from its sample count.
3967  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3968  AVRational sample_rate = {1, avctx->sample_rate};
3969 
3970  duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
3971  } else
3972  continue;
3973  } else {
// No audio: approximate the last frame as one frame interval, or 1 tick.
3974  if (ist->framerate.num) {
3975  duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3976  } else if (ist->st->avg_frame_rate.num) {
3977  duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3978  } else duration = 1;
3979  }
3980  if (!ifile->duration)
3981  ifile->time_base = ist->st->time_base;
3982  /* the total duration of the stream, max_pts - min_pts is
3983  * the duration of the stream without the last frame */
3984  duration += ist->max_pts - ist->min_pts;
3985  ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
3986  ifile->time_base);
3987  }
3988 
// A positive loop count is consumed once per rewind; negative loops forever.
3989  if (ifile->loop > 0)
3990  ifile->loop--;
3991 
3992  return ret;
3993 }
3994 
3995 /*
3996  * Return
3997  * - 0 -- one packet was read and processed
3998  * - AVERROR(EAGAIN) -- no packets were available for selected file,
3999  * this function should be called again
4000  * - AVERROR_EOF -- this function should not be called again
4001  */
4002 static int process_input(int file_index)
4003 {
4004  InputFile *ifile = input_files[file_index];
4005  AVFormatContext *is;
4006  InputStream *ist;
4007  AVPacket pkt;
4008  int ret, i, j;
4009  int64_t duration;
4010  int64_t pkt_dts;
4011 
4012  is = ifile->ctx;
4013  ret = get_input_packet(ifile, &pkt);
4014 
4015  if (ret == AVERROR(EAGAIN)) {
4016  ifile->eagain = 1;
4017  return ret;
4018  }
4019  if (ret < 0 && ifile->loop) {
4020  if ((ret = seek_to_start(ifile, is)) < 0)
4021  return ret;
4022  ret = get_input_packet(ifile, &pkt);
4023  if (ret == AVERROR(EAGAIN)) {
4024  ifile->eagain = 1;
4025  return ret;
4026  }
4027  }
4028  if (ret < 0) {
4029  if (ret != AVERROR_EOF) {
4030  print_error(is->filename, ret);
4031  if (exit_on_error)
4032  exit_program(1);
4033  }
4034 
4035  for (i = 0; i < ifile->nb_streams; i++) {
4036  ist = input_streams[ifile->ist_index + i];
4037  if (ist->decoding_needed) {
4038  ret = process_input_packet(ist, NULL, 0);
4039  if (ret>0)
4040  return 0;
4041  }
4042 
4043  /* mark all outputs that don't go through lavfi as finished */
4044  for (j = 0; j < nb_output_streams; j++) {
4045  OutputStream *ost = output_streams[j];
4046 
4047  if (ost->source_index == ifile->ist_index + i &&
4048  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4049  finish_output_stream(ost);
4050  }
4051  }
4052 
4053  ifile->eof_reached = 1;
4054  return AVERROR(EAGAIN);
4055  }
4056 
4057  reset_eagain();
4058 
4059  if (do_pkt_dump) {
4061  is->streams[pkt.stream_index]);
4062  }
4063  /* the following test is needed in case new streams appear
4064  dynamically in stream : we ignore them */
4065  if (pkt.stream_index >= ifile->nb_streams) {
4066  report_new_stream(file_index, &pkt);
4067  goto discard_packet;
4068  }
4069 
4070  ist = input_streams[ifile->ist_index + pkt.stream_index];
4071 
4072  ist->data_size += pkt.size;
4073  ist->nb_packets++;
4074 
4075  if (ist->discard)
4076  goto discard_packet;
4077 
4078  if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4079  av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4080  exit_program(1);
4081  }
4082 
4083  if (debug_ts) {
4084  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4085  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4089  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4090  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4091  av_ts2str(input_files[ist->file_index]->ts_offset),
4092  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4093  }
4094 
4095  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4096  int64_t stime, stime2;
4097  // Correcting starttime based on the enabled streams
4098  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4099  // so we instead do it here as part of discontinuity handling
4100  if ( ist->next_dts == AV_NOPTS_VALUE
4101  && ifile->ts_offset == -is->start_time
4102  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4103  int64_t new_start_time = INT64_MAX;
4104  for (i=0; i<is->nb_streams; i++) {
4105  AVStream *st = is->streams[i];
4106  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4107  continue;
4108  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4109  }
4110  if (new_start_time > is->start_time) {
4111  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4112  ifile->ts_offset = -new_start_time;
4113  }
4114  }
4115 
4116  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4117  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4118  ist->wrap_correction_done = 1;
4119 
4120  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4121  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4122  ist->wrap_correction_done = 0;
4123  }
4124  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4125  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4126  ist->wrap_correction_done = 0;
4127  }
4128  }
4129 
4130  /* add the stream-global side data to the first packet */
4131  if (ist->nb_packets == 1) {
4132  if (ist->st->nb_side_data)
4134  for (i = 0; i < ist->st->nb_side_data; i++) {
4135  AVPacketSideData *src_sd = &ist->st->side_data[i];
4136  uint8_t *dst_data;
4137 
4138  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4139  continue;
4140  if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4141  continue;
4142 
4143  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4144  if (!dst_data)
4145  exit_program(1);
4146 
4147  memcpy(dst_data, src_sd->data, src_sd->size);
4148  }
4149  }
4150 
4151  if (pkt.dts != AV_NOPTS_VALUE)
4152  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4153  if (pkt.pts != AV_NOPTS_VALUE)
4154  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4155 
4156  if (pkt.pts != AV_NOPTS_VALUE)
4157  pkt.pts *= ist->ts_scale;
4158  if (pkt.dts != AV_NOPTS_VALUE)
4159  pkt.dts *= ist->ts_scale;
4160 
4162  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4164  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4165  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4166  int64_t delta = pkt_dts - ifile->last_ts;
4167  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4168  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4169  ifile->ts_offset -= delta;
4171  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4172  delta, ifile->ts_offset);
4173  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4174  if (pkt.pts != AV_NOPTS_VALUE)
4175  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4176  }
4177  }
4178 
4179  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4180  if (pkt.pts != AV_NOPTS_VALUE) {
4181  pkt.pts += duration;
4182  ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4183  ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4184  }
4185 
4186  if (pkt.dts != AV_NOPTS_VALUE)
4187  pkt.dts += duration;
4188 
4190  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4192  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4193  !copy_ts) {
4194  int64_t delta = pkt_dts - ist->next_dts;
4195  if (is->iformat->flags & AVFMT_TS_DISCONT) {
4196  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4197  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4198  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4199  ifile->ts_offset -= delta;
4201  "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4202  delta, ifile->ts_offset);
4203  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4204  if (pkt.pts != AV_NOPTS_VALUE)
4205  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4206  }
4207  } else {
4208  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4209  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4210  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4211  pkt.dts = AV_NOPTS_VALUE;
4212  }
4213  if (pkt.pts != AV_NOPTS_VALUE){
4214  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4215  delta = pkt_pts - ist->next_dts;
4216  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4217  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4218  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4219  pkt.pts = AV_NOPTS_VALUE;
4220  }
4221  }
4222  }
4223  }
4224 
4225  if (pkt.dts != AV_NOPTS_VALUE)
4226  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4227 
4228  if (debug_ts) {
4229  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4231  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4232  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4233  av_ts2str(input_files[ist->file_index]->ts_offset),
4234  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4235  }
4236 
4237  sub2video_heartbeat(ist, pkt.pts);
4238 
4239  process_input_packet(ist, &pkt, 0);
4240 
4241 discard_packet:
4242  av_packet_unref(&pkt);
4243 
4244  return 0;
4245 }
4246 
4247 /**
4248  * Perform a step of transcoding for the specified filter graph.
4249  *
4250  * @param[in] graph filter graph to consider
4251  * @param[out] best_ist input stream where a frame would allow to continue
4252  * @return 0 for success, <0 for error
4253  */
4254 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4255 {
4256  int i, ret;
4257  int nb_requests, nb_requests_max = 0;
4258  InputFilter *ifilter;
4259  InputStream *ist;
4260 
4261  *best_ist = NULL;
4262  ret = avfilter_graph_request_oldest(graph->graph);
4263  if (ret >= 0)
4264  return reap_filters(0);
4265 
4266  if (ret == AVERROR_EOF) {
4267  ret = reap_filters(1);
4268  for (i = 0; i < graph->nb_outputs; i++)
4269  close_output_stream(graph->outputs[i]->ost);
4270  return ret;
4271  }
4272  if (ret != AVERROR(EAGAIN))
4273  return ret;
4274 
4275  for (i = 0; i < graph->nb_inputs; i++) {
4276  ifilter = graph->inputs[i];
4277  ist = ifilter->ist;
4278  if (input_files[ist->file_index]->eagain ||
4279  input_files[ist->file_index]->eof_reached)
4280  continue;
4281  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4282  if (nb_requests > nb_requests_max) {
4283  nb_requests_max = nb_requests;
4284  *best_ist = ist;
4285  }
4286  }
4287 
4288  if (!*best_ist)
4289  for (i = 0; i < graph->nb_outputs; i++)
4290  graph->outputs[i]->ost->unavailable = 1;
4291 
4292  return 0;
4293 }
4294 
4295 /**
4296  * Run a single step of transcoding.
4297  *
4298  * @return 0 for success, <0 for error
4299  */
4300 static int transcode_step(void)
4301 {
4302  OutputStream *ost;
4303  InputStream *ist;
4304  int ret;
4305 
4306  ost = choose_output();
4307  if (!ost) {
4308  if (got_eagain()) {
4309  reset_eagain();
4310  av_usleep(10000);
4311  return 0;
4312  }
4313  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4314  return AVERROR_EOF;
4315  }
4316 
4317  if (ost->filter) {
4318  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4319  return ret;
4320  if (!ist)
4321  return 0;
4322  } else {
4323  av_assert0(ost->source_index >= 0);
4324  ist = input_streams[ost->source_index];
4325  }
4326 
4327  ret = process_input(ist->file_index);
4328  if (ret == AVERROR(EAGAIN)) {
4329  if (input_files[ist->file_index]->eagain)
4330  ost->unavailable = 1;
4331  return 0;
4332  }
4333 
4334  if (ret < 0)
4335  return ret == AVERROR_EOF ? 0 : ret;
4336 
4337  return reap_filters(0);
4338 }
4339 
4340 /*
4341  * The following code is the main loop of the file converter
4342  */
4343 static int transcode(void)
4344 {
4345  int ret, i;
4346  AVFormatContext *os;
4347  OutputStream *ost;
4348  InputStream *ist;
4349  int64_t timer_start;
4350  int64_t total_packets_written = 0;
4351 
4352  ret = transcode_init();
4353  if (ret < 0)
4354  goto fail;
4355 
4356  if (stdin_interaction) {
4357  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4358  }
4359 
4360  timer_start = av_gettime_relative();
4361 
4362 #if HAVE_PTHREADS
4363  if ((ret = init_input_threads()) < 0)
4364  goto fail;
4365 #endif
4366 
4367  while (!received_sigterm) {
4368  int64_t cur_time= av_gettime_relative();
4369 
4370  /* if 'q' pressed, exits */
4371  if (stdin_interaction)
4372  if (check_keyboard_interaction(cur_time) < 0)
4373  break;
4374 
4375  /* check if there's any stream where output is still needed */
4376  if (!need_output()) {
4377  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4378  break;
4379  }
4380 
4381  ret = transcode_step();
4382  if (ret < 0 && ret != AVERROR_EOF) {
4383  char errbuf[128];
4384  av_strerror(ret, errbuf, sizeof(errbuf));
4385 
4386  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4387  break;
4388  }
4389 
4390  /* dump report by using the output first video and audio streams */
4391  print_report(0, timer_start, cur_time);
4392  }
4393 #if HAVE_PTHREADS
4394  free_input_threads();
4395 #endif
4396 
4397  /* at the end of stream, we must flush the decoder buffers */
4398  for (i = 0; i < nb_input_streams; i++) {
4399  ist = input_streams[i];
4400  if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4401  process_input_packet(ist, NULL, 0);
4402  }
4403  }
4404  flush_encoders();
4405 
4406  term_exit();
4407 
4408  /* write the trailer if needed and close file */
4409  for (i = 0; i < nb_output_files; i++) {
4410  os = output_files[i]->ctx;
4411  if (!output_files[i]->header_written) {
4413  "Nothing was written into output file %d (%s), because "
4414  "at least one of its streams received no packets.\n",
4415  i, os->filename);
4416  continue;
4417  }
4418  if ((ret = av_write_trailer(os)) < 0) {
4419  av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4420  if (exit_on_error)
4421  exit_program(1);
4422  }
4423  }
4424 
4425  /* dump report by using the first video and audio streams */
4426  print_report(1, timer_start, av_gettime_relative());
4427 
4428  /* close each encoder */
4429  for (i = 0; i < nb_output_streams; i++) {
4430  ost = output_streams[i];
4431  if (ost->encoding_needed) {
4432  av_freep(&ost->enc_ctx->stats_in);
4433  }
4434  total_packets_written += ost->packets_written;
4435  }
4436 
4437  if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4438  av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4439  exit_program(1);
4440  }
4441 
4442  /* close each decoder */
4443  for (i = 0; i < nb_input_streams; i++) {
4444  ist = input_streams[i];
4445  if (ist->decoding_needed) {
4446  avcodec_close(ist->dec_ctx);
4447  if (ist->hwaccel_uninit)
4448  ist->hwaccel_uninit(ist->dec_ctx);
4449  }
4450  }
4451 
4453 
4454  /* finished ! */
4455  ret = 0;
4456 
4457  fail:
4458 #if HAVE_PTHREADS
4459  free_input_threads();
4460 #endif
4461 
4462  if (output_streams) {
4463  for (i = 0; i < nb_output_streams; i++) {
4464  ost = output_streams[i];
4465  if (ost) {
4466  if (ost->logfile) {
4467  if (fclose(ost->logfile))
4469  "Error closing logfile, loss of information possible: %s\n",
4470  av_err2str(AVERROR(errno)));
4471  ost->logfile = NULL;
4472  }
4473  av_freep(&ost->forced_kf_pts);
4474  av_freep(&ost->apad);
4475  av_freep(&ost->disposition);
4476  av_dict_free(&ost->encoder_opts);
4477  av_dict_free(&ost->sws_dict);
4478  av_dict_free(&ost->swr_opts);
4479  av_dict_free(&ost->resample_opts);
4480  }
4481  }
4482  }
4483  return ret;
4484 }
4485 
4486 
/* Return the process' consumed user CPU time in microseconds.
 * Falls back to wall-clock time on platforms without a CPU-time API. */
static int64_t getutime(void)
{
#if HAVE_GETRUSAGE
    struct rusage usage;

    getrusage(RUSAGE_SELF, &usage);
    return usage.ru_utime.tv_sec * 1000000LL + usage.ru_utime.tv_usec;
#elif HAVE_GETPROCESSTIMES
    FILETIME creation_t, exit_t, kernel_t, user_t;
    HANDLE self = GetCurrentProcess();
    GetProcessTimes(self, &creation_t, &exit_t, &kernel_t, &user_t);
    /* FILETIME is in 100ns units; divide by 10 for microseconds. */
    return (((int64_t)user_t.dwHighDateTime << 32) | user_t.dwLowDateTime) / 10;
#else
    return av_gettime_relative();
#endif
}
4504 
/* Return the peak memory usage of the process in bytes, or 0 when no
 * platform API is available. */
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage usage;
    getrusage(RUSAGE_SELF, &usage);
    /* ru_maxrss is reported in kilobytes. */
    return (int64_t)usage.ru_maxrss * 1024;
#elif HAVE_GETPROCESSMEMORYINFO
    PROCESS_MEMORY_COUNTERS counters;
    HANDLE self = GetCurrentProcess();
    counters.cb = sizeof(counters);
    GetProcessMemoryInfo(self, &counters, sizeof(counters));
    return counters.PeakPagefileUsage;
#else
    return 0;
#endif
}
4522 
/* No-op av_log callback that discards every message.
 * NOTE(review): presumably installed to silence logging when running as a
 * daemon (the "-d" branch in main()) — confirm against the full source. */
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}
4526 
4527 int main(int argc, char **argv)
4528 {
4529  int i, ret;
4530  int64_t ti;
4531 
4532  init_dynload();
4533 
4535 
4536  setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4537 
4539  parse_loglevel(argc, argv, options);
4540 
4541  if(argc>1 && !strcmp(argv[1], "-d")){
4542  run_as_daemon=1;
4544  argc--;
4545  argv++;
4546  }
4547 
4549 #if CONFIG_AVDEVICE
4551 #endif
4553  av_register_all();
4555 
4556  show_banner(argc, argv, options);
4557 
4558  /* parse options and open all input/output files */
4559  ret = ffmpeg_parse_options(argc, argv);
4560  if (ret < 0)
4561  exit_program(1);
4562 
4563  if (nb_output_files <= 0 && nb_input_files == 0) {
4564  show_usage();
4565  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4566  exit_program(1);
4567  }
4568 
4569  /* file converter / grab */
4570  if (nb_output_files <= 0) {
4571  av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4572  exit_program(1);
4573  }
4574 
4575 // if (nb_input_files == 0) {
4576 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4577 // exit_program(1);
4578 // }
4579 
4580  for (i = 0; i < nb_output_files; i++) {
4581  if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
4582  want_sdp = 0;
4583  }
4584 
4585  current_time = ti = getutime();
4586  if (transcode() < 0)
4587  exit_program(1);
4588  ti = getutime() - ti;
4589  if (do_benchmark) {
4590  av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4591  }
4592  av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4595  exit_program(69);
4596 
4598  return main_return_code;
4599 }
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1543
int64_t pts
current pts of the decoded frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:305
int nb_bitstream_filters
Definition: ffmpeg.h:448
#define extra_bits(eb)
Definition: intrax8.c:159
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:895
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:119
uint32_t BOOL
#define FF_DEBUG_VIS_MB_TYPE
only access through AVOptions from outside libavcodec
Definition: avcodec.h:2940
int got_output
Definition: ffmpeg.h:333
#define AV_DISPOSITION_METADATA
Definition: avformat.h:873
void av_bsf_free(AVBSFContext **ctx)
Free a bitstream filter context and everything associated with it; write NULL into the supplied point...
Definition: bsf.c:36
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
Definition: ffmpeg.c:1892
int64_t first_dts
Timestamp corresponding to the last dts sync point.
Definition: avformat.h:1060
int guess_input_channel_layout(InputStream *ist)
Definition: ffmpeg.c:2005
#define NULL
Definition: coverity.c:32
int frame_number
Definition: ffmpeg.h:437
const struct AVCodec * codec
Definition: avcodec.h:1685
Definition: ffmpeg.h:414
AVRational framerate
Definition: avcodec.h:3376
enum AVFieldOrder field_order
Video only.
Definition: avcodec.h:4062
static void do_subtitle_out(OutputFile *of, OutputStream *ost, AVSubtitle *sub)
Definition: ffmpeg.c:925
enum HWAccelID active_hwaccel_id
Definition: ffmpeg.h:360
const char * s
Definition: avisynth_c.h:768
Bytestream IO Context.
Definition: avio.h:147
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static enum AVPixelFormat pix_fmt
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:544
void term_init(void)
Definition: ffmpeg.c:369
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:309
AVCodecParameters * par_out
Parameters of the output stream.
Definition: avcodec.h:5763
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
int64_t av_frame_get_pkt_duration(const AVFrame *frame)
uint8_t * name
Definition: ffmpeg.h:252
int nb_outputs
Definition: ffmpeg.h:281
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:124
AVDictionary * swr_opts
Definition: ffmpeg.h:494
#define DECODING_FOR_FILTER
Definition: ffmpeg.h:291
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2332
int resample_channels
Definition: ffmpeg.h:328
#define av_realloc_f(p, o, n)
This structure describes decoded (raw) audio or video data.
Definition: frame.h:184
int avfilter_graph_queue_command(AVFilterGraph *graph,