FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdint.h>
34 
35 #if HAVE_IO_H
36 #include <io.h>
37 #endif
38 #if HAVE_UNISTD_H
39 #include <unistd.h>
40 #endif
41 
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
45 #include "libavutil/opt.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
64 
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
68 
69 #if HAVE_SYS_RESOURCE_H
70 #include <sys/time.h>
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
74 #include <windows.h>
75 #endif
76 #if HAVE_GETPROCESSMEMORYINFO
77 #include <windows.h>
78 #include <psapi.h>
79 #endif
80 #if HAVE_SETCONSOLECTRLHANDLER
81 #include <windows.h>
82 #endif
83 
84 
85 #if HAVE_SYS_SELECT_H
86 #include <sys/select.h>
87 #endif
88 
89 #if HAVE_TERMIOS_H
90 #include <fcntl.h>
91 #include <sys/ioctl.h>
92 #include <sys/time.h>
93 #include <termios.h>
94 #elif HAVE_KBHIT
95 #include <conio.h>
96 #endif
97 
98 #if HAVE_PTHREADS
99 #include <pthread.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
/* Program identification consumed by the generic cmdutils banner code. */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
111 
/* -vstats output file; opened elsewhere, closed in ffmpeg_cleanup(). */
112 static FILE *vstats_file;
113 
/* Variable names usable in -force_key_frames expressions. */
114 const char *const forced_keyframes_const_names[] = {
115  "n",
116  "n_forced",
117  "prev_forced_n",
118  "prev_forced_t",
119  "t",
120  NULL
121 };
122 
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
126 
/* Global transcode statistics and behaviour flags. */
127 static int run_as_daemon = 0;
128 static int nb_frames_dup = 0;
129 static unsigned dup_warning = 1000;
130 static int nb_frames_drop = 0;
131 static int64_t decode_error_stat[2];
132 
133 static int want_sdp = 1;
134 
/* Timestamp of the previous update_benchmark() call.
 * NOTE(review): several global declarations (upstream lines 136, 138,
 * 140-143, 145-148, 150-151 — presumably the input/output stream and file
 * arrays) are missing from this dump. */
135 static int current_time;
137 
139 
144 
149 
152 
153 #if HAVE_TERMIOS_H
154 
155 /* init terminal so that we can grab keys */
156 static struct termios oldtty;
157 static int restore_tty;
158 #endif
159 
160 #if HAVE_PTHREADS
161 static void free_input_threads(void);
162 #endif
163 
164 /* sub2video hack:
165  Convert subtitles to video with alpha to insert them in filter graphs.
166  This is a temporary solution until libavfilter gets real subtitles support.
167  */
168 
/* sub2video_get_blank_frame: reset ist->sub2video.frame, size it from the
 * decoder (falling back to the sub2video defaults) and zero its first plane
 * so it can serve as a blank canvas.  Returns 0 or a negative AVERROR.
 * NOTE(review): the function signature (upstream line 169) and the pixel
 * format assignment (upstream line 177) are missing from this dump. */
170 {
171  int ret;
172  AVFrame *frame = ist->sub2video.frame;
173 
174  av_frame_unref(frame);
175  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
176  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
178  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
179  return ret;
/* Only data[0] is cleared — presumably a packed single-plane canvas; confirm. */
180  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
181  return 0;
182 }
183 
184 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
185  AVSubtitleRect *r)
186 {
187  uint32_t *pal, *dst2;
188  uint8_t *src, *src2;
189  int x, y;
190 
191  if (r->type != SUBTITLE_BITMAP) {
192  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
193  return;
194  }
195  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
196  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
197  r->x, r->y, r->w, r->h, w, h
198  );
199  return;
200  }
201 
202  dst += r->y * dst_linesize + r->x * 4;
203  src = r->data[0];
204  pal = (uint32_t *)r->data[1];
205  for (y = 0; y < r->h; y++) {
206  dst2 = (uint32_t *)dst;
207  src2 = src;
208  for (x = 0; x < r->w; x++)
209  *(dst2++) = pal[*(src2++)];
210  dst += dst_linesize;
211  src += r->linesize[0];
212  }
213 }
214 
/* Push the current sub2video canvas, stamped with the given pts, into every
 * filter fed by this input stream.  NOTE(review): the loop body (the
 * av_buffersrc_add_frame*() call, upstream lines 223-225) is missing from
 * this dump. */
215 static void sub2video_push_ref(InputStream *ist, int64_t pts)
216 {
217  AVFrame *frame = ist->sub2video.frame;
218  int i;
219 
220  av_assert1(frame->data[0]);
221  ist->sub2video.last_pts = frame->pts = pts;
222  for (i = 0; i < ist->nb_filters; i++)
226 }
227 
228 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
229 {
230  AVFrame *frame = ist->sub2video.frame;
231  int8_t *dst;
232  int dst_linesize;
233  int num_rects, i;
234  int64_t pts, end_pts;
235 
236  if (!frame)
237  return;
238  if (sub) {
239  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
240  AV_TIME_BASE_Q, ist->st->time_base);
241  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
242  AV_TIME_BASE_Q, ist->st->time_base);
243  num_rects = sub->num_rects;
244  } else {
245  pts = ist->sub2video.end_pts;
246  end_pts = INT64_MAX;
247  num_rects = 0;
248  }
249  if (sub2video_get_blank_frame(ist) < 0) {
251  "Impossible to get a blank canvas.\n");
252  return;
253  }
254  dst = frame->data [0];
255  dst_linesize = frame->linesize[0];
256  for (i = 0; i < num_rects; i++)
257  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
258  sub2video_push_ref(ist, pts);
259  ist->sub2video.end_pts = end_pts;
260 }
261 
262 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
263 {
264  InputFile *infile = input_files[ist->file_index];
265  int i, j, nb_reqs;
266  int64_t pts2;
267 
268  /* When a frame is read from a file, examine all sub2video streams in
269  the same file and send the sub2video frame again. Otherwise, decoded
270  video frames could be accumulating in the filter graph while a filter
271  (possibly overlay) is desperately waiting for a subtitle frame. */
272  for (i = 0; i < infile->nb_streams; i++) {
273  InputStream *ist2 = input_streams[infile->ist_index + i];
274  if (!ist2->sub2video.frame)
275  continue;
276  /* subtitles seem to be usually muxed ahead of other streams;
277  if not, subtracting a larger time here is necessary */
278  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
279  /* do not send the heartbeat frame if the subtitle is already ahead */
280  if (pts2 <= ist2->sub2video.last_pts)
281  continue;
282  if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
283  sub2video_update(ist2, NULL);
284  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
285  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
286  if (nb_reqs)
287  sub2video_push_ref(ist2, pts2);
288  }
289 }
290 
/* End-of-stream flush for sub2video: emit a final (clearing) canvas if one
 * is still pending, then flush the buffer sources.  NOTE(review): the
 * per-filter flush call in the loop body (upstream line 298) is missing
 * from this dump. */
291 static void sub2video_flush(InputStream *ist)
292 {
293  int i;
294 
295  if (ist->sub2video.end_pts < INT64_MAX)
296  sub2video_update(ist, NULL);
297  for (i = 0; i < ist->nb_filters; i++)
299 }
300 
301 /* end of sub2video hack */
302 
/* Async-signal-safe part of terminal cleanup: restore the attributes saved
 * by term_init() if they were changed.  tcsetattr() is async-signal-safe,
 * so this may run from a signal handler. */
303 static void term_exit_sigsafe(void)
304 {
305 #if HAVE_TERMIOS_H
306  if(restore_tty)
307  tcsetattr (0, TCSANOW, &oldtty);
308 #endif
309 }
310 
/* Public terminal cleanup.  The AV_LOG_QUIET call flushes libav logging
 * state.  NOTE(review): the call into term_exit_sigsafe() (upstream line
 * 314) is missing from this dump. */
311 void term_exit(void)
312 {
313  av_log(NULL, AV_LOG_QUIET, "%s", "");
315 }
316 
/* Signal/exit bookkeeping.  volatile: written from the signal handler and
 * the Windows console-control thread, read by the main loop. */
317 static volatile int received_sigterm = 0;
318 static volatile int received_nb_signals = 0;
319 static volatile int transcode_init_done = 0;
320 static volatile int ffmpeg_exited = 0;
321 static int main_return_code = 0;
322 
/* Signal handler: remember which signal arrived and hard-exit if the user
 * keeps sending signals.  Uses only async-signal-safe calls (write, exit).
 * NOTE(review): the parameter list line and the received_nb_signals
 * increment / term_exit_sigsafe() call (upstream lines 324, 327-328) are
 * missing from this dump. */
323 static void
325 {
326  received_sigterm = sig;
329  if(received_nb_signals > 3) {
330  write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
331  strlen("Received > 3 system signals, hard exiting\n"));
332 
333  exit(123);
334  }
335 }
336 
337 #if HAVE_SETCONSOLECTRLHANDLER
338 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
339 {
340  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
341 
342  switch (fdwCtrlType)
343  {
344  case CTRL_C_EVENT:
345  case CTRL_BREAK_EVENT:
346  sigterm_handler(SIGINT);
347  return TRUE;
348 
349  case CTRL_CLOSE_EVENT:
350  case CTRL_LOGOFF_EVENT:
351  case CTRL_SHUTDOWN_EVENT:
352  sigterm_handler(SIGTERM);
353  /* Basically, with these 3 events, when we return from this method the
354  process is hard terminated, so stall as long as we need to
355  to try and let the main thread(s) clean up and gracefully terminate
356  (we have at most 5 seconds, but should be done far before that). */
357  while (!ffmpeg_exited) {
358  Sleep(0);
359  }
360  return TRUE;
361 
362  default:
363  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
364  return FALSE;
365  }
366 }
367 #endif
368 
/* Install signal handlers and (on POSIX ttys) switch stdin to raw mode so
 * interactive keys can be read by read_key().  NOTE(review): the guard
 * opening this block (upstream line 372, presumably testing run_as_daemon /
 * stdin interaction) is missing from this dump — that is why an extra `}`
 * appears before the #endif below. */
369 void term_init(void)
370 {
371 #if HAVE_TERMIOS_H
373  struct termios tty;
374  if (tcgetattr (0, &tty) == 0) {
375  oldtty = tty;
376  restore_tty = 1;
377 
/* Raw-ish mode: no break/parity/CR-NL translation, no flow control, no echo
 * or canonical buffering; 8-bit chars; read returns after 1 byte (VMIN=1). */
378  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
379  |INLCR|IGNCR|ICRNL|IXON);
380  tty.c_oflag |= OPOST;
381  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
382  tty.c_cflag &= ~(CSIZE|PARENB);
383  tty.c_cflag |= CS8;
384  tty.c_cc[VMIN] = 1;
385  tty.c_cc[VTIME] = 0;
386 
387  tcsetattr (0, TCSANOW, &tty);
388  }
389  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
390  }
391 #endif
392 
393  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
394  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
395 #ifdef SIGXCPU
396  signal(SIGXCPU, sigterm_handler);
397 #endif
398 #if HAVE_SETCONSOLECTRLHANDLER
399  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
400 #endif
401 }
402 
403 /* read a key without blocking */
/* Poll for a single keypress without blocking.  Returns the character, or
 * -1 (or a read() error code on POSIX) when no input is available.
 * Three platform paths: POSIX select()+read(), Windows pipe peeking, and
 * Windows console kbhit()/getch(). */
404 static int read_key(void)
405 {
406  unsigned char ch;
407 #if HAVE_TERMIOS_H
408  int n = 1;
409  struct timeval tv;
410  fd_set rfds;
411 
/* Zero-timeout select(): purely a readiness poll on stdin. */
412  FD_ZERO(&rfds);
413  FD_SET(0, &rfds);
414  tv.tv_sec = 0;
415  tv.tv_usec = 0;
416  n = select(1, &rfds, NULL, NULL, &tv);
417  if (n > 0) {
418  n = read(0, &ch, 1);
419  if (n == 1)
420  return ch;
421 
422  return n;
423  }
424 #elif HAVE_KBHIT
425 # if HAVE_PEEKNAMEDPIPE
426  static int is_pipe;
427  static HANDLE input_handle;
428  DWORD dw, nchars;
/* Lazily detect whether stdin is a console or a pipe (GUI wrappers). */
429  if(!input_handle){
430  input_handle = GetStdHandle(STD_INPUT_HANDLE);
431  is_pipe = !GetConsoleMode(input_handle, &dw);
432  }
433 
434  if (is_pipe) {
435  /* When running under a GUI, you will end here. */
436  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
437  // input pipe may have been closed by the program that ran ffmpeg
438  return -1;
439  }
440  //Read it
441  if(nchars != 0) {
442  read(0, &ch, 1);
443  return ch;
444  }else{
445  return -1;
446  }
447  }
448 # endif
449  if(kbhit())
450  return(getch());
451 #endif
452  return -1;
453 }
454 
/* AVIOInterruptCB callback: a non-zero return aborts blocking lavf I/O.
 * NOTE(review): the return expression (upstream line 457, presumably
 * comparing received_nb_signals with transcode_init_done) is missing from
 * this dump. */
455 static int decode_interrupt_cb(void *ctx)
456 {
458 }
459 
461 
/* Global teardown (registered via register_exit): free filtergraphs, close
 * output files, free all stream/file structures, close the vstats file and
 * report final status.  `ret` is the pending exit code, used only for the
 * "Conversion failed!" message.  NOTE(review): several hyperlinked lines
 * were dropped from this dump (e.g. the `AVFormatContext *s;` declaration
 * at upstream 499 and various free/close calls at 473, 475, 494, 505, 519,
 * 521, 526, 529, 533, 538-539, 546, 560-561, 563-564, 569, 576, 580, 589),
 * so some cleanup steps are not visible here. */
462 static void ffmpeg_cleanup(int ret)
463 {
464  int i, j;
465 
466  if (do_benchmark) {
467  int maxrss = getmaxrss() / 1024;
468  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
469  }
470 
471  for (i = 0; i < nb_filtergraphs; i++) {
472  FilterGraph *fg = filtergraphs[i];
474  for (j = 0; j < fg->nb_inputs; j++) {
476  av_freep(&fg->inputs[j]->name);
477  av_freep(&fg->inputs[j]);
478  }
479  av_freep(&fg->inputs);
480  for (j = 0; j < fg->nb_outputs; j++) {
481  av_freep(&fg->outputs[j]->name);
482  av_freep(&fg->outputs[j]->formats);
483  av_freep(&fg->outputs[j]->channel_layouts);
484  av_freep(&fg->outputs[j]->sample_rates);
485  av_freep(&fg->outputs[j]);
486  }
487  av_freep(&fg->outputs);
488  av_freep(&fg->graph_desc);
489 
490  av_freep(&filtergraphs[i]);
491  }
492  av_freep(&filtergraphs);
493 
495 
496  /* close files */
497  for (i = 0; i < nb_output_files; i++) {
498  OutputFile *of = output_files[i];
500  if (!of)
501  continue;
502  s = of->ctx;
503  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
504  avio_closep(&s->pb);
506  av_dict_free(&of->opts);
507 
508  av_freep(&output_files[i]);
509  }
510  for (i = 0; i < nb_output_streams; i++) {
511  OutputStream *ost = output_streams[i];
512 
513  if (!ost)
514  continue;
515 
516  for (j = 0; j < ost->nb_bitstream_filters; j++)
517  av_bsf_free(&ost->bsf_ctx[j]);
518  av_freep(&ost->bsf_ctx);
520 
522  av_frame_free(&ost->last_frame);
523  av_dict_free(&ost->encoder_opts);
524 
525  av_parser_close(ost->parser);
527 
528  av_freep(&ost->forced_keyframes);
530  av_freep(&ost->avfilter);
531  av_freep(&ost->logfile_prefix);
532 
534  ost->audio_channels_mapped = 0;
535 
536  av_dict_free(&ost->sws_dict);
537 
540 
/* Drain any packets still queued for a muxer that was never initialized. */
541  while (ost->muxing_queue && av_fifo_size(ost->muxing_queue)) {
542  AVPacket pkt;
543  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
544  av_packet_unref(&pkt);
545  }
547 
548  av_freep(&output_streams[i]);
549  }
550 #if HAVE_PTHREADS
551  free_input_threads();
552 #endif
553  for (i = 0; i < nb_input_files; i++) {
554  avformat_close_input(&input_files[i]->ctx);
555  av_freep(&input_files[i]);
556  }
557  for (i = 0; i < nb_input_streams; i++) {
558  InputStream *ist = input_streams[i];
559 
562  av_dict_free(&ist->decoder_opts);
565  av_freep(&ist->filters);
566  av_freep(&ist->hwaccel_device);
567  av_freep(&ist->dts_buffer);
568 
570 
571  av_freep(&input_streams[i]);
572  }
573 
574  if (vstats_file) {
575  if (fclose(vstats_file))
577  "Error closing vstats file, loss of information possible: %s\n",
578  av_err2str(AVERROR(errno)));
579  }
581 
582  av_freep(&input_streams);
583  av_freep(&input_files);
584  av_freep(&output_streams);
585  av_freep(&output_files);
586 
587  uninit_opts();
588 
590 
591  if (received_sigterm) {
592  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
593  (int) received_sigterm);
594  } else if (ret && transcode_init_done) {
595  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
596  }
597  term_exit();
598  ffmpeg_exited = 1;
599 }
600 
/* remove_avoptions: delete from dictionary *a every key that appears in b.
 * NOTE(review): the signature (upstream line 601, per upstream
 * `remove_avoptions(AVDictionary **a, AVDictionary *b)`) and the loop body
 * (upstream line 606, the av_dict_set(a, t->key, NULL, ...) call) are
 * missing from this dump. */
602 {
603  AVDictionaryEntry *t = NULL;
604 
605  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
607  }
608 }
609 
/* assert_avoptions: abort with a fatal log if any user-supplied option was
 * left unconsumed in dictionary m.  NOTE(review): the signature (upstream
 * line 610) and the `AVDictionaryEntry *t;` declaration (upstream line 612)
 * are missing from this dump. */
611 {
613  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
614  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
615  exit_program(1);
616  }
617 }
618 
/* Terminate after a caller has reported that codec `c` is experimental and
 * needs -strict experimental; both parameters are currently unused here. */
619 static void abort_codec_experimental(AVCodec *c, int encoder)
620 {
621  exit_program(1);
622 }
623 
624 static void update_benchmark(const char *fmt, ...)
625 {
626  if (do_benchmark_all) {
627  int64_t t = getutime();
628  va_list va;
629  char buf[1024];
630 
631  if (fmt) {
632  va_start(va, fmt);
633  vsnprintf(buf, sizeof(buf), fmt, va);
634  va_end(va);
635  av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
636  }
637  current_time = t;
638  }
639 }
640 
641 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
642 {
643  int i;
644  for (i = 0; i < nb_output_streams; i++) {
645  OutputStream *ost2 = output_streams[i];
646  ost2->finished |= ost == ost2 ? this_stream : others;
647  }
648 }
649 
/* write_packet: final stop before the muxer.  Buffers packets until the
 * muxer header is written, enforces -frames limits, fixes up video packet
 * durations and non-monotonous/invalid DTS, then hands the packet to
 * av_interleaved_write_frame().  Consumes (unrefs) the packet.
 * NOTE(review): several hyperlinked lines are missing from this dump: the
 * signature (upstream 650), an av_log call start (663), a vsync-drop
 * timestamp reset (677-678), the quality side-data lookup declaration
 * (697), the opening of the monotonicity `if (` (729), the media-type
 * string argument (762) and the error-path close call (773). */
651 {
652  AVFormatContext *s = of->ctx;
653  AVStream *st = ost->st;
654  int ret;
655 
656  if (!of->header_written) {
657  AVPacket tmp_pkt;
658  /* the muxer is not initialized yet, buffer the packet */
659  if (!av_fifo_space(ost->muxing_queue)) {
660  int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
661  ost->max_muxing_queue_size);
662  if (new_size <= av_fifo_size(ost->muxing_queue)) {
664  "Too many packets buffered for output stream %d:%d.\n",
665  ost->file_index, ost->st->index);
666  exit_program(1);
667  }
668  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
669  if (ret < 0)
670  exit_program(1);
671  }
672  av_packet_move_ref(&tmp_pkt, pkt);
673  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
674  return;
675  }
676 
679  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
680 
681  /*
682  * Audio encoders may split the packets -- #frames in != #packets out.
683  * But there is no reordering, so we can limit the number of output packets
684  * by simply dropping them here.
685  * Counting encoded video frames needs to be done separately because of
686  * reordering, see do_video_out()
687  */
688  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
689  if (ost->frame_number >= ost->max_frames) {
690  av_packet_unref(pkt);
691  return;
692  }
693  ost->frame_number++;
694  }
695  if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
696  int i;
698  NULL);
/* sd layout (quality-stats side data): u32 quality, byte 4 picture type,
 * byte 5 error count, then 64-bit error values — per upstream; confirm. */
699  ost->quality = sd ? AV_RL32(sd) : -1;
700  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
701 
702  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
703  if (sd && i < sd[5])
704  ost->error[i] = AV_RL64(sd + 8 + 8*i);
705  else
706  ost->error[i] = -1;
707  }
708 
709  if (ost->frame_rate.num && ost->is_cfr) {
710  if (pkt->duration > 0)
711  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
712  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
713  ost->st->time_base);
714  }
715  }
716 
717  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
718  if (pkt->dts != AV_NOPTS_VALUE &&
719  pkt->pts != AV_NOPTS_VALUE &&
720  pkt->dts > pkt->pts) {
721  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
722  pkt->dts, pkt->pts,
723  ost->file_index, ost->st->index);
/* Replace both stamps with the median of {pts, dts, last_mux_dts + 1}. */
724  pkt->pts =
725  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
726  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
727  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
728  }
730  pkt->dts != AV_NOPTS_VALUE &&
731  !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
732  ost->last_mux_dts != AV_NOPTS_VALUE) {
733  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
734  if (pkt->dts < max) {
735  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
736  av_log(s, loglevel, "Non-monotonous DTS in output stream "
737  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
738  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
739  if (exit_on_error) {
740  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
741  exit_program(1);
742  }
743  av_log(s, loglevel, "changing to %"PRId64". This may result "
744  "in incorrect timestamps in the output file.\n",
745  max);
746  if (pkt->pts >= pkt->dts)
747  pkt->pts = FFMAX(pkt->pts, max);
748  pkt->dts = max;
749  }
750  }
751  }
752  ost->last_mux_dts = pkt->dts;
753 
754  ost->data_size += pkt->size;
755  ost->packets_written++;
756 
757  pkt->stream_index = ost->index;
758 
759  if (debug_ts) {
760  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
761  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
763  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
764  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
765  pkt->size
766  );
767  }
768 
769  ret = av_interleaved_write_frame(s, pkt);
770  if (ret < 0) {
771  print_error("av_interleaved_write_frame()", ret);
772  main_return_code = 1;
774  }
775  av_packet_unref(pkt);
776 }
777 
/* close_output_stream: mark this stream's encoder as finished; with
 * -shortest, also clamp the containing file's recording time to what this
 * stream has produced so far.  NOTE(review): the signature (upstream line
 * 778, `static void close_output_stream(OutputStream *ost)`) is missing
 * from this dump. */
779 {
780  OutputFile *of = output_files[ost->file_index];
781 
782  ost->finished |= ENCODER_FINISHED;
783  if (of->shortest) {
784  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
785  of->recording_time = FFMIN(of->recording_time, end);
786  }
787 }
788 
/* output_packet: run a packet through the stream's bitstream filter chain
 * (if any) and hand each result to write_packet().  The `idx` walk drains
 * filter idx-1 (EAGAIN steps back up the chain) and feeds filter idx.
 * NOTE(review): the signature (upstream line 789) and a comment/line at
 * upstream 797 are missing from this dump. */
790 {
791  int ret = 0;
792 
793  /* apply the output bitstream filters, if any */
794  if (ost->nb_bitstream_filters) {
795  int idx;
796 
798  ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
799  if (ret < 0)
800  goto finish;
801 
802  idx = 1;
803  while (idx) {
804  /* get a packet from the previous filter up the chain */
805  ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
806  if (ret == AVERROR(EAGAIN)) {
807  ret = 0;
808  idx--;
809  continue;
810  } else if (ret < 0)
811  goto finish;
812  /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
813  * the api states this shouldn't happen after init(). Propagate it here to the
814  * muxer and to the next filters in the chain to workaround this.
815  * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
816  * par_out->extradata and adapt muxers accordingly to get rid of this. */
817  if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
818  ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
819  if (ret < 0)
820  goto finish;
821  ost->bsf_extradata_updated[idx - 1] |= 1;
822  }
823 
824  /* send it to the next filter down the chain or to the muxer */
825  if (idx < ost->nb_bitstream_filters) {
826  /* HACK/FIXME! - See above */
827  if (!(ost->bsf_extradata_updated[idx] & 2)) {
828  ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
829  if (ret < 0)
830  goto finish;
831  ost->bsf_extradata_updated[idx] |= 2;
832  }
833  ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
834  if (ret < 0)
835  goto finish;
836  idx++;
837  } else
838  write_packet(of, pkt, ost);
839  }
840  } else
841  write_packet(of, pkt, ost);
842 
843 finish:
844  if (ret < 0 && ret != AVERROR_EOF) {
845  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
846  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
847  if(exit_on_error)
848  exit_program(1);
849  }
850 }
851 
/* check_recording_time: returns 1 while the stream is within the output
 * file's -t recording time, otherwise closes the stream and returns 0.
 * NOTE(review): the signature (upstream line 852) and the first half of the
 * time comparison (upstream line 857, the av_compare_ts call on
 * ost->sync_opts) are missing from this dump. */
853 {
854  OutputFile *of = output_files[ost->file_index];
855 
856  if (of->recording_time != INT64_MAX &&
858  AV_TIME_BASE_Q) >= 0) {
859  close_output_stream(ost);
860  return 0;
861  }
862  return 1;
863 }
864 
/* Encode one audio frame and mux every packet the encoder returns.
 * Maintains ost->sync_opts in samples and the encoded-frame/sample
 * statistics.  Fatal (exit_program) on encoder errors.  NOTE(review): one
 * dropped line sits at upstream 886 — per upstream it is
 * `update_benchmark(NULL);` resetting the benchmark clock. */
865 static void do_audio_out(OutputFile *of, OutputStream *ost,
866  AVFrame *frame)
867 {
868  AVCodecContext *enc = ost->enc_ctx;
869  AVPacket pkt;
870  int ret;
871 
872  av_init_packet(&pkt);
873  pkt.data = NULL;
874  pkt.size = 0;
875 
876  if (!check_recording_time(ost))
877  return;
878 
/* Without a usable pts (or with negative -async), stamp with our counter. */
879  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
880  frame->pts = ost->sync_opts;
881  ost->sync_opts = frame->pts + frame->nb_samples;
882  ost->samples_encoded += frame->nb_samples;
883  ost->frames_encoded++;
884 
885  av_assert0(pkt.size || !pkt.data);
887  if (debug_ts) {
888  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
889  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
890  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
891  enc->time_base.num, enc->time_base.den);
892  }
893 
894  ret = avcodec_send_frame(enc, frame);
895  if (ret < 0)
896  goto error;
897 
/* Drain all packets available for this frame (EAGAIN = need more input). */
898  while (1) {
899  ret = avcodec_receive_packet(enc, &pkt);
900  if (ret == AVERROR(EAGAIN))
901  break;
902  if (ret < 0)
903  goto error;
904 
905  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
906 
907  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
908 
909  if (debug_ts) {
910  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
911  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
912  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
913  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
914  }
915 
916  output_packet(of, &pkt, ost);
917  }
918 
919  return;
920 error:
921  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
922  exit_program(1);
923 }
924 
/* Encode one AVSubtitle and mux the result.  DVB subtitles are encoded
 * twice (nb == 2): one packet draws the subtitle, one clears it.
 * NOTE(review): two dropped lines — upstream 955 is the condition selecting
 * nb (per upstream `if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)`), and
 * upstream 974 is `sub->end_display_time -= sub->start_display_time;`. */
925 static void do_subtitle_out(OutputFile *of,
926  OutputStream *ost,
927  AVSubtitle *sub)
928 {
929  int subtitle_out_max_size = 1024 * 1024;
930  int subtitle_out_size, nb, i;
931  AVCodecContext *enc;
932  AVPacket pkt;
933  int64_t pts;
934 
935  if (sub->pts == AV_NOPTS_VALUE) {
936  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
937  if (exit_on_error)
938  exit_program(1);
939  return;
940  }
941 
942  enc = ost->enc_ctx;
943 
/* Lazily allocate the shared encode buffer (never freed; process-lifetime). */
944  if (!subtitle_out) {
945  subtitle_out = av_malloc(subtitle_out_max_size);
946  if (!subtitle_out) {
947  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
948  exit_program(1);
949  }
950  }
951 
952  /* Note: DVB subtitle need one packet to draw them and one other
953  packet to clear them */
954  /* XXX: signal it in the codec context ? */
956  nb = 2;
957  else
958  nb = 1;
959 
960  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
961  pts = sub->pts;
962  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
963  pts -= output_files[ost->file_index]->start_time;
964  for (i = 0; i < nb; i++) {
965  unsigned save_num_rects = sub->num_rects;
966 
967  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
968  if (!check_recording_time(ost))
969  return;
970 
971  sub->pts = pts;
972  // start_display_time is required to be 0
973  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
975  sub->start_display_time = 0;
/* Second (clearing) pass encodes with zero rectangles, then restores. */
976  if (i == 1)
977  sub->num_rects = 0;
978 
979  ost->frames_encoded++;
980 
981  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
982  subtitle_out_max_size, sub);
983  if (i == 1)
984  sub->num_rects = save_num_rects;
985  if (subtitle_out_size < 0) {
986  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
987  exit_program(1);
988  }
989 
990  av_init_packet(&pkt);
991  pkt.data = subtitle_out;
992  pkt.size = subtitle_out_size;
993  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
994  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
995  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
996  /* XXX: the pts correction is handled here. Maybe handling
997  it in the codec would be better */
998  if (i == 0)
999  pkt.pts += 90 * sub->start_display_time;
1000  else
1001  pkt.pts += 90 * sub->end_display_time;
1002  }
1003  pkt.dts = pkt.pts;
1004  output_packet(of, &pkt, ost);
1005  }
1006 }
1007 
1008 static void do_video_out(OutputFile *of,
1009  OutputStream *ost,
1010  AVFrame *next_picture,
1011  double sync_ipts)
1012 {
1013  int ret, format_video_sync;
1014  AVPacket pkt;
1015  AVCodecContext *enc = ost->enc_ctx;
1016  AVCodecParameters *mux_par = ost->st->codecpar;
1017  AVRational frame_rate;
1018  int nb_frames, nb0_frames, i;
1019  double delta, delta0;
1020  double duration = 0;
1021  int frame_size = 0;
1022  InputStream *ist = NULL;
1024 
1025  if (ost->source_index >= 0)
1026  ist = input_streams[ost->source_index];
1027 
1028  frame_rate = av_buffersink_get_frame_rate(filter);
1029  if (frame_rate.num > 0 && frame_rate.den > 0)
1030  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1031 
1032  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1033  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1034 
1035  if (!ost->filters_script &&
1036  !ost->filters &&
1037  next_picture &&
1038  ist &&
1039  lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1040  duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1041  }
1042 
1043  if (!next_picture) {
1044  //end, flushing
1045  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1046  ost->last_nb0_frames[1],
1047  ost->last_nb0_frames[2]);
1048  } else {
1049  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1050  delta = delta0 + duration;
1051 
1052  /* by default, we output a single frame */
1053  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1054  nb_frames = 1;
1055 
1056  format_video_sync = video_sync_method;
1057  if (format_video_sync == VSYNC_AUTO) {
1058  if(!strcmp(of->ctx->oformat->name, "avi")) {
1059  format_video_sync = VSYNC_VFR;
1060  } else
1061  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1062  if ( ist
1063  && format_video_sync == VSYNC_CFR
1064  && input_files[ist->file_index]->ctx->nb_streams == 1
1065  && input_files[ist->file_index]->input_ts_offset == 0) {
1066  format_video_sync = VSYNC_VSCFR;
1067  }
1068  if (format_video_sync == VSYNC_CFR && copy_ts) {
1069  format_video_sync = VSYNC_VSCFR;
1070  }
1071  }
1072  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1073 
1074  if (delta0 < 0 &&
1075  delta > 0 &&
1076  format_video_sync != VSYNC_PASSTHROUGH &&
1077  format_video_sync != VSYNC_DROP) {
1078  if (delta0 < -0.6) {
1079  av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1080  } else
1081  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1082  sync_ipts = ost->sync_opts;
1083  duration += delta0;
1084  delta0 = 0;
1085  }
1086 
1087  switch (format_video_sync) {
1088  case VSYNC_VSCFR:
1089  if (ost->frame_number == 0 && delta0 >= 0.5) {
1090  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1091  delta = duration;
1092  delta0 = 0;
1093  ost->sync_opts = lrint(sync_ipts);
1094  }
1095  case VSYNC_CFR:
1096  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1097  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1098  nb_frames = 0;
1099  } else if (delta < -1.1)
1100  nb_frames = 0;
1101  else if (delta > 1.1) {
1102  nb_frames = lrintf(delta);
1103  if (delta0 > 1.1)
1104  nb0_frames = lrintf(delta0 - 0.6);
1105  }
1106  break;
1107  case VSYNC_VFR:
1108  if (delta <= -0.6)
1109  nb_frames = 0;
1110  else if (delta > 0.6)
1111  ost->sync_opts = lrint(sync_ipts);
1112  break;
1113  case VSYNC_DROP:
1114  case VSYNC_PASSTHROUGH:
1115  ost->sync_opts = lrint(sync_ipts);
1116  break;
1117  default:
1118  av_assert0(0);
1119  }
1120  }
1121 
1122  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1123  nb0_frames = FFMIN(nb0_frames, nb_frames);
1124 
1125  memmove(ost->last_nb0_frames + 1,
1126  ost->last_nb0_frames,
1127  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1128  ost->last_nb0_frames[0] = nb0_frames;
1129 
1130  if (nb0_frames == 0 && ost->last_dropped) {
1131  nb_frames_drop++;
1133  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1134  ost->frame_number, ost->st->index, ost->last_frame->pts);
1135  }
1136  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1137  if (nb_frames > dts_error_threshold * 30) {
1138  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1139  nb_frames_drop++;
1140  return;
1141  }
1142  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1143  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1144  if (nb_frames_dup > dup_warning) {
1145  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1146  dup_warning *= 10;
1147  }
1148  }
1149  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1150 
1151  /* duplicates frame if needed */
1152  for (i = 0; i < nb_frames; i++) {
1153  AVFrame *in_picture;
1154  av_init_packet(&pkt);
1155  pkt.data = NULL;
1156  pkt.size = 0;
1157 
1158  if (i < nb0_frames && ost->last_frame) {
1159  in_picture = ost->last_frame;
1160  } else
1161  in_picture = next_picture;
1162 
1163  if (!in_picture)
1164  return;
1165 
1166  in_picture->pts = ost->sync_opts;
1167 
1168 #if 1
1169  if (!check_recording_time(ost))
1170 #else
1171  if (ost->frame_number >= ost->max_frames)
1172 #endif
1173  return;
1174 
1175 #if FF_API_LAVF_FMT_RAWPICTURE
1176  if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1177  enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1178  /* raw pictures are written as AVPicture structure to
1179  avoid any copies. We support temporarily the older
1180  method. */
1181  if (in_picture->interlaced_frame)
1182  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1183  else
1184  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1185  pkt.data = (uint8_t *)in_picture;
1186  pkt.size = sizeof(AVPicture);
1187  pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1188  pkt.flags |= AV_PKT_FLAG_KEY;
1189 
1190  output_packet(of, &pkt, ost);
1191  } else
1192 #endif
1193  {
1194  int forced_keyframe = 0;
1195  double pts_time;
1196 
1198  ost->top_field_first >= 0)
1199  in_picture->top_field_first = !!ost->top_field_first;
1200 
1201  if (in_picture->interlaced_frame) {
1202  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1203  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1204  else
1205  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1206  } else
1207  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1208 
1209  in_picture->quality = enc->global_quality;
1210  in_picture->pict_type = 0;
1211 
1212  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1213  in_picture->pts * av_q2d(enc->time_base) : NAN;
1214  if (ost->forced_kf_index < ost->forced_kf_count &&
1215  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1216  ost->forced_kf_index++;
1217  forced_keyframe = 1;
1218  } else if (ost->forced_keyframes_pexpr) {
1219  double res;
1220  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1223  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1229  res);
1230  if (res) {
1231  forced_keyframe = 1;
1237  }
1238 
1240  } else if ( ost->forced_keyframes
1241  && !strncmp(ost->forced_keyframes, "source", 6)
1242  && in_picture->key_frame==1) {
1243  forced_keyframe = 1;
1244  }
1245 
1246  if (forced_keyframe) {
1247  in_picture->pict_type = AV_PICTURE_TYPE_I;
1248  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1249  }
1250 
1252  if (debug_ts) {
1253  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1254  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1255  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1256  enc->time_base.num, enc->time_base.den);
1257  }
1258 
1259  ost->frames_encoded++;
1260 
1261  ret = avcodec_send_frame(enc, in_picture);
1262  if (ret < 0)
1263  goto error;
1264 
1265  while (1) {
1266  ret = avcodec_receive_packet(enc, &pkt);
1267  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1268  if (ret == AVERROR(EAGAIN))
1269  break;
1270  if (ret < 0)
1271  goto error;
1272 
1273  if (debug_ts) {
1274  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1275  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1276  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1277  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1278  }
1279 
1280  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1281  pkt.pts = ost->sync_opts;
1282 
1283  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1284 
1285  if (debug_ts) {
1286  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1287  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1288  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1289  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1290  }
1291 
1292  frame_size = pkt.size;
1293  output_packet(of, &pkt, ost);
1294 
1295  /* if two pass, output log */
1296  if (ost->logfile && enc->stats_out) {
1297  fprintf(ost->logfile, "%s", enc->stats_out);
1298  }
1299  }
1300  }
1301  ost->sync_opts++;
1302  /*
1303  * For video, number of frames in == number of packets out.
1304  * But there may be reordering, so we can't throw away frames on encoder
1305  * flush, we need to limit them here, before they go into encoder.
1306  */
1307  ost->frame_number++;
1308 
1309  if (vstats_filename && frame_size)
1310  do_video_stats(ost, frame_size);
1311  }
1312 
1313  if (!ost->last_frame)
1314  ost->last_frame = av_frame_alloc();
1315  av_frame_unref(ost->last_frame);
1316  if (next_picture && ost->last_frame)
1317  av_frame_ref(ost->last_frame, next_picture);
1318  else
1319  av_frame_free(&ost->last_frame);
1320 
1321  return;
1322 error:
1323  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1324  exit_program(1);
1325 }
1326 
1327 static double psnr(double d)
1328 {
1329  return -10.0 * log10(d);
1330 }
1331 
/*
 * Append one line of per-frame statistics to the file named by -vstats/-vstats_file:
 * frame number, quantizer, PSNR (if enabled), frame size, stream size, time,
 * instantaneous and average bitrate, and picture type.
 * NOTE(review): the function signature (doxygen line 1332) was lost in extraction;
 * per the call at do_video_out this is do_video_stats(OutputStream *ost, int frame_size).
 */
1333 {
1334  AVCodecContext *enc;
1335  int frame_number;
1336  double ti1, bitrate, avg_bitrate;
1337 
1338  /* this is executed just the first time do_video_stats is called */
1339  if (!vstats_file) {
1340  vstats_file = fopen(vstats_filename, "w");
1341  if (!vstats_file) {
1342  perror("fopen");
1343  exit_program(1);
1344  }
1345  }
1346 
1347  enc = ost->enc_ctx;
1348  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1349  frame_number = ost->st->nb_frames;
1350  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1351  ost->quality / (float)FF_QP2LAMBDA);
1352 
/* PSNR from the per-frame luma error, normalized by pixel count * 255^2 */
1353  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1354  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1355 
1356  fprintf(vstats_file,"f_size= %6d ", frame_size);
1357  /* compute pts value; clamp to 0.01s to avoid division by ~zero below */
1358  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1359  if (ti1 < 0.01)
1360  ti1 = 0.01;
1361 
1362  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1363  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1364  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1365  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1366  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1367  }
1368 }
1369 
/*
 * Mark an output stream as finished; when the owning file was opened with
 * -shortest, all sibling streams of that file are finished as well so the
 * muxer stops with the shortest stream.
 * NOTE(review): the signature line (doxygen 1370) and line 1375 (which marks
 * ost itself finished) were lost in extraction.
 */
1371 {
1372  OutputFile *of = output_files[ost->file_index];
1373  int i;
1374 
1376 
1377  if (of->shortest) {
1378  for (i = 0; i < of->ctx->nb_streams; i++)
1379  output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1380  }
1381 }
1382 
1383 /**
1384  * Get and encode new output from any of the filtergraphs, without causing
1385  * activity.
1386  *
1387  * @return 0 for success, <0 for severe errors
1388  */
/*
 * NOTE(review): extraction dropped lines 1398 (AVFilterContext *filter decl),
 * 1414 (the flags argument of av_buffersink_get_frame_flags), 1417/1420
 * (av_log level arguments) and 1467 (av_log opening) — the gaps in the
 * numbered lines below.
 */
1389 static int reap_filters(int flush)
1390 {
1391  AVFrame *filtered_frame = NULL;
1392  int i;
1393 
1394  /* Reap all buffers present in the buffer sinks */
1395  for (i = 0; i < nb_output_streams; i++) {
1396  OutputStream *ost = output_streams[i];
1397  OutputFile *of = output_files[ost->file_index];
1399  AVCodecContext *enc = ost->enc_ctx;
1400  int ret = 0;
1401 
1402  if (!ost->filter)
1403  continue;
1404  filter = ost->filter->filter;
1405 
/* lazily allocate the per-stream reusable frame */
1406  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1407  return AVERROR(ENOMEM);
1408  }
1409  filtered_frame = ost->filtered_frame;
1410 
/* drain every frame currently buffered in this stream's sink */
1411  while (1) {
1412  double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1413  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1415  if (ret < 0) {
1416  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1418  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1419  } else if (flush && ret == AVERROR_EOF) {
/* on flush, give the video path a NULL frame so it can emit trailing dups */
1421  do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1422  }
1423  break;
1424  }
1425  if (ost->finished) {
1426  av_frame_unref(filtered_frame);
1427  continue;
1428  }
1429  if (filtered_frame->pts != AV_NOPTS_VALUE) {
1430  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1431  AVRational filter_tb = av_buffersink_get_time_base(filter);
1432  AVRational tb = enc->time_base;
/* widen the time base so float_pts keeps fractional precision */
1433  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1434 
1435  tb.den <<= extra_bits;
1436  float_pts =
1437  av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1438  av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1439  float_pts /= 1 << extra_bits;
1440  // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1441  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1442 
1443  filtered_frame->pts =
1444  av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1445  av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1446  }
1447  //if (ost->source_index >= 0)
1448  // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1449 
1450  switch (av_buffersink_get_type(filter)) {
1451  case AVMEDIA_TYPE_VIDEO:
1452  if (!ost->frame_aspect_ratio.num)
1453  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1454 
1455  if (debug_ts) {
1456  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1457  av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1458  float_pts,
1459  enc->time_base.num, enc->time_base.den);
1460  }
1461 
1462  do_video_out(of, ost, filtered_frame, float_pts);
1463  break;
1464  case AVMEDIA_TYPE_AUDIO:
1465  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1466  enc->channels != av_frame_get_channels(filtered_frame)) {
1468  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1469  break;
1470  }
1471  do_audio_out(of, ost, filtered_frame);
1472  break;
1473  default:
1474  // TODO support subtitle filters
1475  av_assert0(0);
1476  }
1477 
1478  av_frame_unref(filtered_frame);
1479  }
1480  }
1481 
1482  return 0;
1483 }
1484 
/*
 * Print the end-of-run summary: per-type output sizes, muxing overhead,
 * and (at verbose level) per-stream demux/decode and encode/mux statistics.
 * NOTE(review): extraction dropped lines 1504-1505 (the condition that
 * clears pass1_used when the encoder ran pass 2).
 */
1485 static void print_final_stats(int64_t total_size)
1486 {
1487  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1488  uint64_t subtitle_size = 0;
1489  uint64_t data_size = 0;
1490  float percent = -1.0;
1491  int i, j;
1492  int pass1_used = 1;
1493 
/* accumulate per-media-type payload sizes across all output streams */
1494  for (i = 0; i < nb_output_streams; i++) {
1495  OutputStream *ost = output_streams[i];
1496  switch (ost->enc_ctx->codec_type) {
1497  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1498  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1499  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1500  default: other_size += ost->data_size; break;
1501  }
1502  extra_size += ost->enc_ctx->extradata_size;
1503  data_size += ost->data_size;
1506  pass1_used = 0;
1507  }
1508 
/* muxing overhead = container bytes beyond the raw stream payload */
1509  if (data_size && total_size>0 && total_size >= data_size)
1510  percent = 100.0 * (total_size - data_size) / data_size;
1511 
1512  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1513  video_size / 1024.0,
1514  audio_size / 1024.0,
1515  subtitle_size / 1024.0,
1516  other_size / 1024.0,
1517  extra_size / 1024.0);
1518  if (percent >= 0.0)
1519  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1520  else
1521  av_log(NULL, AV_LOG_INFO, "unknown");
1522  av_log(NULL, AV_LOG_INFO, "\n");
1523 
1524  /* print verbose per-stream stats */
1525  for (i = 0; i < nb_input_files; i++) {
1526  InputFile *f = input_files[i];
1527  uint64_t total_packets = 0, total_size = 0;
1528 
1529  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1530  i, f->ctx->filename);
1531 
1532  for (j = 0; j < f->nb_streams; j++) {
1533  InputStream *ist = input_streams[f->ist_index + j];
1534  enum AVMediaType type = ist->dec_ctx->codec_type;
1535 
1536  total_size += ist->data_size;
1537  total_packets += ist->nb_packets;
1538 
1539  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1540  i, j, media_type_string(type));
1541  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1542  ist->nb_packets, ist->data_size);
1543 
1544  if (ist->decoding_needed) {
1545  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1546  ist->frames_decoded);
1547  if (type == AVMEDIA_TYPE_AUDIO)
1548  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1549  av_log(NULL, AV_LOG_VERBOSE, "; ");
1550  }
1551 
1552  av_log(NULL, AV_LOG_VERBOSE, "\n");
1553  }
1554 
1555  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1556  total_packets, total_size);
1557  }
1558 
1559  for (i = 0; i < nb_output_files; i++) {
1560  OutputFile *of = output_files[i];
1561  uint64_t total_packets = 0, total_size = 0;
1562 
1563  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1564  i, of->ctx->filename);
1565 
1566  for (j = 0; j < of->ctx->nb_streams; j++) {
1567  OutputStream *ost = output_streams[of->ost_index + j];
1568  enum AVMediaType type = ost->enc_ctx->codec_type;
1569 
1570  total_size += ost->data_size;
1571  total_packets += ost->packets_written;
1572 
1573  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1574  i, j, media_type_string(type));
1575  if (ost->encoding_needed) {
1576  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1577  ost->frames_encoded);
1578  if (type == AVMEDIA_TYPE_AUDIO)
1579  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1580  av_log(NULL, AV_LOG_VERBOSE, "; ");
1581  }
1582 
1583  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1584  ost->packets_written, ost->data_size);
1585 
1586  av_log(NULL, AV_LOG_VERBOSE, "\n");
1587  }
1588 
1589  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1590  total_packets, total_size);
1591  }
/* warn (and hint at common option mistakes) when nothing at all was written */
1592  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1593  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1594  if (pass1_used) {
1595  av_log(NULL, AV_LOG_WARNING, "\n");
1596  } else {
1597  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1598  }
1599  }
1600 }
1601 
/*
 * Emit the periodic one-line progress report (frame/fps/q/size/time/bitrate/
 * speed, plus optional QP histogram and PSNR) to stderr, and the key=value
 * machine-readable form to the -progress AVIO stream if one is open.
 * Throttled to at most once per 500ms except for the final report.
 * NOTE(review): extraction dropped lines 1711 (guard before the end-pts
 * FFMAX), 1752/1754 (dup/drop report guard and arguments) and 1785
 * (av_log opening of the close-error message) — the gaps below.
 */
1602 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1603 {
1604  char buf[1024];
1605  AVBPrint buf_script;
1606  OutputStream *ost;
1607  AVFormatContext *oc;
1608  int64_t total_size;
1609  AVCodecContext *enc;
1610  int frame_number, vid, i;
1611  double bitrate;
1612  double speed;
1613  int64_t pts = INT64_MIN + 1;
1614  static int64_t last_time = -1;
1615  static int qp_histogram[52];
1616  int hours, mins, secs, us;
1617  int ret;
1618  float t;
1619 
1620  if (!print_stats && !is_last_report && !progress_avio)
1621  return;
1622 
/* rate-limit intermediate reports to one every 500ms of wall clock */
1623  if (!is_last_report) {
1624  if (last_time == -1) {
1625  last_time = cur_time;
1626  return;
1627  }
1628  if ((cur_time - last_time) < 500000)
1629  return;
1630  last_time = cur_time;
1631  }
1632 
1633  t = (cur_time-timer_start) / 1000000.0;
1634 
1635 
1636  oc = output_files[0]->ctx;
1637 
1638  total_size = avio_size(oc->pb);
1639  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1640  total_size = avio_tell(oc->pb);
1641 
1642  buf[0] = '\0';
1643  vid = 0;
1644  av_bprint_init(&buf_script, 0, 1);
1645  for (i = 0; i < nb_output_streams; i++) {
1646  float q = -1;
1647  ost = output_streams[i];
1648  enc = ost->enc_ctx;
1649  if (!ost->stream_copy)
1650  q = ost->quality / (float) FF_QP2LAMBDA;
1651 
/* only the first video stream drives the frame/fps fields; later
   video streams just append their quantizer */
1652  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1653  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1654  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1655  ost->file_index, ost->index, q);
1656  }
1657  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1658  float fps;
1659 
1660  frame_number = ost->frame_number;
1661  fps = t > 1 ? frame_number / t : 0;
1662  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1663  frame_number, fps < 9.95, fps, q);
1664  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1665  av_bprintf(&buf_script, "fps=%.1f\n", fps);
1666  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1667  ost->file_index, ost->index, q);
1668  if (is_last_report)
1669  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1670  if (qp_hist) {
1671  int j;
1672  int qp = lrintf(q);
1673  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1674  qp_histogram[qp]++;
1675  for (j = 0; j < 32; j++)
1676  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1677  }
1678 
1679  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1680  int j;
1681  double error, error_sum = 0;
1682  double scale, scale_sum = 0;
1683  double p;
1684  char type[3] = { 'Y','U','V' };
1685  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1686  for (j = 0; j < 3; j++) {
1687  if (is_last_report) {
1688  error = enc->error[j];
1689  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1690  } else {
1691  error = ost->error[j];
1692  scale = enc->width * enc->height * 255.0 * 255.0;
1693  }
/* chroma planes are subsampled: a quarter of the luma pixel count */
1694  if (j)
1695  scale /= 4;
1696  error_sum += error;
1697  scale_sum += scale;
1698  p = psnr(error / scale);
1699  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1700  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1701  ost->file_index, ost->index, type[j] | 32, p);
1702  }
1703  p = psnr(error_sum / scale_sum);
1704  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1705  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1706  ost->file_index, ost->index, p);
1707  }
1708  vid = 1;
1709  }
1710  /* compute min output value */
1712  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1713  ost->st->time_base, AV_TIME_BASE_Q));
1714  if (is_last_report)
1715  nb_frames_drop += ost->last_dropped;
1716  }
1717 
/* split the microsecond pts into hh:mm:ss.xx components */
1718  secs = FFABS(pts) / AV_TIME_BASE;
1719  us = FFABS(pts) % AV_TIME_BASE;
1720  mins = secs / 60;
1721  secs %= 60;
1722  hours = mins / 60;
1723  mins %= 60;
1724 
1725  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1726  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1727 
1728  if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1729  "size=N/A time=");
1730  else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1731  "size=%8.0fkB time=", total_size / 1024.0);
1732  if (pts < 0)
1733  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1734  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1735  "%02d:%02d:%02d.%02d ", hours, mins, secs,
1736  (100 * us) / AV_TIME_BASE);
1737 
1738  if (bitrate < 0) {
1739  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1740  av_bprintf(&buf_script, "bitrate=N/A\n");
1741  }else{
1742  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1743  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1744  }
1745 
1746  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1747  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1748  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1749  av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1750  hours, mins, secs, us);
1751 
1753  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1755  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1756  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1757 
1758  if (speed < 0) {
1759  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1760  av_bprintf(&buf_script, "speed=N/A\n");
1761  } else {
1762  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1763  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1764  }
1765 
/* '\r' keeps intermediate reports on one terminal line; '\n' only at the end */
1766  if (print_stats || is_last_report) {
1767  const char end = is_last_report ? '\n' : '\r';
1768  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1769  fprintf(stderr, "%s %c", buf, end);
1770  } else
1771  av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1772 
1773  fflush(stderr);
1774  }
1775 
1776  if (progress_avio) {
1777  av_bprintf(&buf_script, "progress=%s\n",
1778  is_last_report ? "end" : "continue");
1779  avio_write(progress_avio, buf_script.str,
1780  FFMIN(buf_script.len, buf_script.size - 1));
1781  avio_flush(progress_avio);
1782  av_bprint_finalize(&buf_script, NULL);
1783  if (is_last_report) {
1784  if ((ret = avio_closep(&progress_avio)) < 0)
1786  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1787  }
1788  }
1789 
1790  if (is_last_report)
1791  print_final_stats(total_size);
1792 }
1793 
/*
 * Drain every active encoder at end of input: send a NULL frame to signal
 * EOF, then receive and mux packets until the encoder returns AVERROR_EOF.
 * NOTE(review): extraction dropped lines 1813 (the "encoder not opened"
 * guard), 1838 (update_benchmark(NULL) call) and 1860 (the video-type
 * condition guarding do_video_stats).
 */
1794 static void flush_encoders(void)
1795 {
1796  int i, ret;
1797 
1798  for (i = 0; i < nb_output_streams; i++) {
1799  OutputStream *ost = output_streams[i];
1800  AVCodecContext *enc = ost->enc_ctx;
1801  OutputFile *of = output_files[ost->file_index];
1802 
1803  if (!ost->encoding_needed)
1804  continue;
1805 
/* raw-sample audio encoders (frame_size <= 1) buffer nothing to flush */
1806  if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1807  continue;
1808 #if FF_API_LAVF_FMT_RAWPICTURE
1809  if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1810  continue;
1811 #endif
1812 
1814  continue;
1815 
/* NULL frame = enter draining mode */
1816  avcodec_send_frame(enc, NULL);
1817 
1818  for (;;) {
1819  const char *desc = NULL;
1820  AVPacket pkt;
1821  int pkt_size;
1822 
1823  switch (enc->codec_type) {
1824  case AVMEDIA_TYPE_AUDIO:
1825  desc = "audio";
1826  break;
1827  case AVMEDIA_TYPE_VIDEO:
1828  desc = "video";
1829  break;
1830  default:
1831  av_assert0(0);
1832  }
1833 
1834  av_init_packet(&pkt);
1835  pkt.data = NULL;
1836  pkt.size = 0;
1837 
1839  ret = avcodec_receive_packet(enc, &pkt);
1840  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1841  if (ret < 0 && ret != AVERROR_EOF) {
1842  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1843  desc,
1844  av_err2str(ret));
1845  exit_program(1);
1846  }
/* two-pass stats must be written even while draining */
1847  if (ost->logfile && enc->stats_out) {
1848  fprintf(ost->logfile, "%s", enc->stats_out);
1849  }
1850  if (ret == AVERROR_EOF) {
1851  break;
1852  }
1853  if (ost->finished & MUXER_FINISHED) {
1854  av_packet_unref(&pkt);
1855  continue;
1856  }
1857  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1858  pkt_size = pkt.size;
1859  output_packet(of, &pkt, ost);
1861  do_video_stats(ost, pkt_size);
1862  }
1863  }
1864  }
1865 }
1866 
1867 /*
1868  * Check whether a packet from ist should be written into ost at this time
1869  */
/*
 * Returns 1 when ost maps this input stream, is not finished, and the
 * packet is at or past the output file's -ss start point; 0 otherwise.
 * NOTE(review): the signature line (doxygen 1870,
 * check_output_constraints(InputStream *ist, OutputStream *ost)) was lost
 * in extraction.
 */
1871 {
1872  OutputFile *of = output_files[ost->file_index];
1873  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1874 
1875  if (ost->source_index != ist_index)
1876  return 0;
1877 
1878  if (ost->finished)
1879  return 0;
1880 
1881  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1882  return 0;
1883 
1884  return 1;
1885 }
1886 
/*
 * Copy one input packet to an output stream without re-encoding (-c copy):
 * applies start/recording-time trimming, rescales timestamps into the output
 * stream time base, optionally runs the packet through a parser for
 * timestamp/header fixups, and hands it to the muxer.
 * NOTE(review): extraction dropped lines 1899 (the tail of the keyframe
 * guard condition), 1944 (duration computation from the audio frame),
 * 1948 (arguments of av_rescale_delta) and 1963 (the keyframe flag
 * argument to av_parser_change).
 */
1887 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1888 {
1889  OutputFile *of = output_files[ost->file_index];
1890  InputFile *f = input_files [ist->file_index];
1891  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1892  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1893  AVPicture pict;
1894  AVPacket opkt;
1895 
1896  av_init_packet(&opkt);
1897 
/* before the first keyframe is seen, non-key packets are unusable output */
1898  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1900  return;
1901 
/* drop packets that lie before the output start point (-ss on the output) */
1902  if (!ost->frame_number && !ost->copy_prior_start) {
1903  int64_t comp_start = start_time;
1904  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1905  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1906  if (pkt->pts == AV_NOPTS_VALUE ?
1907  ist->pts < comp_start :
1908  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
1909  return;
1910  }
1911 
/* stop the stream once the output -t / -to limit is reached */
1912  if (of->recording_time != INT64_MAX &&
1913  ist->pts >= of->recording_time + start_time) {
1914  close_output_stream(ost);
1915  return;
1916  }
1917 
1918  if (f->recording_time != INT64_MAX) {
1919  start_time = f->ctx->start_time;
1920  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
1921  start_time += f->start_time;
1922  if (ist->pts >= f->recording_time + start_time) {
1923  close_output_stream(ost);
1924  return;
1925  }
1926  }
1927 
1928  /* force the input stream PTS */
1929  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1930  ost->sync_opts++;
1931 
/* rescale pts/dts from the input to the output stream time base,
   shifted so output timestamps start at zero */
1932  if (pkt->pts != AV_NOPTS_VALUE)
1933  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1934  else
1935  opkt.pts = AV_NOPTS_VALUE;
1936 
1937  if (pkt->dts == AV_NOPTS_VALUE)
1938  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1939  else
1940  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1941  opkt.dts -= ost_tb_start_time;
1942 
1943  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1945  if(!duration)
1946  duration = ist->dec_ctx->frame_size;
1947  opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1949  ost->st->time_base) - ost_tb_start_time;
1950  }
1951 
1952  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1953  opkt.flags = pkt->flags;
1954  // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1955  if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
1956  && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
1957  && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
1958  && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
1959  ) {
1960  int ret = av_parser_change(ost->parser, ost->parser_avctx,
1961  &opkt.data, &opkt.size,
1962  pkt->data, pkt->size,
1964  if (ret < 0) {
1965  av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
1966  av_err2str(ret));
1967  exit_program(1);
1968  }
/* ret > 0 means the parser allocated new data; wrap it in a refcounted buffer */
1969  if (ret) {
1970  opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1971  if (!opkt.buf)
1972  exit_program(1);
1973  }
1974  } else {
1975  opkt.data = pkt->data;
1976  opkt.size = pkt->size;
1977  }
1978  av_copy_packet_side_data(&opkt, pkt);
1979 
1980 #if FF_API_LAVF_FMT_RAWPICTURE
1981  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
1982  ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
1983  (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1984  /* store AVPicture in AVPacket, as expected by the output format */
1985  int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
1986  if (ret < 0) {
1987  av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1988  av_err2str(ret));
1989  exit_program(1);
1990  }
1991  opkt.data = (uint8_t *)&pict;
1992  opkt.size = sizeof(AVPicture);
1993  opkt.flags |= AV_PKT_FLAG_KEY;
1994  }
1995 #endif
1996 
1997  output_packet(of, &opkt, ost);
1998 }
1999 
/*
 * If the decoder reports no channel layout, try to pick a default one from
 * the channel count (subject to -guess_layout_max) and warn about the guess.
 * Returns 1 on success or if a layout was already set, 0 if no usable
 * layout could be determined.
 * NOTE(review): the signature line (doxygen 2000,
 * guess_input_channel_layout(InputStream *ist)) and line 2009 (the call
 * assigning the guessed default layout) were lost in extraction.
 */
2001 {
2002  AVCodecContext *dec = ist->dec_ctx;
2003 
2004  if (!dec->channel_layout) {
2005  char layout_name[256];
2006 
/* refuse to guess for very wide layouts; caller treats 0 as fatal */
2007  if (dec->channels > ist->guess_layout_max)
2008  return 0;
2010  if (!dec->channel_layout)
2011  return 0;
2012  av_get_channel_layout_string(layout_name, sizeof(layout_name),
2013  dec->channels, dec->channel_layout);
2014  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2015  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2016  }
2017  return 1;
2018 }
2019 
/*
 * Record decode success/failure statistics and enforce -xerror: abort on
 * any decode error, or on a corrupt-but-decoded frame when ist is known.
 * NOTE(review): extraction dropped line 2029 — the inner condition that
 * tests the decoded frame's corruption flags before the fatal log below.
 */
2020 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2021 {
/* decode_error_stat[0] counts successes, [1] counts failures */
2022  if (*got_output || ret<0)
2023  decode_error_stat[ret<0] ++;
2024 
2025  if (ret < 0 && exit_on_error)
2026  exit_program(1);
2027 
2028  if (exit_on_error && *got_output && ist) {
2030  av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
2031  exit_program(1);
2032  }
2033  }
2034 }
2035 
2036 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2037 // There is the following difference: if you got a frame, you must call
2038 // it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
2039 // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
2040 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2041 {
2042  int ret;
2043 
2044  *got_frame = 0;
2045 
2046  if (pkt) {
2047  ret = avcodec_send_packet(avctx, pkt);
2048  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2049  // decoded frames with avcodec_receive_frame() until done.
2050  if (ret < 0 && ret != AVERROR_EOF)
2051  return ret;
2052  }
2053 
2054  ret = avcodec_receive_frame(avctx, frame);
2055  if (ret < 0 && ret != AVERROR(EAGAIN))
2056  return ret;
2057  if (ret >= 0)
2058  *got_frame = 1;
2059 
2060  return 0;
2061 }
2062 
/*
 * Push a decoded frame into every filtergraph fed by this input stream.
 * All but the last filter receive a new reference (via ist->filter_frame);
 * the last one consumes decoded_frame directly. Returns 0 on success or
 * the first error encountered (EOF from a filter input is ignored).
 * NOTE(review): extraction dropped lines 2078 (the flags argument of
 * av_buffersrc_add_frame_flags) and 2082 (the av_log opening of the
 * error message).
 */
2063 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2064 {
2065  int i, ret;
2066  AVFrame *f;
2067 
2068  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2069  for (i = 0; i < ist->nb_filters; i++) {
2070  if (i < ist->nb_filters - 1) {
/* not the last consumer: hand over a fresh reference, keep the original */
2071  f = ist->filter_frame;
2072  ret = av_frame_ref(f, decoded_frame);
2073  if (ret < 0)
2074  break;
2075  } else
2076  f = decoded_frame;
2077  ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2079  if (ret == AVERROR_EOF)
2080  ret = 0; /* ignore */
2081  if (ret < 0) {
2083  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2084  break;
2085  }
2086  }
2087  return ret;
2088 }
2089 
/*
 * Decode one audio packet (pkt == NULL drains the decoder), update the
 * stream's predicted pts/dts, detect sample-format/rate/channel-layout
 * changes (reconfiguring the input filters when they occur), and feed the
 * decoded frame into the attached filtergraphs via send_frame_to_filters().
 *
 * Returns a negative AVERROR on failure; *got_output is set when a frame
 * was produced.
 *
 * NOTE(review): several original source lines (mostly av_log(...) call
 * openers, e.g. lines 2103, 2146, 2150, 2153, 2166) are missing from this
 * extraction; the gaps below are preserved as-is — verify against the
 * upstream ffmpeg.c before editing.
 */
2090 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
2091 {
2092  AVFrame *decoded_frame;
2093  AVCodecContext *avctx = ist->dec_ctx;
2094  int i, ret, err = 0, resample_changed;
2095  AVRational decoded_frame_tb;
2096 
     /* Lazily allocate the frames reused across calls; owned by the InputStream. */
2097  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2098  return AVERROR(ENOMEM);
2099  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2100  return AVERROR(ENOMEM);
2101  decoded_frame = ist->decoded_frame;
2102 
2104  ret = decode(avctx, decoded_frame, got_output, pkt);
2105  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2106 
     /* A decoder reporting a non-positive sample rate cannot be timed correctly. */
2107  if (ret >= 0 && avctx->sample_rate <= 0) {
2108  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2109  ret = AVERROR_INVALIDDATA;
2110  }
2111 
2112  if (ret != AVERROR_EOF)
2113  check_decode_result(ist, got_output, ret);
2114 
2115  if (!*got_output || ret < 0)
2116  return ret;
2117 
2118  ist->samples_decoded += decoded_frame->nb_samples;
2119  ist->frames_decoded++;
2120 
2121 #if 1
2122  /* increment next_dts to use for the case where the input stream does not
2123  have timestamps or there are multiple frames in the packet */
2124  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2125  avctx->sample_rate;
2126  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2127  avctx->sample_rate;
2128 #endif
2129 
     /* Detect any change of the audio parameters relative to the last frame seen. */
2130  resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
2131  ist->resample_channels != avctx->channels ||
2132  ist->resample_channel_layout != decoded_frame->channel_layout ||
2133  ist->resample_sample_rate != decoded_frame->sample_rate;
2134  if (resample_changed) {
2135  char layout1[64], layout2[64];
2136 
2137  if (!guess_input_channel_layout(ist)) {
2138  av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
2139  "layout for Input Stream #%d.%d\n", ist->file_index,
2140  ist->st->index);
2141  exit_program(1);
2142  }
2143  decoded_frame->channel_layout = avctx->channel_layout;
2144 
2145  av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
2147  av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
2148  decoded_frame->channel_layout);
2149 
2151  "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
2152  ist->file_index, ist->st->index,
2154  ist->resample_channels, layout1,
2155  decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2156  avctx->channels, layout2);
2157 
     /* Remember the new parameters so the next change is detected. */
2158  ist->resample_sample_fmt = decoded_frame->format;
2159  ist->resample_sample_rate = decoded_frame->sample_rate;
2160  ist->resample_channel_layout = decoded_frame->channel_layout;
2161  ist->resample_channels = avctx->channels;
2162 
     /* Propagate the new frame parameters to every input filter of this stream. */
2163  for (i = 0; i < ist->nb_filters; i++) {
2164  err = ifilter_parameters_from_frame(ist->filters[i], decoded_frame);
2165  if (err < 0) {
2167  "Error reconfiguring input stream %d:%d filter %d\n",
2168  ist->file_index, ist->st->index, i);
2169  goto fail;
2170  }
2171  }
2172 
     /* Rebuild every filtergraph that consumes this stream. */
2173  for (i = 0; i < nb_filtergraphs; i++)
2174  if (ist_in_filtergraph(filtergraphs[i], ist)) {
2175  FilterGraph *fg = filtergraphs[i];
2176  if (configure_filtergraph(fg) < 0) {
2177  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2178  exit_program(1);
2179  }
2180  }
2181  }
2182 
     /* Pick a pts for the frame: decoder output, then packet pts, then the
      * stream's running dts estimate (each with its matching time base). */
2183  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2184  decoded_frame_tb = ist->st->time_base;
2185  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2186  decoded_frame->pts = pkt->pts;
2187  decoded_frame_tb = ist->st->time_base;
2188 }else {
2189  decoded_frame->pts = ist->dts;
2190  decoded_frame_tb = AV_TIME_BASE_Q;
2191  }
2192  if (decoded_frame->pts != AV_NOPTS_VALUE)
2193  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2194  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2195  (AVRational){1, avctx->sample_rate});
2196  ist->nb_samples = decoded_frame->nb_samples;
2197  err = send_frame_to_filters(ist, decoded_frame);
2198  decoded_frame->pts = AV_NOPTS_VALUE;
2199 
2200 fail:
2201  av_frame_unref(ist->filter_frame);
2202  av_frame_unref(decoded_frame);
2203  return err < 0 ? err : ret;
2204 }
2205 
/*
 * Decode one video packet (pkt == NULL or eof drains the decoder), derive a
 * best-effort timestamp for the frame, detect size/pixel-format changes
 * (reconfiguring input filters when they occur), and feed the frame into
 * the attached filtergraphs.
 *
 * Returns a negative AVERROR on failure; *got_output is set when a frame
 * was produced.
 *
 * NOTE(review): several original source lines (mostly av_log(...) openers,
 * e.g. lines 2242, 2252, 2331, 2360) are missing from this extraction; the
 * gaps below are preserved as-is — verify against upstream ffmpeg.c before
 * editing.
 */
2206 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof)
2207 {
2208  AVFrame *decoded_frame;
2209  int i, ret = 0, err = 0, resample_changed;
2210  int64_t best_effort_timestamp;
2211  int64_t dts = AV_NOPTS_VALUE;
2212  AVPacket avpkt;
2213 
2214  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2215  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2216  // skip the packet.
2217  if (!eof && pkt && pkt->size == 0)
2218  return 0;
2219 
     /* Lazily allocate the frames reused across calls; owned by the InputStream. */
2220  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2221  return AVERROR(ENOMEM);
2222  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2223  return AVERROR(ENOMEM);
2224  decoded_frame = ist->decoded_frame;
2225  if (ist->dts != AV_NOPTS_VALUE)
2226  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2227  if (pkt) {
2228  avpkt = *pkt;
2229  avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2230  }
2231 
2232  // The old code used to set dts on the drain packet, which does not work
2233  // with the new API anymore.
     /* Instead, queue the current dts so it can serve as a fallback timestamp
      * for frames produced while draining (see dts_buffer use below). */
2234  if (eof) {
2235  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2236  if (!new)
2237  return AVERROR(ENOMEM);
2238  ist->dts_buffer = new;
2239  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2240  }
2241 
2243  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2244  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2245 
2246  // The following line may be required in some cases where there is no parser
2247  // or the parser does not has_b_frames correctly
2248  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2249  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2250  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2251  } else
2253  "video_delay is larger in decoder than demuxer %d > %d.\n"
2254  "If you want to help, upload a sample "
2255  "of this file to ftp://upload.ffmpeg.org/incoming/ "
2256  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2257  ist->dec_ctx->has_b_frames,
2258  ist->st->codecpar->video_delay);
2259  }
2260 
2261  if (ret != AVERROR_EOF)
2262  check_decode_result(ist, got_output, ret);
2263 
     /* Diagnostic only: report a context/frame parameter mismatch. */
2264  if (*got_output && ret >= 0) {
2265  if (ist->dec_ctx->width != decoded_frame->width ||
2266  ist->dec_ctx->height != decoded_frame->height ||
2267  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2268  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2269  decoded_frame->width,
2270  decoded_frame->height,
2271  decoded_frame->format,
2272  ist->dec_ctx->width,
2273  ist->dec_ctx->height,
2274  ist->dec_ctx->pix_fmt);
2275  }
2276  }
2277 
2278  if (!*got_output || ret < 0)
2279  return ret;
2280 
2281  if(ist->top_field_first>=0)
2282  decoded_frame->top_field_first = ist->top_field_first;
2283 
2284  ist->frames_decoded++;
2285 
     /* Download the frame from the hwaccel surface when a retrieval hook is set. */
2286  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2287  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2288  if (err < 0)
2289  goto fail;
2290  }
2291  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2292 
2293  best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2294 
     /* While draining, fall back to the oldest queued dts (FIFO pop). */
2295  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2296  best_effort_timestamp = ist->dts_buffer[0];
2297 
2298  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2299  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2300  ist->nb_dts_buffer--;
2301  }
2302 
2303  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2304  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2305 
2306  if (ts != AV_NOPTS_VALUE)
2307  ist->next_pts = ist->pts = ts;
2308  }
2309 
2310  if (debug_ts) {
2311  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2312  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2313  ist->st->index, av_ts2str(decoded_frame->pts),
2314  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2315  best_effort_timestamp,
2316  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2317  decoded_frame->key_frame, decoded_frame->pict_type,
2318  ist->st->time_base.num, ist->st->time_base.den);
2319  }
2320 
2321  if (ist->st->sample_aspect_ratio.num)
2322  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2323 
     /* Detect any change of frame geometry / pixel format relative to the last frame. */
2324  resample_changed = ist->resample_width != decoded_frame->width ||
2325  ist->resample_height != decoded_frame->height ||
2326  ist->resample_pix_fmt != decoded_frame->format;
2327  if (resample_changed) {
2328  av_log(NULL, AV_LOG_INFO,
2329  "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2330  ist->file_index, ist->st->index,
2332  decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2333 
2334  ist->resample_width = decoded_frame->width;
2335  ist->resample_height = decoded_frame->height;
2336  ist->resample_pix_fmt = decoded_frame->format;
2337 
     /* Propagate the new frame parameters to every input filter of this stream. */
2338  for (i = 0; i < ist->nb_filters; i++) {
2339  err = ifilter_parameters_from_frame(ist->filters[i], decoded_frame);
2340  if (err < 0) {
2341  av_log(NULL, AV_LOG_ERROR,
2342  "Error reconfiguring input stream %d:%d filter %d\n",
2343  ist->file_index, ist->st->index, i);
2344  goto fail;
2345  }
2346  }
2347 
     /* Reconfiguration is gated on ist->reinit_filters here (unlike audio). */
2348  for (i = 0; i < nb_filtergraphs; i++) {
2349  if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2350  configure_filtergraph(filtergraphs[i]) < 0) {
2351  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2352  exit_program(1);
2353  }
2354  }
2355  }
2356 
2357  err = send_frame_to_filters(ist, decoded_frame);
2358 
2359 fail:
2361  av_frame_unref(decoded_frame);
2362  return err < 0 ? err : ret;
2363 }
2364 
/*
 * Decode one subtitle packet, optionally fix the previous subtitle's
 * duration (-fix_sub_duration), feed the result to sub2video, and encode it
 * into every output stream that takes subtitles from this input.
 *
 * Returns the decoder's return value; *got_output is set when a subtitle
 * was produced.
 *
 * NOTE(review): original lines 2387 and 2389 (inside the duration-reduction
 * log call) are missing from this extraction; the gap is preserved as-is.
 */
2365 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2366 {
2367  AVSubtitle subtitle;
2368  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2369  &subtitle, got_output, pkt);
2370 
2371  check_decode_result(NULL, got_output, ret);
2372 
2373  if (ret < 0 || !*got_output) {
     /* A zero-sized packet means EOF: flush the sub2video machinery. */
2374  if (!pkt->size)
2375  sub2video_flush(ist);
2376  return ret;
2377  }
2378 
2379  if (ist->fix_sub_duration) {
2380  int end = 1;
     /* Clamp the PREVIOUS subtitle's display time so it ends when the new
      * one starts; "end" is the new duration in milliseconds. */
2381  if (ist->prev_sub.got_output) {
2382  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2383  1000, AV_TIME_BASE);
2384  if (end < ist->prev_sub.subtitle.end_display_time) {
2385  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2386  "Subtitle duration reduced from %d to %d%s\n",
2388  end <= 0 ? ", dropping it" : "");
2390  }
2391  }
     /* Swap current and previous: the rest of the function now operates on
      * the previously buffered subtitle while the new one is stored. */
2392  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2393  FFSWAP(int, ret, ist->prev_sub.ret);
2394  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2395  if (end <= 0)
2396  goto out;
2397  }
2398 
2399  if (!*got_output)
2400  return ret;
2401 
2402  sub2video_update(ist, &subtitle);
2403 
2404  if (!subtitle.num_rects)
2405  goto out;
2406 
2407  ist->frames_decoded++;
2408 
     /* Encode the subtitle into every eligible subtitle output stream. */
2409  for (i = 0; i < nb_output_streams; i++) {
2410  OutputStream *ost = output_streams[i];
2411 
2412  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2413  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2414  continue;
2415 
2416  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2417  }
2418 
2419 out:
2420  avsubtitle_free(&subtitle);
2421  return ret;
2422 }
2423 
/*
 * Push a NULL frame (EOF marker) into every buffersrc filter attached to
 * the input stream; returns the first error, or 0 on success.
 *
 * NOTE(review): the signature line (original line 2424) is missing from
 * this extraction; judging by the call site at original line 2556 this is
 * static int send_filter_eof(InputStream *ist) — confirm upstream.
 */
2425 {
2426  int i, ret;
2427  for (i = 0; i < ist->nb_filters; i++) {
2428  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2429  if (ret < 0)
2430  return ret;
2431  }
2432  return 0;
2433 }
2434 
2435 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2436 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2437 {
2438  int ret = 0, i;
2439  int repeating = 0;
2440  int eof_reached = 0;
2441 
2442  AVPacket avpkt;
2443  if (!ist->saw_first_ts) {
2444  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2445  ist->pts = 0;
2446  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2447  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2448  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2449  }
2450  ist->saw_first_ts = 1;
2451  }
2452 
2453  if (ist->next_dts == AV_NOPTS_VALUE)
2454  ist->next_dts = ist->dts;
2455  if (ist->next_pts == AV_NOPTS_VALUE)
2456  ist->next_pts = ist->pts;
2457 
2458  if (!pkt) {
2459  /* EOF handling */
2460  av_init_packet(&avpkt);
2461  avpkt.data = NULL;
2462  avpkt.size = 0;
2463  } else {
2464  avpkt = *pkt;
2465  }
2466 
2467  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2468  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2469  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2470  ist->next_pts = ist->pts = ist->dts;
2471  }
2472 
2473  // while we have more to decode or while the decoder did output something on EOF
2474  while (ist->decoding_needed) {
2475  int duration = 0;
2476  int got_output = 0;
2477 
2478  ist->pts = ist->next_pts;
2479  ist->dts = ist->next_dts;
2480 
2481  switch (ist->dec_ctx->codec_type) {
2482  case AVMEDIA_TYPE_AUDIO:
2483  ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output);
2484  break;
2485  case AVMEDIA_TYPE_VIDEO:
2486  ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, !pkt);
2487  if (!repeating || !pkt || got_output) {
2488  if (pkt && pkt->duration) {
2489  duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2490  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2492  duration = ((int64_t)AV_TIME_BASE *
2493  ist->dec_ctx->framerate.den * ticks) /
2495  }
2496 
2497  if(ist->dts != AV_NOPTS_VALUE && duration) {
2498  ist->next_dts += duration;
2499  }else
2500  ist->next_dts = AV_NOPTS_VALUE;
2501  }
2502 
2503  if (got_output)
2504  ist->next_pts += duration; //FIXME the duration is not correct in some cases
2505  break;
2506  case AVMEDIA_TYPE_SUBTITLE:
2507  if (repeating)
2508  break;
2509  ret = transcode_subtitles(ist, &avpkt, &got_output);
2510  if (!pkt && ret >= 0)
2511  ret = AVERROR_EOF;
2512  break;
2513  default:
2514  return -1;
2515  }
2516 
2517  if (ret == AVERROR_EOF) {
2518  eof_reached = 1;
2519  break;
2520  }
2521 
2522  if (ret < 0) {
2523  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2524  ist->file_index, ist->st->index, av_err2str(ret));
2525  if (exit_on_error)
2526  exit_program(1);
2527  // Decoding might not terminate if we're draining the decoder, and
2528  // the decoder keeps returning an error.
2529  // This should probably be considered a libavcodec issue.
2530  // Sample: fate-vsynth1-dnxhd-720p-hr-lb
2531  if (!pkt)
2532  eof_reached = 1;
2533  break;
2534  }
2535 
2536  if (!got_output)
2537  break;
2538 
2539  // During draining, we might get multiple output frames in this loop.
2540  // ffmpeg.c does not drain the filter chain on configuration changes,
2541  // which means if we send multiple frames at once to the filters, and
2542  // one of those frames changes configuration, the buffered frames will
2543  // be lost. This can upset certain FATE tests.
2544  // Decode only 1 frame per call on EOF to appease these FATE tests.
2545  // The ideal solution would be to rewrite decoding to use the new
2546  // decoding API in a better way.
2547  if (!pkt)
2548  break;
2549 
2550  repeating = 1;
2551  }
2552 
2553  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2554  /* except when looping we need to flush but not to send an EOF */
2555  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2556  int ret = send_filter_eof(ist);
2557  if (ret < 0) {
2558  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2559  exit_program(1);
2560  }
2561  }
2562 
2563  /* handle stream copy */
2564  if (!ist->decoding_needed) {
2565  ist->dts = ist->next_dts;
2566  switch (ist->dec_ctx->codec_type) {
2567  case AVMEDIA_TYPE_AUDIO:
2568  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2569  ist->dec_ctx->sample_rate;
2570  break;
2571  case AVMEDIA_TYPE_VIDEO:
2572  if (ist->framerate.num) {
2573  // TODO: Remove work-around for c99-to-c89 issue 7
2574  AVRational time_base_q = AV_TIME_BASE_Q;
2575  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2576  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2577  } else if (pkt->duration) {
2578  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2579  } else if(ist->dec_ctx->framerate.num != 0) {
2580  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2581  ist->next_dts += ((int64_t)AV_TIME_BASE *
2582  ist->dec_ctx->framerate.den * ticks) /
2584  }
2585  break;
2586  }
2587  ist->pts = ist->dts;
2588  ist->next_pts = ist->next_dts;
2589  }
2590  for (i = 0; pkt && i < nb_output_streams; i++) {
2591  OutputStream *ost = output_streams[i];
2592 
2593  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2594  continue;
2595 
2596  do_streamcopy(ist, ost, pkt);
2597  }
2598 
2599  return !eof_reached;
2600 }
2601 
/*
 * Build an SDP description covering every RTP output context and either
 * print it to stdout or write it to -sdp_file.  Does nothing until all
 * output files have written their headers.
 *
 * NOTE(review): original line 2639 (following avio_closep) is missing from
 * this extraction; the gap is preserved as-is.
 */
2602 static void print_sdp(void)
2603 {
2604  char sdp[16384];
2605  int i;
2606  int j;
2607  AVIOContext *sdp_pb;
2608  AVFormatContext **avc;
2609 
     /* Wait until every muxer has written its header. */
2610  for (i = 0; i < nb_output_files; i++) {
2611  if (!output_files[i]->header_written)
2612  return;
2613  }
2614 
2615  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2616  if (!avc)
2617  exit_program(1);
     /* Collect only the RTP output contexts; j counts them. */
2618  for (i = 0, j = 0; i < nb_output_files; i++) {
2619  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2620  avc[j] = output_files[i]->ctx;
2621  j++;
2622  }
2623  }
2624 
2625  if (!j)
2626  goto fail;
2627 
2628  av_sdp_create(avc, j, sdp, sizeof(sdp));
2629 
2630  if (!sdp_filename) {
2631  printf("SDP:\n%s\n", sdp);
2632  fflush(stdout);
2633  } else {
2634  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2635  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2636  } else {
2637  avio_printf(sdp_pb, "SDP:\n%s", sdp);
2638  avio_closep(&sdp_pb);
2640  }
2641  }
2642 
2643 fail:
2644  av_freep(&avc);
2645 }
2646 
/*
 * Look up the HWAccel table entry matching the given pixel format; returns
 * NULL when no entry matches.  Iteration stops at the NULL-named sentinel.
 *
 * NOTE(review): the signature line (original line 2647) is missing from
 * this extraction; given the *p argument at the call site (original line
 * 2669) this takes an enum AVPixelFormat — confirm upstream.
 */
2648 {
2649  int i;
2650  for (i = 0; hwaccels[i].name; i++)
2651  if (hwaccels[i].pix_fmt == pix_fmt)
2652  return &hwaccels[i];
2653  return NULL;
2654 }
2655 
/*
 * AVCodecContext.get_format callback: walk the decoder's candidate pixel
 * formats and pick the first hwaccel format that matches the user's
 * -hwaccel selection and initializes successfully; otherwise fall through
 * to the first non-hwaccel format.
 *
 * NOTE(review): the signature line (original line 2656) and the line
 * initializing "desc" (original line 2663, presumably via
 * av_pix_fmt_desc_get(*p)) are missing from this extraction; the gaps are
 * preserved as-is — verify against upstream ffmpeg.c.
 */
2657 {
2658  InputStream *ist = s->opaque;
2659  const enum AVPixelFormat *p;
2660  int ret;
2661 
     /* The candidate list is terminated by -1 (AV_PIX_FMT_NONE). */
2662  for (p = pix_fmts; *p != -1; p++) {
2664  const HWAccel *hwaccel;
2665 
     /* Software formats come after hwaccel formats; stop at the first one. */
2666  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2667  break;
2668 
2669  hwaccel = get_hwaccel(*p);
2670  if (!hwaccel ||
2671  (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2672  (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2673  continue;
2674 
2675  ret = hwaccel->init(s);
2676  if (ret < 0) {
     /* Explicitly requested hwaccel failed: give up rather than fall back. */
2677  if (ist->hwaccel_id == hwaccel->id) {
2679  "%s hwaccel requested for input stream #%d:%d, "
2680  "but cannot be initialized.\n", hwaccel->name,
2681  ist->file_index, ist->st->index);
2682  return AV_PIX_FMT_NONE;
2683  }
2684  continue;
2685  }
2686 
2687  if (ist->hw_frames_ctx) {
2689  if (!s->hw_frames_ctx)
2690  return AV_PIX_FMT_NONE;
2691  }
2692 
2693  ist->active_hwaccel_id = hwaccel->id;
2694  ist->hwaccel_pix_fmt = *p;
2695  break;
2696  }
2697 
2698  return *p;
2699 }
2700 
/*
 * AVCodecContext.get_buffer2 callback: delegate to the hwaccel's buffer
 * allocator when the frame uses the active hwaccel pixel format, otherwise
 * use libavcodec's default allocator.
 *
 * NOTE(review): the signature line (original line 2701) is missing from
 * this extraction; from the forwarded calls this takes
 * (AVCodecContext *s, AVFrame *frame, int flags) — confirm upstream.
 */
2702 {
2703  InputStream *ist = s->opaque;
2704 
2705  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2706  return ist->hwaccel_get_buffer(s, frame, flags);
2707 
2708  return avcodec_default_get_buffer2(s, frame, flags);
2709 }
2710 
/*
 * Prepare one input stream for transcoding: seed its input filters from the
 * decoder context and, when decoding is needed, install the ffmpeg.c
 * callbacks (get_format/get_buffer2), apply decoder options, and open the
 * decoder.  On failure a human-readable message is written into
 * error[error_len] and a negative AVERROR is returned.
 *
 * NOTE(review): original lines 2741, 2749 and 2763 are missing from this
 * extraction; the gaps are preserved as-is — verify against upstream
 * ffmpeg.c before editing.
 */
2711 static int init_input_stream(int ist_index, char *error, int error_len)
2712 {
2713  int i, ret;
2714  InputStream *ist = input_streams[ist_index];
2715 
2716  for (i = 0; i < ist->nb_filters; i++) {
2717  ret = ifilter_parameters_from_decoder(ist->filters[i], ist->dec_ctx);
2718  if (ret < 0) {
2719  av_log(NULL, AV_LOG_FATAL, "Error initializing filter input\n");
2720  return ret;
2721  }
2722  }
2723 
2724  if (ist->decoding_needed) {
2725  AVCodec *codec = ist->dec;
2726  if (!codec) {
2727  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2728  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2729  return AVERROR(EINVAL);
2730  }
2731 
     /* Hook ffmpeg.c's hwaccel-aware format/buffer selection into the decoder. */
2732  ist->dec_ctx->opaque = ist;
2733  ist->dec_ctx->get_format = get_format;
2734  ist->dec_ctx->get_buffer2 = get_buffer;
2735  ist->dec_ctx->thread_safe_callbacks = 1;
2736 
2737  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2738  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2739  (ist->decoding_needed & DECODING_FOR_OST)) {
2740  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2742  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2743  }
2744 
2745  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2746 
2747  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2748  * audio, and video decoders such as cuvid or mediacodec */
2750 
2751  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2752  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2753  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2754  if (ret == AVERROR_EXPERIMENTAL)
2755  abort_codec_experimental(codec, 0);
2756 
2757  snprintf(error, error_len,
2758  "Error while opening decoder for input stream "
2759  "#%d:%d : %s",
2760  ist->file_index, ist->st->index, av_err2str(ret));
2761  return ret;
2762  }
2764  }
2765 
2766  ist->next_pts = AV_NOPTS_VALUE;
2767  ist->next_dts = AV_NOPTS_VALUE;
2768 
2769  return 0;
2770 }
2771 
/*
 * Return the InputStream feeding this output stream, or NULL when the
 * output has no direct source (e.g. it is fed by a filtergraph).
 *
 * NOTE(review): the signature line (original line 2772) is missing from
 * this extraction; from the body this takes an OutputStream *ost and
 * returns InputStream * — confirm upstream.
 */
2773 {
2774  if (ost->source_index >= 0)
2775  return input_streams[ost->source_index];
2776  return NULL;
2777 }
2778 
/* qsort() comparator for int64_t values: returns -1, 0 or 1 as *a is
 * less than, equal to, or greater than *b (same result as FFDIFFSIGN). */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    return (lhs > rhs) - (lhs < rhs);
}
2783 
2784 /* open the muxer when all the streams are initialized */
2785 static int check_init_output_file(OutputFile *of, int file_index)
2786 {
2787  int ret, i;
2788 
2789  for (i = 0; i < of->ctx->nb_streams; i++) {
2790  OutputStream *ost = output_streams[of->ost_index + i];
2791  if (!ost->initialized)
2792  return 0;
2793  }
2794 
2795  of->ctx->interrupt_callback = int_cb;
2796 
2797  ret = avformat_write_header(of->ctx, &of->opts);
2798  if (ret < 0) {
2800  "Could not write header for output file #%d "
2801  "(incorrect codec parameters ?): %s\n",
2802  file_index, av_err2str(ret));
2803  return ret;
2804  }
2805  //assert_avoptions(of->opts);
2806  of->header_written = 1;
2807 
2808  av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2809 
2810  if (sdp_filename || want_sdp)
2811  print_sdp();
2812 
2813  /* flush the muxing queues */
2814  for (i = 0; i < of->ctx->nb_streams; i++) {
2815  OutputStream *ost = output_streams[of->ost_index + i];
2816 
2817  while (av_fifo_size(ost->muxing_queue)) {
2818  AVPacket pkt;
2819  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2820  write_packet(of, &pkt, ost);
2821  }
2822  }
2823 
2824  return 0;
2825 }
2826 
/*
 * Initialize the chain of bitstream filters attached to an output stream:
 * wire each filter's input parameters/time base to the previous filter's
 * output (the first filter takes the stream's codec parameters), then copy
 * the final filter's output parameters and time base back onto the stream.
 * Returns 0 (also when there are no filters) or a negative AVERROR.
 *
 * NOTE(review): the signature line (original line 2827) is missing from
 * this extraction; from the body this takes an OutputStream *ost — confirm
 * upstream.
 */
2828 {
2829  AVBSFContext *ctx;
2830  int i, ret;
2831 
2832  if (!ost->nb_bitstream_filters)
2833  return 0;
2834 
2835  for (i = 0; i < ost->nb_bitstream_filters; i++) {
2836  ctx = ost->bsf_ctx[i];
2837 
     /* Chain: input of filter i comes from output of filter i-1. */
2838  ret = avcodec_parameters_copy(ctx->par_in,
2839  i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
2840  if (ret < 0)
2841  return ret;
2842 
2843  ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
2844 
2845  ret = av_bsf_init(ctx);
2846  if (ret < 0) {
2847  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2848  ost->bsf_ctx[i]->filter->name);
2849  return ret;
2850  }
2851  }
2852 
     /* Propagate the last filter's output back onto the stream itself. */
2853  ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
2854  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
2855  if (ret < 0)
2856  return ret;
2857 
2858  ost->st->time_base = ctx->time_base_out;
2859 
2860  return 0;
2861 }
2862 
/*
 * Set up an output stream for stream copy (no re-encoding): copy codec
 * parameters from the source, pick a codec tag compatible with the output
 * container, carry over frame rate, disposition, stream side data, and
 * per-media-type fixups (block_align reset for MP3/AC3, sample aspect
 * ratio / frame rates for video).
 *
 * NOTE(review): the signature line (original line 2863) and several
 * interior lines (2879, 2883, 2903, 2909) are missing from this
 * extraction; the gaps are preserved as-is — verify against upstream
 * ffmpeg.c before editing.
 */
2864 {
2865  OutputFile *of = output_files[ost->file_index];
2866  InputStream *ist = get_input_stream(ost);
2867  AVCodecParameters *par_dst = ost->st->codecpar;
2868  AVCodecParameters *par_src = ost->ref_par;
2869  AVRational sar;
2870  int i, ret;
2871  uint32_t codec_tag = par_dst->codec_tag;
2872 
2873  av_assert0(ist && !ost->filter);
2874 
2875  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
2876  if (ret >= 0)
2877  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2878  if (ret < 0) {
2880  "Error setting up codec context options.\n");
2881  return ret;
2882  }
2884 
     /* Keep the source codec tag only when the output container accepts it. */
2885  if (!codec_tag) {
2886  unsigned int codec_tag_tmp;
2887  if (!of->ctx->oformat->codec_tag ||
2888  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
2889  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
2890  codec_tag = par_src->codec_tag;
2891  }
2892 
2893  ret = avcodec_parameters_copy(par_dst, par_src);
2894  if (ret < 0)
2895  return ret;
2896 
2897  par_dst->codec_tag = codec_tag;
2898 
2899  if (!ost->frame_rate.num)
2900  ost->frame_rate = ist->framerate;
2901  ost->st->avg_frame_rate = ost->frame_rate;
2902 
2904  if (ret < 0)
2905  return ret;
2906 
2907  // copy timebase while removing common factors
2908  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
2910 
2911  // copy disposition
2912  ost->st->disposition = ist->st->disposition;
2913 
     /* Deep-copy the stream-level side data, skipping the display matrix
      * when rotation was overridden on the command line. */
2914  if (ist->st->nb_side_data) {
2915  ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2916  sizeof(*ist->st->side_data));
2917  if (!ost->st->side_data)
2918  return AVERROR(ENOMEM);
2919 
2920  ost->st->nb_side_data = 0;
2921  for (i = 0; i < ist->st->nb_side_data; i++) {
2922  const AVPacketSideData *sd_src = &ist->st->side_data[i];
2923  AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2924 
2925  if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2926  continue;
2927 
2928  sd_dst->data = av_malloc(sd_src->size);
2929  if (!sd_dst->data)
2930  return AVERROR(ENOMEM);
2931  memcpy(sd_dst->data, sd_src->data, sd_src->size);
2932  sd_dst->size = sd_src->size;
2933  sd_dst->type = sd_src->type;
2934  ost->st->nb_side_data++;
2935  }
2936  }
2937 
2938  ost->parser = av_parser_init(par_dst->codec_id);
2939  ost->parser_avctx = avcodec_alloc_context3(NULL);
2940  if (!ost->parser_avctx)
2941  return AVERROR(ENOMEM);
2942 
2943  switch (par_dst->codec_type) {
2944  case AVMEDIA_TYPE_AUDIO:
2945  if (audio_volume != 256) {
2946  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2947  exit_program(1);
2948  }
     /* Clear bogus block_align values left by some demuxers for MP3/AC3. */
2949  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
2950  par_dst->block_align= 0;
2951  if(par_dst->codec_id == AV_CODEC_ID_AC3)
2952  par_dst->block_align= 0;
2953  break;
2954  case AVMEDIA_TYPE_VIDEO:
2955  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2956  sar =
2957  av_mul_q(ost->frame_aspect_ratio,
2958  (AVRational){ par_dst->height, par_dst->width });
2959  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2960  "with stream copy may produce invalid files\n");
2961  }
2962  else if (ist->st->sample_aspect_ratio.num)
2963  sar = ist->st->sample_aspect_ratio;
2964  else
2965  sar = par_src->sample_aspect_ratio;
2966  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
2967  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2968  ost->st->r_frame_rate = ist->st->r_frame_rate;
2969  break;
2970  }
2971 
2972  return 0;
2973 }
2974 
/*
 * Store an "encoder" metadata tag on the output stream identifying the
 * encoder used (e.g. "Lavc... libx264").  Skipped when the user already
 * set one; the library version is omitted in bitexact mode so FATE output
 * stays reproducible.
 *
 * NOTE(review): the signature line (original line 2975) is missing from
 * this extraction; from the uses of "of" and "ost" this takes
 * (OutputFile *of, OutputStream *ost) — confirm upstream.  Line 3013
 * (flags argument of the final av_dict_set) is also missing.
 */
2976 {
2977  AVDictionaryEntry *e;
2978 
2979  uint8_t *encoder_string;
2980  int encoder_string_len;
2981  int format_flags = 0;
2982  int codec_flags = 0;
2983 
     /* User-provided tag wins; do not overwrite it. */
2984  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
2985  return;
2986 
     /* Evaluate the effective fflags / codec flags to detect bitexact mode. */
2987  e = av_dict_get(of->opts, "fflags", NULL, 0);
2988  if (e) {
2989  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2990  if (!o)
2991  return;
2992  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
2993  }
2994  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2995  if (e) {
2996  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2997  if (!o)
2998  return;
2999  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3000  }
3001 
3002  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3003  encoder_string = av_mallocz(encoder_string_len);
3004  if (!encoder_string)
3005  exit_program(1);
3006 
3007  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3008  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3009  else
3010  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3011  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3012  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3014 }
3015 
3016 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3017  AVCodecContext *avctx)
3018 {
3019  char *p;
3020  int n = 1, i, size, index = 0;
3021  int64_t t, *pts;
3022 
3023  for (p = kf; *p; p++)
3024  if (*p == ',')
3025  n++;
3026  size = n;
3027  pts = av_malloc_array(size, sizeof(*pts));
3028  if (!pts) {
3029  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3030  exit_program(1);
3031  }
3032 
3033  p = kf;
3034  for (i = 0; i < n; i++) {
3035  char *next = strchr(p, ',');
3036 
3037  if (next)
3038  *next++ = 0;
3039 
3040  if (!memcmp(p, "chapters", 8)) {
3041 
3042  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3043  int j;
3044 
3045  if (avf->nb_chapters > INT_MAX - size ||
3046  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3047  sizeof(*pts)))) {
3049  "Could not allocate forced key frames array.\n");
3050  exit_program(1);
3051  }
3052  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3053  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3054 
3055  for (j = 0; j < avf->nb_chapters; j++) {
3056  AVChapter *c = avf->chapters[j];
3057  av_assert1(index < size);
3058  pts[index++] = av_rescale_q(c->start, c->time_base,
3059  avctx->time_base) + t;
3060  }
3061 
3062  } else {
3063 
3064  t = parse_time_or_die("force_key_frames", p, 1);
3065  av_assert1(index < size);
3066  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3067 
3068  }
3069 
3070  p = next;
3071  }
3072 
3073  av_assert0(index == size);
3074  qsort(pts, size, sizeof(*pts), compare_int64);
3075  ost->forced_kf_count = size;
3076  ost->forced_kf_pts = pts;
3077 }
3078 
/*
 * init_output_stream_encode(): fill in the encoder context (enc_ctx) of an
 * output stream before the encoder is opened — frame rate selection, time
 * base, dimensions, pixel/sample format, forced keyframe timestamps.
 * NOTE(review): doxygen-extracted chunk; each line keeps its original source
 * line number and several lines are missing (e.g. the signature at 3079 and
 * the dec_ctx declaration at 3083), so this edit adds comments only.
 */
3080 {
3081  InputStream *ist = get_input_stream(ost);
3082  AVCodecContext *enc_ctx = ost->enc_ctx;
3084  AVFormatContext *oc = output_files[ost->file_index]->ctx;
3085  int j, ret;
3086 
3087  set_encoder_id(output_files[ost->file_index], ost);
3088 
3089  if (ist) {
       /* mapped 1:1 from an input stream: inherit disposition and chroma siting */
3090  ost->st->disposition = ist->st->disposition;
3091 
3092  dec_ctx = ist->dec_ctx;
3093 
3094  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3095  } else {
       /* no direct source stream: check whether another stream of the same
        * media type exists in the output (gap at 3102-3104 hides the
        * disposition defaulting that follows) */
3096  for (j = 0; j < oc->nb_streams; j++) {
3097  AVStream *st = oc->streams[j];
3098  if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3099  break;
3100  }
3101  if (j == oc->nb_streams)
3102  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3105  }
3106 
3107  if ((enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3108  enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3110  FilterGraph *fg = ost->filter->graph;
3111 
3112  if (configure_filtergraph(fg)) {
3113  av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
3114  exit_program(1);
3115  }
3116  }
3117 
3118  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
       /* frame-rate fallback chain: -r option, input -framerate, input
        * r_frame_rate, and finally a hard 25 fps default with a warning */
3119  if (!ost->frame_rate.num)
3121  if (ist && !ost->frame_rate.num)
3122  ost->frame_rate = ist->framerate;
3123  if (ist && !ost->frame_rate.num)
3124  ost->frame_rate = ist->st->r_frame_rate;
3125  if (ist && !ost->frame_rate.num) {
3126  ost->frame_rate = (AVRational){25, 1};
3128  "No information "
3129  "about the input framerate is available. Falling "
3130  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3131  "if you want a different framerate.\n",
3132  ost->file_index, ost->index);
3133  }
3134 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
       /* snap to the nearest rate the encoder supports unless -force_fps */
3135  if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3136  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3137  ost->frame_rate = ost->enc->supported_framerates[idx];
3138  }
3139  // reduce frame rate for mpeg4 to be within the spec limits
3140  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3141  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3142  ost->frame_rate.num, ost->frame_rate.den, 65535);
3143  }
3144  }
3145 
3146  switch (enc_ctx->codec_type) {
3147  case AVMEDIA_TYPE_AUDIO:
       /* clamp raw-sample depth to what the chosen sample format can hold */
3149  if (dec_ctx)
3150  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3151  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3155  enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3156  break;
3157  case AVMEDIA_TYPE_VIDEO:
3158  enc_ctx->time_base = av_inv_q(ost->frame_rate);
3159  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3161  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3163  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3164  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3165  }
       /* rescale user-supplied forced keyframe times into encoder time base */
3166  for (j = 0; j < ost->forced_kf_count; j++)
3167  ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3169  enc_ctx->time_base);
3170 
       /* geometry and aspect ratio come from the buffersink of the filtergraph */
3171  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3172  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3173  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3174  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3175  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3176  if (!strncmp(ost->enc->name, "libx264", 7) &&
3178  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3181  "No pixel format specified, %s for H.264 encoding chosen.\n"
3182  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3184  if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3185  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3188  "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3189  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3191  enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3192  if (dec_ctx)
3193  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3194  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3195 
3196  ost->st->avg_frame_rate = ost->frame_rate;
3197 
3198  if (!dec_ctx ||
3199  enc_ctx->width != dec_ctx->width ||
3200  enc_ctx->height != dec_ctx->height ||
3201  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3203  }
3204 
3205  if (ost->forced_keyframes) {
       /* "expr:" prefix selects an expression-driven keyframe schedule;
        * parsing failure is returned to the caller (line 3207-3210 gap) */
3206  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3209  if (ret < 0) {
3211  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3212  return ret;
3213  }
3218 
3219  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3220  // parse it only for static kf timings
3221  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3223  }
3224  }
3225  break;
3226  case AVMEDIA_TYPE_SUBTITLE:
       /* subtitles use a fixed 1/1000 (millisecond) time base */
3227  enc_ctx->time_base = (AVRational){1, 1000};
3228  if (!enc_ctx->width) {
3229  enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3230  enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3231  }
3232  break;
3233  case AVMEDIA_TYPE_DATA:
3234  break;
3235  default:
3236  abort();
3237  break;
3238  }
3239 
3240  return 0;
3241 }
3242 
/*
 * init_output_stream(): fully initialize one output stream — either open the
 * encoder (encoding path) or set up a streamcopy — then apply user-requested
 * dispositions, create bitstream filters and, once all streams of the file
 * are initialized, write the file header.
 * On failure a human-readable message is placed in `error` (error_len bytes).
 * NOTE(review): doxygen extraction with gaps (e.g. 3260 av_mallocz of the
 * subtitle header, 3314 side-data array allocation); comments only.
 */
3243 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3244 {
3245  int ret = 0;
3246 
3247  if (ost->encoding_needed) {
3248  AVCodec *codec = ost->enc;
3249  AVCodecContext *dec = NULL;
3250  InputStream *ist;
3251 
3252  ret = init_output_stream_encode(ost);
3253  if (ret < 0)
3254  return ret;
3255 
3256  if ((ist = get_input_stream(ost)))
3257  dec = ist->dec_ctx;
3258  if (dec && dec->subtitle_header) {
3259  /* ASS code assumes this buffer is null terminated so add extra byte. */
3261  if (!ost->enc_ctx->subtitle_header)
3262  return AVERROR(ENOMEM);
3263  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3265  }
       /* default to automatic threading unless the user chose otherwise */
3266  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3267  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
       /* audio encoders without built-in defaults get 128 kb/s unless -b/-ab set */
3268  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3269  !codec->defaults &&
3270  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3271  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3272  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3273 
3274  if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter)) {
3276  if (!ost->enc_ctx->hw_frames_ctx)
3277  return AVERROR(ENOMEM);
3278  }
3279 
3280  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3281  if (ret == AVERROR_EXPERIMENTAL)
3282  abort_codec_experimental(codec, 1);
3283  snprintf(error, error_len,
3284  "Error while opening encoder for output stream #%d:%d - "
3285  "maybe incorrect parameters such as bit_rate, rate, width or height",
3286  ost->file_index, ost->index);
3287  return ret;
3288  }
3289  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3290  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3292  ost->enc_ctx->frame_size);
       /* common user mistake: bitrate given in kbit/s instead of bit/s */
3294  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3295  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3296  " It takes bits/s as argument, not kbits/s\n");
3297 
3299  if (ret < 0) {
3301  "Error initializing the output stream codec context.\n");
3302  exit_program(1);
3303  }
3304  /*
3305  * FIXME: ost->st->codec should't be needed here anymore.
3306  */
3307  ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3308  if (ret < 0)
3309  return ret;
3310 
       /* deep-copy encoder-produced coded side data onto the stream */
3311  if (ost->enc_ctx->nb_coded_side_data) {
3312  int i;
3313 
3315  sizeof(*ost->st->side_data));
3316  if (!ost->st->side_data)
3317  return AVERROR(ENOMEM);
3318 
3319  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3320  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3321  AVPacketSideData *sd_dst = &ost->st->side_data[i];
3322 
3323  sd_dst->data = av_malloc(sd_src->size);
3324  if (!sd_dst->data)
3325  return AVERROR(ENOMEM);
3326  memcpy(sd_dst->data, sd_src->data, sd_src->size);
3327  sd_dst->size = sd_src->size;
3328  sd_dst->type = sd_src->type;
3329  ost->st->nb_side_data++;
3330  }
3331  }
3332 
3333  // copy timebase while removing common factors
3334  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3335  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3336  ost->st->codec->codec= ost->enc_ctx->codec;
3337  } else if (ost->stream_copy) {
3338  ret = init_output_stream_streamcopy(ost);
3339  if (ret < 0)
3340  return ret;
3341 
3342  /*
3343  * FIXME: will the codec context used by the parser during streamcopy
3344  * This should go away with the new parser API.
3345  */
3346  ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3347  if (ret < 0)
3348  return ret;
3349  }
3350 
3351  // parse user provided disposition, and update stream values
3352  if (ost->disposition) {
       /* local option table so av_opt_eval_flags() can parse a comma-separated
        * disposition string ("default+forced", ...) into AV_DISPOSITION_* bits */
3353  static const AVOption opts[] = {
3354  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3355  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3356  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3357  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3358  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3359  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3360  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3361  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3362  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3363  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3364  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3365  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3366  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3367  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3368  { NULL },
3369  };
3370  static const AVClass class = {
3371  .class_name = "",
3372  .item_name = av_default_item_name,
3373  .option = opts,
3374  .version = LIBAVUTIL_VERSION_INT,
3375  };
3376  const AVClass *pclass = &class;
3377 
3378  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3379  if (ret < 0)
3380  return ret;
3381  }
3382 
3383  /* initialize bitstream filters for the output stream
3384  * needs to be done here, because the codec id for streamcopy is not
3385  * known until now */
3386  ret = init_output_bsfs(ost);
3387  if (ret < 0)
3388  return ret;
3389 
3390  ost->initialized = 1;
3391 
3392  ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3393  if (ret < 0)
3394  return ret;
3395 
3396  return ret;
3397 }
3398 
/*
 * report_new_stream(): warn once per stream when a packet arrives on a stream
 * index that appeared after the initial probe; nb_streams_warn remembers the
 * highest index already reported so each new stream is logged only once.
 * NOTE(review): extraction gap at 3408 (media-type string argument of av_log).
 */
3399 static void report_new_stream(int input_index, AVPacket *pkt)
3400 {
3401  InputFile *file = input_files[input_index];
3402  AVStream *st = file->ctx->streams[pkt->stream_index];
3403 
3404  if (pkt->stream_index < file->nb_streams_warn)
3405  return;
3406  av_log(file->ctx, AV_LOG_WARNING,
3407  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3409  input_index, pkt->stream_index,
3410  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3411  file->nb_streams_warn = pkt->stream_index + 1;
3412 }
3413 
/*
 * transcode_init(): one-time setup performed before the main transcode loop —
 * binds complex-filtergraph outputs to a source stream, initializes all input
 * and output streams (opening decoders/encoders), discards unused programs,
 * writes headers for stream-less outputs, and finally dumps the stream
 * mapping. The dump_format label is reached both on success and on error;
 * `ret`/`error` decide which. Comments only; code left byte-identical.
 */
3414 static int transcode_init(void)
3415 {
3416  int ret = 0, i, j, k;
3417  AVFormatContext *oc;
3418  OutputStream *ost;
3419  InputStream *ist;
3420  char error[1024] = {0};
3421 
     /* for outputs fed by a single-input complex filtergraph, record which
      * input stream ultimately feeds them (used for mapping display below) */
3422  for (i = 0; i < nb_filtergraphs; i++) {
3423  FilterGraph *fg = filtergraphs[i];
3424  for (j = 0; j < fg->nb_outputs; j++) {
3425  OutputFilter *ofilter = fg->outputs[j];
3426  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3427  continue;
3428  if (fg->nb_inputs != 1)
3429  continue;
3430  for (k = nb_input_streams-1; k >= 0 ; k--)
3431  if (fg->inputs[0]->ist == input_streams[k])
3432  break;
3433  ofilter->ost->source_index = k;
3434  }
3435  }
3436 
3437  /* init framerate emulation */
3438  for (i = 0; i < nb_input_files; i++) {
3439  InputFile *ifile = input_files[i];
3440  if (ifile->rate_emu)
3441  for (j = 0; j < ifile->nb_streams; j++)
3442  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3443  }
3444 
3445  /* hwaccel transcoding */
3446  for (i = 0; i < nb_output_streams; i++) {
3447  ost = output_streams[i];
3448 
3449  if (!ost->stream_copy) {
3450 #if CONFIG_LIBMFX
3451  if (qsv_transcode_init(ost))
3452  exit_program(1);
3453 #endif
3454 
3455 #if CONFIG_CUVID
3456  if (cuvid_transcode_init(ost))
3457  exit_program(1);
3458 #endif
3459  }
3460  }
3461 
3462  /* init input streams */
3463  for (i = 0; i < nb_input_streams; i++)
3464  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
       /* on failure close any encoders already opened, then fall through to
        * the mapping dump so the user sees what was attempted */
3465  for (i = 0; i < nb_output_streams; i++) {
3466  ost = output_streams[i];
3467  avcodec_close(ost->enc_ctx);
3468  }
3469  goto dump_format;
3470  }
3471 
3472  /* open each encoder */
3473  for (i = 0; i < nb_output_streams; i++) {
3474  ret = init_output_stream(output_streams[i], error, sizeof(error));
3475  if (ret < 0)
3476  goto dump_format;
3477  }
3478 
3479  /* discard unused programs */
3480  for (i = 0; i < nb_input_files; i++) {
3481  InputFile *ifile = input_files[i];
3482  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3483  AVProgram *p = ifile->ctx->programs[j];
3484  int discard = AVDISCARD_ALL;
3485 
       /* a program is kept if at least one of its streams is not discarded */
3486  for (k = 0; k < p->nb_stream_indexes; k++)
3487  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3488  discard = AVDISCARD_DEFAULT;
3489  break;
3490  }
3491  p->discard = discard;
3492  }
3493  }
3494 
3495  /* write headers for files with no streams */
3496  for (i = 0; i < nb_output_files; i++) {
3497  oc = output_files[i]->ctx;
3498  if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3499  ret = check_init_output_file(output_files[i], i);
3500  if (ret < 0)
3501  goto dump_format;
3502  }
3503  }
3504 
3505  dump_format:
3506  /* dump the stream mapping */
3507  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3508  for (i = 0; i < nb_input_streams; i++) {
3509  ist = input_streams[i];
3510 
3511  for (j = 0; j < ist->nb_filters; j++) {
3512  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3513  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3514  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3515  ist->filters[j]->name);
3516  if (nb_filtergraphs > 1)
3517  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3518  av_log(NULL, AV_LOG_INFO, "\n");
3519  }
3520  }
3521  }
3522 
3523  for (i = 0; i < nb_output_streams; i++) {
3524  ost = output_streams[i];
3525 
3526  if (ost->attachment_filename) {
3527  /* an attached file */
3528  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3529  ost->attachment_filename, ost->file_index, ost->index);
3530  continue;
3531  }
3532 
3533  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3534  /* output from a complex graph */
3535  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3536  if (nb_filtergraphs > 1)
3537  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3538 
3539  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3540  ost->index, ost->enc ? ost->enc->name : "?");
3541  continue;
3542  }
3543 
3544  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3545  input_streams[ost->source_index]->file_index,
3546  input_streams[ost->source_index]->st->index,
3547  ost->file_index,
3548  ost->index);
3549  if (ost->sync_ist != input_streams[ost->source_index])
3550  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3551  ost->sync_ist->file_index,
3552  ost->sync_ist->st->index);
3553  if (ost->stream_copy)
3554  av_log(NULL, AV_LOG_INFO, " (copy)");
3555  else {
       /* show "codec (implementation)" pairs; an implementation named the
        * same as the codec is printed as "native" */
3556  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3557  const AVCodec *out_codec = ost->enc;
3558  const char *decoder_name = "?";
3559  const char *in_codec_name = "?";
3560  const char *encoder_name = "?";
3561  const char *out_codec_name = "?";
3562  const AVCodecDescriptor *desc;
3563 
3564  if (in_codec) {
3565  decoder_name = in_codec->name;
3566  desc = avcodec_descriptor_get(in_codec->id);
3567  if (desc)
3568  in_codec_name = desc->name;
3569  if (!strcmp(decoder_name, in_codec_name))
3570  decoder_name = "native";
3571  }
3572 
3573  if (out_codec) {
3574  encoder_name = out_codec->name;
3575  desc = avcodec_descriptor_get(out_codec->id);
3576  if (desc)
3577  out_codec_name = desc->name;
3578  if (!strcmp(encoder_name, out_codec_name))
3579  encoder_name = "native";
3580  }
3581 
3582  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3583  in_codec_name, decoder_name,
3584  out_codec_name, encoder_name);
3585  }
3586  av_log(NULL, AV_LOG_INFO, "\n");
3587  }
3588 
3589  if (ret) {
3590  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3591  return ret;
3592  }
3593 
3594  transcode_init_done = 1;
3595 
3596  return 0;
3597 }
3598 
3599 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3600 static int need_output(void)
3601 {
3602  int i;
3603 
3604  for (i = 0; i < nb_output_streams; i++) {
3605  OutputStream *ost = output_streams[i];
3606  OutputFile *of = output_files[ost->file_index];
3607  AVFormatContext *os = output_files[ost->file_index]->ctx;
3608 
3609  if (ost->finished ||
3610  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3611  continue;
3612  if (ost->frame_number >= ost->max_frames) {
3613  int j;
3614  for (j = 0; j < of->ctx->nb_streams; j++)
3615  close_output_stream(output_streams[of->ost_index + j]);
3616  continue;
3617  }
3618 
3619  return 1;
3620  }
3621 
3622  return 0;
3623 }
3624 
3625 /**
3626  * Select the output stream to process.
3627  *
3628  * @return selected output stream, or NULL if none available
3629  */
3631 {
3632  int i;
3633  int64_t opts_min = INT64_MAX;
3634  OutputStream *ost_min = NULL;
3635 
3636  for (i = 0; i < nb_output_streams; i++) {
3637  OutputStream *ost = output_streams[i];
3638  int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3639  av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3640  AV_TIME_BASE_Q);
3641  if (ost->st->cur_dts == AV_NOPTS_VALUE)
3642  av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3643 
3644  if (!ost->finished && opts < opts_min) {
3645  opts_min = opts;
3646  ost_min = ost->unavailable ? NULL : ost;
3647  }
3648  }
3649  return ost_min;
3650 }
3651 
/**
 * Enable or disable terminal echo on stdin (fd 0).
 *
 * Best-effort: failures (e.g. stdin is not a tty) are silently ignored,
 * and the function is a no-op when termios is unavailable.
 *
 * @param on non-zero to enable echo, zero to disable it
 */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios t;

    if (tcgetattr(0, &t) != 0)
        return;

    if (on)
        t.c_lflag |= ECHO;
    else
        t.c_lflag &= ~ECHO;

    tcsetattr(0, TCSANOW, &t);
#endif
}
3663 
/*
 * check_keyboard_interaction(): poll the console at most every 100 ms (unless
 * daemonized) and act on single-key commands: q quit, +/- verbosity,
 * s QP histogram, h packet/hex dump cycling, c/C send or queue filtergraph
 * commands, d/D debug flags, ? help. Returns AVERROR_EXIT on quit/signal,
 * 0 otherwise.
 * NOTE(review): the signature line (original 3664) and a few av_log lines
 * (3688, 3725) are missing from this extraction; comments only.
 */
3665 {
3666  int i, ret, key;
3667  static int64_t last_time;
3668  if (received_nb_signals)
3669  return AVERROR_EXIT;
3670  /* read_key() returns 0 on EOF */
3671  if(cur_time - last_time >= 100000 && !run_as_daemon){
3672  key = read_key();
3673  last_time = cur_time;
3674  }else
3675  key = -1;
3676  if (key == 'q')
3677  return AVERROR_EXIT;
3678  if (key == '+') av_log_set_level(av_log_get_level()+10);
3679  if (key == '-') av_log_set_level(av_log_get_level()-10);
3680  if (key == 's') qp_hist ^= 1;
3681  if (key == 'h'){
     /* 'h' cycles: off -> pkt dump -> pkt+hex dump -> off */
3682  if (do_hex_dump){
3683  do_hex_dump = do_pkt_dump = 0;
3684  } else if(do_pkt_dump){
3685  do_hex_dump = 1;
3686  } else
3687  do_pkt_dump = 1;
3689  }
3690  if (key == 'c' || key == 'C'){
     /* interactive filter command: "<target>|all <time>|-1 <command>[ <arg>]";
      * lowercase 'c' sends to the first matching filter only */
3691  char buf[4096], target[64], command[256], arg[256] = {0};
3692  double time;
3693  int k, n = 0;
3694  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3695  i = 0;
3696  set_tty_echo(1);
3697  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3698  if (k > 0)
3699  buf[i++] = k;
3700  buf[i] = 0;
3701  set_tty_echo(0);
3702  fprintf(stderr, "\n");
3703  if (k > 0 &&
3704  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3705  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3706  target, time, command, arg);
3707  for (i = 0; i < nb_filtergraphs; i++) {
3708  FilterGraph *fg = filtergraphs[i];
3709  if (fg->graph) {
3710  if (time < 0) {
3711  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3712  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3713  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3714  } else if (key == 'c') {
3715  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3716  ret = AVERROR_PATCHWELCOME;
3717  } else {
3718  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3719  if (ret < 0)
3720  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3721  }
3722  }
3723  }
3724  } else {
3726  "Parse error, at least 3 arguments were expected, "
3727  "only %d given in string '%s'\n", n, buf);
3728  }
3729  }
3730  if (key == 'd' || key == 'D'){
     /* 'D' cycles through debug flag values; 'd' reads one from the console */
3731  int debug=0;
3732  if(key == 'D') {
3733  debug = input_streams[0]->st->codec->debug<<1;
3734  if(!debug) debug = 1;
3735  while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3736  debug += debug;
3737  }else{
3738  char buf[32];
3739  int k = 0;
3740  i = 0;
3741  set_tty_echo(1);
3742  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3743  if (k > 0)
3744  buf[i++] = k;
3745  buf[i] = 0;
3746  set_tty_echo(0);
3747  fprintf(stderr, "\n");
3748  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3749  fprintf(stderr,"error parsing debug value\n");
3750  }
3751  for(i=0;i<nb_input_streams;i++) {
3752  input_streams[i]->st->codec->debug = debug;
3753  }
3754  for(i=0;i<nb_output_streams;i++) {
3755  OutputStream *ost = output_streams[i];
3756  ost->enc_ctx->debug = debug;
3757  }
3758  if(debug) av_log_set_level(AV_LOG_DEBUG);
3759  fprintf(stderr,"debug=%d\n", debug);
3760  }
3761  if (key == '?'){
3762  fprintf(stderr, "key function\n"
3763  "? show this help\n"
3764  "+ increase verbosity\n"
3765  "- decrease verbosity\n"
3766  "c Send command to first matching filter supporting it\n"
3767  "C Send/Queue command to all matching filters\n"
3768  "D cycle through available debug modes\n"
3769  "h dump packets/hex press to cycle through the 3 states\n"
3770  "q quit\n"
3771  "s Show QP histogram\n"
3772  );
3773  }
3774  return 0;
3775 }
3776 
3777 #if HAVE_PTHREADS
/*
 * input_thread(): per-input-file reader thread. Loops av_read_frame() and
 * forwards each packet to the main thread via the file's message queue.
 * A first non-blocking send that returns EAGAIN is retried blocking (after
 * logging a thread_queue_size hint, line 3800 missing from extraction).
 * On read or send failure the error is stored in the queue for the receiver
 * and the thread exits.
 */
3778 static void *input_thread(void *arg)
3779 {
3780  InputFile *f = arg;
3781  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3782  int ret = 0;
3783 
3784  while (1) {
3785  AVPacket pkt;
3786  ret = av_read_frame(f->ctx, &pkt);
3787 
3788  if (ret == AVERROR(EAGAIN)) {
3789  av_usleep(10000);
3790  continue;
3791  }
3792  if (ret < 0) {
     /* EOF or read error: propagate to the consumer side of the queue */
3793  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3794  break;
3795  }
3796  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3797  if (flags && ret == AVERROR(EAGAIN)) {
     /* queue full in non-blocking mode: fall back to blocking permanently */
3798  flags = 0;
3799  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3801  "Thread message queue blocking; consider raising the "
3802  "thread_queue_size option (current value: %d)\n",
3803  f->thread_queue_size);
3804  }
3805  if (ret < 0) {
3806  if (ret != AVERROR_EOF)
3807  av_log(f->ctx, AV_LOG_ERROR,
3808  "Unable to send packet to main thread: %s\n",
3809  av_err2str(ret));
3810  av_packet_unref(&pkt);
3811  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3812  break;
3813  }
3814  }
3815 
3816  return NULL;
3817 }
3818 
/*
 * free_input_threads(): drain and tear down every input reader thread —
 * unref any packets still queued, join the thread, free the queue.
 * NOTE(review): line 3829 is missing from this extraction (presumably the
 * av_thread_message_queue_set_err_send() call that unblocks the sender
 * before draining); comments only.
 */
3819 static void free_input_threads(void)
3820 {
3821  int i;
3822 
3823  for (i = 0; i < nb_input_files; i++) {
3824  InputFile *f = input_files[i];
3825  AVPacket pkt;
3826 
3827  if (!f || !f->in_thread_queue)
3828  continue;
     /* drop every packet still sitting in the queue */
3830  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3831  av_packet_unref(&pkt);
3832 
3833  pthread_join(f->thread, NULL);
3834  f->joined = 1;
3835  av_thread_message_queue_free(&f->in_thread_queue);
3836  }
3837 }
3838 
3839 static int init_input_threads(void)
3840 {
3841  int i, ret;
3842 
3843  if (nb_input_files == 1)
3844  return 0;
3845 
3846  for (i = 0; i < nb_input_files; i++) {
3847  InputFile *f = input_files[i];
3848 
3849  if (f->ctx->pb ? !f->ctx->pb->seekable :
3850  strcmp(f->ctx->iformat->name, "lavfi"))
3851  f->non_blocking = 1;
3852  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3853  f->thread_queue_size, sizeof(AVPacket));
3854  if (ret < 0)
3855  return ret;
3856 
3857  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3858  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3859  av_thread_message_queue_free(&f->in_thread_queue);
3860  return AVERROR(ret);
3861  }
3862  }
3863  return 0;
3864 }
3865 
/*
 * get_input_packet_mt(): receive one packet from the file's reader-thread
 * queue, non-blocking when the file was flagged non_blocking.
 * NOTE(review): line 3870 is missing from this extraction (the
 * "AV_THREAD_MESSAGE_NONBLOCK : 0);" tail of the ternary); comments only.
 */
3866 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3867 {
3868  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3869  f->non_blocking ?
3871 }
3872 #endif
3873 
/*
 * get_input_packet(): fetch the next packet of an input file. With -re
 * (rate_emu) it returns EAGAIN while any stream of the file is ahead of
 * wall-clock time; with multiple inputs it reads from the per-file thread
 * queue, otherwise directly via av_read_frame().
 * NOTE(review): the signature line (original 3874, presumably
 * "static int get_input_packet(InputFile *f, AVPacket *pkt)") is missing
 * from this extraction; comments only.
 */
3875 {
3876  if (f->rate_emu) {
3877  int i;
3878  for (i = 0; i < f->nb_streams; i++) {
3879  InputStream *ist = input_streams[f->ist_index + i];
       /* dts is in AV_TIME_BASE units; convert to microseconds for comparison */
3880  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3881  int64_t now = av_gettime_relative() - ist->start;
3882  if (pts > now)
3883  return AVERROR(EAGAIN);
3884  }
3885  }
3886 
3887 #if HAVE_PTHREADS
3888  if (nb_input_files > 1)
3889  return get_input_packet_mt(f, pkt);
3890 #endif
3891  return av_read_frame(f->ctx, pkt);
3892 }
3893 
3894 static int got_eagain(void)
3895 {
3896  int i;
3897  for (i = 0; i < nb_output_streams; i++)
3898  if (output_streams[i]->unavailable)
3899  return 1;
3900  return 0;
3901 }
3902 
3903 static void reset_eagain(void)
3904 {
3905  int i;
3906  for (i = 0; i < nb_input_files; i++)
3907  input_files[i]->eagain = 0;
3908  for (i = 0; i < nb_output_streams; i++)
3909  output_streams[i]->unavailable = 0;
3910 }
3911 
3912 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
3913 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3914  AVRational time_base)
3915 {
3916  int ret;
3917 
3918  if (!*duration) {
3919  *duration = tmp;
3920  return tmp_time_base;
3921  }
3922 
3923  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3924  if (ret < 0) {
3925  *duration = tmp;
3926  return tmp_time_base;
3927  }
3928 
3929  return time_base;
3930 }
3931 
/*
 * seek_to_start(): rewind an input file for -stream_loop — seek back to its
 * start time, flush all decoders, then extend ifile->duration by one
 * iteration's length (last-frame duration plus max_pts - min_pts) so output
 * timestamps keep increasing across loop iterations. When any audio stream
 * produced samples, the audio duration is used; video frame length is
 * considered unreliable otherwise. Decrements ifile->loop when positive.
 * NOTE(review): the signature line (original 3932, presumably
 * "static int seek_to_start(InputFile *ifile, AVFormatContext *is)") is
 * missing from this extraction; comments only.
 */
3933 {
3934  InputStream *ist;
3935  AVCodecContext *avctx;
3936  int i, ret, has_audio = 0;
3937  int64_t duration = 0;
3938 
3939  ret = av_seek_frame(is, -1, is->start_time, 0);
3940  if (ret < 0)
3941  return ret;
3942 
3943  for (i = 0; i < ifile->nb_streams; i++) {
3944  ist = input_streams[ifile->ist_index + i];
3945  avctx = ist->dec_ctx;
3946 
3947  // flush decoders
3948  if (ist->decoding_needed) {
3949  process_input_packet(ist, NULL, 1);
3950  avcodec_flush_buffers(avctx);
3951  }
3952 
3953  /* duration is the length of the last frame in a stream
3954  * when audio stream is present we don't care about
3955  * last video frame length because it's not defined exactly */
3956  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
3957  has_audio = 1;
3958  }
3959 
3960  for (i = 0; i < ifile->nb_streams; i++) {
3961  ist = input_streams[ifile->ist_index + i];
3962  avctx = ist->dec_ctx;
3963 
3964  if (has_audio) {
3965  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3966  AVRational sample_rate = {1, avctx->sample_rate};
3967 
       /* last audio frame length = samples decoded / sample rate */
3968  duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
3969  } else
3970  continue;
3971  } else {
       /* no audio: approximate last-frame length from the frame rate,
        * falling back to 1 time-base unit when no rate is known */
3972  if (ist->framerate.num) {
3973  duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3974  } else if (ist->st->avg_frame_rate.num) {
3975  duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3976  } else duration = 1;
3977  }
3978  if (!ifile->duration)
3979  ifile->time_base = ist->st->time_base;
3980  /* the total duration of the stream, max_pts - min_pts is
3981  * the duration of the stream without the last frame */
3982  duration += ist->max_pts - ist->min_pts;
3983  ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
3984  ifile->time_base);
3985  }
3986 
3987  if (ifile->loop > 0)
3988  ifile->loop--;
3989 
3990  return ret;
3991 }
3992 
3993 /*
3994  * Return
3995  * - 0 -- one packet was read and processed
3996  * - AVERROR(EAGAIN) -- no packets were available for selected file,
3997  * this function should be called again
3998  * - AVERROR_EOF -- this function should not be called again
3999  */
4000 static int process_input(int file_index)
4001 {
4002  InputFile *ifile = input_files[file_index];
4003  AVFormatContext *is;
4004  InputStream *ist;
4005  AVPacket pkt;
4006  int ret, i, j;
4007  int64_t duration;
4008  int64_t pkt_dts;
4009 
4010  is = ifile->ctx;
4011  ret = get_input_packet(ifile, &pkt);
4012 
4013  if (ret == AVERROR(EAGAIN)) {
4014  ifile->eagain = 1;
4015  return ret;
4016  }
4017  if (ret < 0 && ifile->loop) {
4018  if ((ret = seek_to_start(ifile, is)) < 0)
4019  return ret;
4020  ret = get_input_packet(ifile, &pkt);
4021  if (ret == AVERROR(EAGAIN)) {
4022  ifile->eagain = 1;
4023  return ret;
4024  }
4025  }
4026  if (ret < 0) {
4027  if (ret != AVERROR_EOF) {
4028  print_error(is->filename, ret);
4029  if (exit_on_error)
4030  exit_program(1);
4031  }
4032 
4033  for (i = 0; i < ifile->nb_streams; i++) {
4034  ist = input_streams[ifile->ist_index + i];
4035  if (ist->decoding_needed) {
4036  ret = process_input_packet(ist, NULL, 0);
4037  if (ret>0)
4038  return 0;
4039  }
4040 
4041  /* mark all outputs that don't go through lavfi as finished */
4042  for (j = 0; j < nb_output_streams; j++) {
4043  OutputStream *ost = output_streams[j];
4044 
4045  if (ost->source_index == ifile->ist_index + i &&
4046  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4047  finish_output_stream(ost);
4048  }
4049  }
4050 
4051  ifile->eof_reached = 1;
4052  return AVERROR(EAGAIN);
4053  }
4054 
4055  reset_eagain();
4056 
4057  if (do_pkt_dump) {
4059  is->streams[pkt.stream_index]);
4060  }
4061  /* the following test is needed in case new streams appear
4062  dynamically in stream : we ignore them */
4063  if (pkt.stream_index >= ifile->nb_streams) {
4064  report_new_stream(file_index, &pkt);
4065  goto discard_packet;
4066  }
4067 
4068  ist = input_streams[ifile->ist_index + pkt.stream_index];
4069 
4070  ist->data_size += pkt.size;
4071  ist->nb_packets++;
4072 
4073  if (ist->discard)
4074  goto discard_packet;
4075 
4076  if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4077  av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4078  exit_program(1);
4079  }
4080 
4081  if (debug_ts) {
4082  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4083  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4087  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4088  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4089  av_ts2str(input_files[ist->file_index]->ts_offset),
4090  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4091  }
4092 
4093  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4094  int64_t stime, stime2;
4095  // Correcting starttime based on the enabled streams
4096  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4097  // so we instead do it here as part of discontinuity handling
4098  if ( ist->next_dts == AV_NOPTS_VALUE
4099  && ifile->ts_offset == -is->start_time
4100  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4101  int64_t new_start_time = INT64_MAX;
4102  for (i=0; i<is->nb_streams; i++) {
4103  AVStream *st = is->streams[i];
4104  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4105  continue;
4106  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4107  }
4108  if (new_start_time > is->start_time) {
4109  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4110  ifile->ts_offset = -new_start_time;
4111  }
4112  }
4113 
4114  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4115  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4116  ist->wrap_correction_done = 1;
4117 
4118  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4119  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4120  ist->wrap_correction_done = 0;
4121  }
4122  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4123  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4124  ist->wrap_correction_done = 0;
4125  }
4126  }
4127 
4128  /* add the stream-global side data to the first packet */
4129  if (ist->nb_packets == 1) {
4130  if (ist->st->nb_side_data)
4132  for (i = 0; i < ist->st->nb_side_data; i++) {
4133  AVPacketSideData *src_sd = &ist->st->side_data[i];
4134  uint8_t *dst_data;
4135 
4136  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4137  continue;
4138  if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4139  continue;
4140 
4141  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4142  if (!dst_data)
4143  exit_program(1);
4144 
4145  memcpy(dst_data, src_sd->data, src_sd->size);
4146  }
4147  }
4148 
4149  if (pkt.dts != AV_NOPTS_VALUE)
4150  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4151  if (pkt.pts != AV_NOPTS_VALUE)
4152  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4153 
4154  if (pkt.pts != AV_NOPTS_VALUE)
4155  pkt.pts *= ist->ts_scale;
4156  if (pkt.dts != AV_NOPTS_VALUE)
4157  pkt.dts *= ist->ts_scale;
4158 
4160  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4162  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4163  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4164  int64_t delta = pkt_dts - ifile->last_ts;
4165  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4166  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4167  ifile->ts_offset -= delta;
4169  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4170  delta, ifile->ts_offset);
4171  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4172  if (pkt.pts != AV_NOPTS_VALUE)
4173  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4174  }
4175  }
4176 
4177  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4178  if (pkt.pts != AV_NOPTS_VALUE) {
4179  pkt.pts += duration;
4180  ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4181  ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4182  }
4183 
4184  if (pkt.dts != AV_NOPTS_VALUE)
4185  pkt.dts += duration;
4186 
4188  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4190  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4191  !copy_ts) {
4192  int64_t delta = pkt_dts - ist->next_dts;
4193  if (is->iformat->flags & AVFMT_TS_DISCONT) {
4194  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4195  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4196  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4197  ifile->ts_offset -= delta;
4199  "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4200  delta, ifile->ts_offset);
4201  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4202  if (pkt.pts != AV_NOPTS_VALUE)
4203  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4204  }
4205  } else {
4206  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4207  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4208  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4209  pkt.dts = AV_NOPTS_VALUE;
4210  }
4211  if (pkt.pts != AV_NOPTS_VALUE){
4212  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4213  delta = pkt_pts - ist->next_dts;
4214  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4215  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4216  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4217  pkt.pts = AV_NOPTS_VALUE;
4218  }
4219  }
4220  }
4221  }
4222 
4223  if (pkt.dts != AV_NOPTS_VALUE)
4224  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4225 
4226  if (debug_ts) {
4227  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4229  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4230  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4231  av_ts2str(input_files[ist->file_index]->ts_offset),
4232  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4233  }
4234 
4235  sub2video_heartbeat(ist, pkt.pts);
4236 
4237  process_input_packet(ist, &pkt, 0);
4238 
4239 discard_packet:
4240  av_packet_unref(&pkt);
4241 
4242  return 0;
4243 }
4244 
4245 /**
4246  * Perform a step of transcoding for the specified filter graph.
4247  *
4248  * @param[in] graph filter graph to consider
4249  * @param[out] best_ist input stream where a frame would allow to continue
4250  * @return 0 for success, <0 for error
4251  */
4252 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4253 {
4254  int i, ret;
4255  int nb_requests, nb_requests_max = 0;
4256  InputFilter *ifilter;
4257  InputStream *ist;
4258 
4259  *best_ist = NULL;
4260  ret = avfilter_graph_request_oldest(graph->graph);
4261  if (ret >= 0)
4262  return reap_filters(0);
4263 
4264  if (ret == AVERROR_EOF) {
4265  ret = reap_filters(1);
4266  for (i = 0; i < graph->nb_outputs; i++)
4267  close_output_stream(graph->outputs[i]->ost);
4268  return ret;
4269  }
4270  if (ret != AVERROR(EAGAIN))
4271  return ret;
4272 
4273  for (i = 0; i < graph->nb_inputs; i++) {
4274  ifilter = graph->inputs[i];
4275  ist = ifilter->ist;
4276  if (input_files[ist->file_index]->eagain ||
4277  input_files[ist->file_index]->eof_reached)
4278  continue;
4279  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4280  if (nb_requests > nb_requests_max) {
4281  nb_requests_max = nb_requests;
4282  *best_ist = ist;
4283  }
4284  }
4285 
4286  if (!*best_ist)
4287  for (i = 0; i < graph->nb_outputs; i++)
4288  graph->outputs[i]->ost->unavailable = 1;
4289 
4290  return 0;
4291 }
4292 
4293 /**
4294  * Run a single step of transcoding.
4295  *
4296  * @return 0 for success, <0 for error
4297  */
4298 static int transcode_step(void)
4299 {
4300  OutputStream *ost;
4301  InputStream *ist;
4302  int ret;
4303 
4304  ost = choose_output();
4305  if (!ost) {
4306  if (got_eagain()) {
4307  reset_eagain();
4308  av_usleep(10000);
4309  return 0;
4310  }
4311  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4312  return AVERROR_EOF;
4313  }
4314 
4315  if (ost->filter) {
4316  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4317  return ret;
4318  if (!ist)
4319  return 0;
4320  } else {
4321  av_assert0(ost->source_index >= 0);
4322  ist = input_streams[ost->source_index];
4323  }
4324 
4325  ret = process_input(ist->file_index);
4326  if (ret == AVERROR(EAGAIN)) {
4327  if (input_files[ist->file_index]->eagain)
4328  ost->unavailable = 1;
4329  return 0;
4330  }
4331 
4332  if (ret < 0)
4333  return ret == AVERROR_EOF ? 0 : ret;
4334 
4335  return reap_filters(0);
4336 }
4337 
4338 /*
4339  * The following code is the main loop of the file converter
4340  */
4341 static int transcode(void)
4342 {
4343  int ret, i;
4344  AVFormatContext *os;
4345  OutputStream *ost;
4346  InputStream *ist;
4347  int64_t timer_start;
4348  int64_t total_packets_written = 0;
4349 
4350  ret = transcode_init();
4351  if (ret < 0)
4352  goto fail;
4353 
4354  if (stdin_interaction) {
4355  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4356  }
4357 
4358  timer_start = av_gettime_relative();
4359 
4360 #if HAVE_PTHREADS
4361  if ((ret = init_input_threads()) < 0)
4362  goto fail;
4363 #endif
4364 
4365  while (!received_sigterm) {
4366  int64_t cur_time= av_gettime_relative();
4367 
4368  /* if 'q' pressed, exits */
4369  if (stdin_interaction)
4370  if (check_keyboard_interaction(cur_time) < 0)
4371  break;
4372 
4373  /* check if there's any stream where output is still needed */
4374  if (!need_output()) {
4375  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4376  break;
4377  }
4378 
4379  ret = transcode_step();
4380  if (ret < 0 && ret != AVERROR_EOF) {
4381  char errbuf[128];
4382  av_strerror(ret, errbuf, sizeof(errbuf));
4383 
4384  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4385  break;
4386  }
4387 
4388  /* dump report by using the output first video and audio streams */
4389  print_report(0, timer_start, cur_time);
4390  }
4391 #if HAVE_PTHREADS
4392  free_input_threads();
4393 #endif
4394 
4395  /* at the end of stream, we must flush the decoder buffers */
4396  for (i = 0; i < nb_input_streams; i++) {
4397  ist = input_streams[i];
4398  if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4399  process_input_packet(ist, NULL, 0);
4400  }
4401  }
4402  flush_encoders();
4403 
4404  term_exit();
4405 
4406  /* write the trailer if needed and close file */
4407  for (i = 0; i < nb_output_files; i++) {
4408  os = output_files[i]->ctx;
4409  if (!output_files[i]->header_written) {
4411  "Nothing was written into output file %d (%s), because "
4412  "at least one of its streams received no packets.\n",
4413  i, os->filename);
4414  continue;
4415  }
4416  if ((ret = av_write_trailer(os)) < 0) {
4417  av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4418  if (exit_on_error)
4419  exit_program(1);
4420  }
4421  }
4422 
4423  /* dump report by using the first video and audio streams */
4424  print_report(1, timer_start, av_gettime_relative());
4425 
4426  /* close each encoder */
4427  for (i = 0; i < nb_output_streams; i++) {
4428  ost = output_streams[i];
4429  if (ost->encoding_needed) {
4430  av_freep(&ost->enc_ctx->stats_in);
4431  }
4432  total_packets_written += ost->packets_written;
4433  }
4434 
4435  if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4436  av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4437  exit_program(1);
4438  }
4439 
4440  /* close each decoder */
4441  for (i = 0; i < nb_input_streams; i++) {
4442  ist = input_streams[i];
4443  if (ist->decoding_needed) {
4444  avcodec_close(ist->dec_ctx);
4445  if (ist->hwaccel_uninit)
4446  ist->hwaccel_uninit(ist->dec_ctx);
4447  }
4448  }
4449 
4451 
4452  /* finished ! */
4453  ret = 0;
4454 
4455  fail:
4456 #if HAVE_PTHREADS
4457  free_input_threads();
4458 #endif
4459 
4460  if (output_streams) {
4461  for (i = 0; i < nb_output_streams; i++) {
4462  ost = output_streams[i];
4463  if (ost) {
4464  if (ost->logfile) {
4465  if (fclose(ost->logfile))
4467  "Error closing logfile, loss of information possible: %s\n",
4468  av_err2str(AVERROR(errno)));
4469  ost->logfile = NULL;
4470  }
4471  av_freep(&ost->forced_kf_pts);
4472  av_freep(&ost->apad);
4473  av_freep(&ost->disposition);
4474  av_dict_free(&ost->encoder_opts);
4475  av_dict_free(&ost->sws_dict);
4476  av_dict_free(&ost->swr_opts);
4477  av_dict_free(&ost->resample_opts);
4478  }
4479  }
4480  }
4481  return ret;
4482 }
4483 
4484 
/* User-mode CPU time consumed by this process, in microseconds. */
static int64_t getutime(void)
{
#if HAVE_GETRUSAGE
    struct rusage usage;

    getrusage(RUSAGE_SELF, &usage);
    return usage.ru_utime.tv_sec * 1000000LL + usage.ru_utime.tv_usec;
#elif HAVE_GETPROCESSTIMES
    FILETIME creation_ft, exit_ft, kernel_ft, user_ft;

    GetProcessTimes(GetCurrentProcess(), &creation_ft, &exit_ft, &kernel_ft, &user_ft);
    /* FILETIME counts 100 ns ticks; divide by 10 to get microseconds */
    return (((int64_t)user_ft.dwHighDateTime << 32) | user_ft.dwLowDateTime) / 10;
#else
    /* no per-process CPU clock available: fall back to wall-clock time */
    return av_gettime_relative();
#endif
}
4502 
/* Peak resident/pagefile memory usage of this process, in bytes
 * (0 when no platform API is available). */
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage usage;

    getrusage(RUSAGE_SELF, &usage);
    return (int64_t)usage.ru_maxrss * 1024; /* ru_maxrss is in kilobytes */
#elif HAVE_GETPROCESSMEMORYINFO
    PROCESS_MEMORY_COUNTERS counters;

    counters.cb = sizeof(counters);
    GetProcessMemoryInfo(GetCurrentProcess(), &counters, sizeof(counters));
    return counters.PeakPagefileUsage;
#else
    return 0;
#endif
}
4520 
/* Log callback that discards all messages.
 * NOTE(review): presumably installed with av_log_set_callback() for daemon
 * mode ("-d"); the installing call is not visible in this chunk -- confirm. */
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}
4524 
4525 int main(int argc, char **argv)
4526 {
4527  int i, ret;
4528  int64_t ti;
4529 
4530  init_dynload();
4531 
4533 
4534  setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4535 
4537  parse_loglevel(argc, argv, options);
4538 
4539  if(argc>1 && !strcmp(argv[1], "-d")){
4540  run_as_daemon=1;
4542  argc--;
4543  argv++;
4544  }
4545 
4547 #if CONFIG_AVDEVICE
4549 #endif
4551  av_register_all();
4553 
4554  show_banner(argc, argv, options);
4555 
4556  /* parse options and open all input/output files */
4557  ret = ffmpeg_parse_options(argc, argv);
4558  if (ret < 0)
4559  exit_program(1);
4560 
4561  if (nb_output_files <= 0 && nb_input_files == 0) {
4562  show_usage();
4563  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4564  exit_program(1);
4565  }
4566 
4567  /* file converter / grab */
4568  if (nb_output_files <= 0) {
4569  av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4570  exit_program(1);
4571  }
4572 
4573 // if (nb_input_files == 0) {
4574 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4575 // exit_program(1);
4576 // }
4577 
4578  for (i = 0; i < nb_output_files; i++) {
4579  if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
4580  want_sdp = 0;
4581  }
4582 
4583  current_time = ti = getutime();
4584  if (transcode() < 0)
4585  exit_program(1);
4586  ti = getutime() - ti;
4587  if (do_benchmark) {
4588  av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4589  }
4590  av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4593  exit_program(69);
4594 
4596  return main_return_code;
4597 }
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1543
int64_t pts
current pts of the decoded frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:307
int nb_bitstream_filters
Definition: ffmpeg.h:450
#define extra_bits(eb)
Definition: intrax8.c:159
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:900
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:119
uint32_t BOOL
#define FF_DEBUG_VIS_MB_TYPE
only access through AVOptions from outside libavcodec
Definition: avcodec.h:2951
int got_output
Definition: ffmpeg.h:335
#define AV_DISPOSITION_METADATA
Definition: avformat.h:873
void av_bsf_free(AVBSFContext **ctx)
Free a bitstream filter context and everything associated with it; write NULL into the supplied point...
Definition: bsf.c:36
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
Definition: ffmpeg.c:1887
int64_t first_dts
Timestamp corresponding to the last dts sync point.
Definition: avformat.h:1060
int guess_input_channel_layout(InputStream *ist)
Definition: ffmpeg.c:2000
#define NULL
Definition: coverity.c:32
int frame_number
Definition: ffmpeg.h:439
const struct AVCodec * codec
Definition: avcodec.h:1696
Definition: ffmpeg.h:416
AVRational framerate
Definition: avcodec.h:3387
enum AVFieldOrder field_order
Video only.
Definition: avcodec.h:4081
static void do_subtitle_out(OutputFile *of, OutputStream *ost, AVSubtitle *sub)
Definition: ffmpeg.c:925
enum HWAccelID active_hwaccel_id
Definition: ffmpeg.h:362
const char * s
Definition: avisynth_c.h:768
Bytestream IO Context.
Definition: avio.h:147
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static enum AVPixelFormat pix_fmt
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:546
void term_init(void)
Definition: ffmpeg.c:369
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:309
AVCodecParameters * par_out
Parameters of the output stream.
Definition: avcodec.h:5782
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
int64_t av_frame_get_pkt_duration(const AVFrame *frame)
uint8_t * name
Definition: ffmpeg.h:254
int nb_outputs
Definition: ffmpeg.h:283
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:124
AVDictionary * swr_opts
Definition: ffmpeg.h:496
#define DECODING_FOR_FILTER
Definition: ffmpeg.h:293
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2332
int resample_channels
Definition: ffmpeg.h:330
#define av_realloc_f(p, o, n)
This structure describes decoded (raw) audio or video data.
Definition: frame.h:190
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
static int init_output_stream_streamcopy(OutputStream *ost)
Definition: ffmpeg.c:2863