ffmpeg.c
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswresample/swresample.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/channel_layout.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/threadmessage.h"
65 #include "libavcodec/mathops.h"
66 #include "libavformat/os_support.h"
67 
68 # include "libavfilter/avfilter.h"
69 # include "libavfilter/buffersrc.h"
70 # include "libavfilter/buffersink.h"
71 
72 #if HAVE_SYS_RESOURCE_H
73 #include <sys/time.h>
74 #include <sys/types.h>
75 #include <sys/resource.h>
76 #elif HAVE_GETPROCESSTIMES
77 #include <windows.h>
78 #endif
79 #if HAVE_GETPROCESSMEMORYINFO
80 #include <windows.h>
81 #include <psapi.h>
82 #endif
83 #if HAVE_SETCONSOLECTRLHANDLER
84 #include <windows.h>
85 #endif
86 
87 
88 #if HAVE_SYS_SELECT_H
89 #include <sys/select.h>
90 #endif
91 
92 #if HAVE_TERMIOS_H
93 #include <fcntl.h>
94 #include <sys/ioctl.h>
95 #include <sys/time.h>
96 #include <termios.h>
97 #elif HAVE_KBHIT
98 #include <conio.h>
99 #endif
100 
101 #if HAVE_PTHREADS
102 #include <pthread.h>
103 #endif
104 
105 #include <time.h>
106 
107 #include "ffmpeg.h"
108 #include "cmdutils.h"
109 
110 #include "libavutil/avassert.h"
111 
112 const char program_name[] = "ffmpeg";
113 const int program_birth_year = 2000;
114 
115 static FILE *vstats_file;
116 
117 const char *const forced_keyframes_const_names[] = {
118  "n",
119  "n_forced",
120  "prev_forced_n",
121  "prev_forced_t",
122  "t",
123  NULL
124 };
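/* Editorial note (illustrative, not part of the original file): these are the
   variable names made available to expressions passed through the
   -force_key_frames "expr:..." option; they map to the FKF_* constants used
   further below. A hedged usage sketch, assuming the documented expression
   syntax, forcing a keyframe roughly every 5 seconds of output time:
       ffmpeg -i in.mp4 -force_key_frames "expr:gte(t,n_forced*5)" out.mp4
   Here t is the current frame time and n_forced the number of keyframes
   forced so far, as listed above. */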
125 
126 static void do_video_stats(OutputStream *ost, int frame_size);
127 static int64_t getutime(void);
128 static int64_t getmaxrss(void);
129 static int ifilter_has_all_input_formats(FilterGraph *fg);
130 
131 static int run_as_daemon = 0;
132 static int nb_frames_dup = 0;
133 static unsigned dup_warning = 1000;
134 static int nb_frames_drop = 0;
135 static int64_t decode_error_stat[2];
136 
137 static int want_sdp = 1;
138 
139 static int current_time;
140 AVIOContext *progress_avio = NULL;
141 
142 static uint8_t *subtitle_out;
143 
144 InputStream **input_streams = NULL;
145 int        nb_input_streams = 0;
146 InputFile   **input_files   = NULL;
147 int        nb_input_files   = 0;
148 
149 OutputStream **output_streams = NULL;
150 int         nb_output_streams = 0;
151 OutputFile   **output_files   = NULL;
152 int         nb_output_files   = 0;
153 
154 FilterGraph **filtergraphs;
155 int        nb_filtergraphs;
156 
157 #if HAVE_TERMIOS_H
158 
159 /* init terminal so that we can grab keys */
160 static struct termios oldtty;
161 static int restore_tty;
162 #endif
163 
164 #if HAVE_PTHREADS
165 static void free_input_threads(void);
166 #endif
167 
168 /* sub2video hack:
169  Convert subtitles to video with alpha to insert them in filter graphs.
170  This is a temporary solution until libavfilter gets real subtitles support.
171  */
172 
173 static int sub2video_get_blank_frame(InputStream *ist)
174 {
175  int ret;
176  AVFrame *frame = ist->sub2video.frame;
177 
178  av_frame_unref(frame);
179  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
180  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
181  ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
182  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
183  return ret;
184  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
185  return 0;
186 }
187 
188 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
189  AVSubtitleRect *r)
190 {
191  uint32_t *pal, *dst2;
192  uint8_t *src, *src2;
193  int x, y;
194 
195  if (r->type != SUBTITLE_BITMAP) {
196  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
197  return;
198  }
199  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
200  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
201  r->x, r->y, r->w, r->h, w, h
202  );
203  return;
204  }
205 
206  dst += r->y * dst_linesize + r->x * 4;
207  src = r->data[0];
208  pal = (uint32_t *)r->data[1];
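 /* The subtitle rectangle is stored as PAL8: one byte per pixel indexing a
    32-bit RGBA palette (r->data[1]). The loop below expands it into the
    RGB32 canvas so libavfilter can treat the rendered subtitle as ordinary
    video data. */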
209  for (y = 0; y < r->h; y++) {
210  dst2 = (uint32_t *)dst;
211  src2 = src;
212  for (x = 0; x < r->w; x++)
213  *(dst2++) = pal[*(src2++)];
214  dst += dst_linesize;
215  src += r->linesize[0];
216  }
217 }
218 
219 static void sub2video_push_ref(InputStream *ist, int64_t pts)
220 {
221  AVFrame *frame = ist->sub2video.frame;
222  int i;
223  int ret;
224 
225  av_assert1(frame->data[0]);
226  ist->sub2video.last_pts = frame->pts = pts;
227  for (i = 0; i < ist->nb_filters; i++) {
228  ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
229  AV_BUFFERSRC_FLAG_KEEP_REF |
230  AV_BUFFERSRC_FLAG_PUSH);
231  if (ret != AVERROR_EOF && ret < 0)
232  av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
233  av_err2str(ret));
234  }
235 }
236 
237 void sub2video_update(InputStream *ist, AVSubtitle *sub)
238 {
239  AVFrame *frame = ist->sub2video.frame;
240  int8_t *dst;
241  int dst_linesize;
242  int num_rects, i;
243  int64_t pts, end_pts;
244 
245  if (!frame)
246  return;
247  if (sub) {
248  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
249  AV_TIME_BASE_Q, ist->st->time_base);
250  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
251  AV_TIME_BASE_Q, ist->st->time_base);
252  num_rects = sub->num_rects;
253  } else {
254  pts = ist->sub2video.end_pts;
255  end_pts = INT64_MAX;
256  num_rects = 0;
257  }
258  if (sub2video_get_blank_frame(ist) < 0) {
259  av_log(ist->dec_ctx, AV_LOG_ERROR,
260  "Impossible to get a blank canvas.\n");
261  return;
262  }
263  dst = frame->data [0];
264  dst_linesize = frame->linesize[0];
265  for (i = 0; i < num_rects; i++)
266  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
267  sub2video_push_ref(ist, pts);
268  ist->sub2video.end_pts = end_pts;
269 }
270 
271 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
272 {
273  InputFile *infile = input_files[ist->file_index];
274  int i, j, nb_reqs;
275  int64_t pts2;
276 
277  /* When a frame is read from a file, examine all sub2video streams in
278  the same file and send the sub2video frame again. Otherwise, decoded
279  video frames could be accumulating in the filter graph while a filter
280  (possibly overlay) is desperately waiting for a subtitle frame. */
281  for (i = 0; i < infile->nb_streams; i++) {
282  InputStream *ist2 = input_streams[infile->ist_index + i];
283  if (!ist2->sub2video.frame)
284  continue;
285  /* subtitles seem to be usually muxed ahead of other streams;
286  if not, subtracting a larger time here is necessary */
287  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
288  /* do not send the heartbeat frame if the subtitle is already ahead */
289  if (pts2 <= ist2->sub2video.last_pts)
290  continue;
291  if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
292  sub2video_update(ist2, NULL);
293  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
294  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
295  if (nb_reqs)
296  sub2video_push_ref(ist2, pts2);
297  }
298 }
299 
300 static void sub2video_flush(InputStream *ist)
301 {
302  int i;
303  int ret;
304 
305  if (ist->sub2video.end_pts < INT64_MAX)
306  sub2video_update(ist, NULL);
307  for (i = 0; i < ist->nb_filters; i++) {
308  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
309  if (ret != AVERROR_EOF && ret < 0)
310  av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
311  }
312 }
313 
314 /* end of sub2video hack */
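/* Editorial note (an illustrative assumption, not taken from this file): the
   sub2video path above is exercised when bitmap subtitles are fed into a
   filter graph, for example when hard-burning DVD/DVB subtitles with
   something along the lines of
       ffmpeg -i in.mkv -filter_complex "[0:v][0:s:0]overlay" out.mkv
   Each decoded AVSubtitle is rendered onto a transparent RGB32 frame and
   pushed into the graph as if it were a regular video stream, with
   sub2video_heartbeat() re-sending the last frame so overlay never stalls
   waiting for subtitle input. */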
315 
316 static void term_exit_sigsafe(void)
317 {
318 #if HAVE_TERMIOS_H
319  if(restore_tty)
320  tcsetattr (0, TCSANOW, &oldtty);
321 #endif
322 }
323 
324 void term_exit(void)
325 {
326  av_log(NULL, AV_LOG_QUIET, "%s", "");
327  term_exit_sigsafe();
328 }
329 
330 static volatile int received_sigterm = 0;
331 static volatile int received_nb_signals = 0;
332 static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
333 static volatile int ffmpeg_exited = 0;
334 static int main_return_code = 0;
335 
336 static void
337 sigterm_handler(int sig)
338 {
339  int ret;
340  received_sigterm = sig;
341  received_nb_signals++;
342  term_exit_sigsafe();
343  if(received_nb_signals > 3) {
344  ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
345  strlen("Received > 3 system signals, hard exiting\n"));
346  if (ret < 0) { /* Do nothing */ };
347  exit(123);
348  }
349 }
350 
351 #if HAVE_SETCONSOLECTRLHANDLER
352 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
353 {
354  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
355 
356  switch (fdwCtrlType)
357  {
358  case CTRL_C_EVENT:
359  case CTRL_BREAK_EVENT:
360  sigterm_handler(SIGINT);
361  return TRUE;
362 
363  case CTRL_CLOSE_EVENT:
364  case CTRL_LOGOFF_EVENT:
365  case CTRL_SHUTDOWN_EVENT:
366  sigterm_handler(SIGTERM);
367  /* Basically, with these 3 events, when we return from this method the
368  process is hard terminated, so stall as long as we need to
369  to try and let the main thread(s) clean up and gracefully terminate
370  (we have at most 5 seconds, but should be done far before that). */
371  while (!ffmpeg_exited) {
372  Sleep(0);
373  }
374  return TRUE;
375 
376  default:
377  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
378  return FALSE;
379  }
380 }
381 #endif
382 
383 void term_init(void)
384 {
385 #if HAVE_TERMIOS_H
386  if (!run_as_daemon && stdin_interaction) {
387  struct termios tty;
388  if (tcgetattr (0, &tty) == 0) {
389  oldtty = tty;
390  restore_tty = 1;
391 
392  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
393  |INLCR|IGNCR|ICRNL|IXON);
394  tty.c_oflag |= OPOST;
395  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
396  tty.c_cflag &= ~(CSIZE|PARENB);
397  tty.c_cflag |= CS8;
398  tty.c_cc[VMIN] = 1;
399  tty.c_cc[VTIME] = 0;
400 
401  tcsetattr (0, TCSANOW, &tty);
402  }
403  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
404  }
405 #endif
406 
407  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
408  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
409 #ifdef SIGXCPU
410  signal(SIGXCPU, sigterm_handler);
411 #endif
412 #if HAVE_SETCONSOLECTRLHANDLER
413  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
414 #endif
415 }
416 
417 /* read a key without blocking */
418 static int read_key(void)
419 {
420  unsigned char ch;
421 #if HAVE_TERMIOS_H
422  int n = 1;
423  struct timeval tv;
424  fd_set rfds;
425 
426  FD_ZERO(&rfds);
427  FD_SET(0, &rfds);
428  tv.tv_sec = 0;
429  tv.tv_usec = 0;
430  n = select(1, &rfds, NULL, NULL, &tv);
431  if (n > 0) {
432  n = read(0, &ch, 1);
433  if (n == 1)
434  return ch;
435 
436  return n;
437  }
438 #elif HAVE_KBHIT
439 # if HAVE_PEEKNAMEDPIPE
440  static int is_pipe;
441  static HANDLE input_handle;
442  DWORD dw, nchars;
443  if(!input_handle){
444  input_handle = GetStdHandle(STD_INPUT_HANDLE);
445  is_pipe = !GetConsoleMode(input_handle, &dw);
446  }
447 
448  if (is_pipe) {
449  /* When running under a GUI, you will end here. */
450  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
451  // input pipe may have been closed by the program that ran ffmpeg
452  return -1;
453  }
454  //Read it
455  if(nchars != 0) {
456  read(0, &ch, 1);
457  return ch;
458  }else{
459  return -1;
460  }
461  }
462 # endif
463  if(kbhit())
464  return(getch());
465 #endif
466  return -1;
467 }
468 
469 static int decode_interrupt_cb(void *ctx)
470 {
471  return received_nb_signals > atomic_load(&transcode_init_done);
472 }
473 
474 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
475 
476 static void ffmpeg_cleanup(int ret)
477 {
478  int i, j;
479 
480  if (do_benchmark) {
481  int maxrss = getmaxrss() / 1024;
482  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
483  }
484 
485  for (i = 0; i < nb_filtergraphs; i++) {
486  FilterGraph *fg = filtergraphs[i];
488  for (j = 0; j < fg->nb_inputs; j++) {
489  while (av_fifo_size(fg->inputs[j]->frame_queue)) {
490  AVFrame *frame;
491  av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
492  sizeof(frame), NULL);
493  av_frame_free(&frame);
494  }
495  av_fifo_freep(&fg->inputs[j]->frame_queue);
496  if (fg->inputs[j]->ist->sub2video.sub_queue) {
497  while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
498  AVSubtitle sub;
499  av_fifo_generic_read(fg->inputs[j]->ist->sub2video.sub_queue,
500  &sub, sizeof(sub), NULL);
501  avsubtitle_free(&sub);
502  }
504  }
506  av_freep(&fg->inputs[j]->name);
507  av_freep(&fg->inputs[j]);
508  }
509  av_freep(&fg->inputs);
510  for (j = 0; j < fg->nb_outputs; j++) {
511  av_freep(&fg->outputs[j]->name);
512  av_freep(&fg->outputs[j]->formats);
513  av_freep(&fg->outputs[j]->channel_layouts);
514  av_freep(&fg->outputs[j]->sample_rates);
515  av_freep(&fg->outputs[j]);
516  }
517  av_freep(&fg->outputs);
518  av_freep(&fg->graph_desc);
519 
520  av_freep(&filtergraphs[i]);
521  }
522  av_freep(&filtergraphs);
523 
525 
526  /* close files */
527  for (i = 0; i < nb_output_files; i++) {
528  OutputFile *of = output_files[i];
529  AVFormatContext *s;
530  if (!of)
531  continue;
532  s = of->ctx;
533  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
534  avio_closep(&s->pb);
536  av_dict_free(&of->opts);
537 
538  av_freep(&output_files[i]);
539  }
540  for (i = 0; i < nb_output_streams; i++) {
541  OutputStream *ost = output_streams[i];
542 
543  if (!ost)
544  continue;
545 
546  for (j = 0; j < ost->nb_bitstream_filters; j++)
547  av_bsf_free(&ost->bsf_ctx[j]);
548  av_freep(&ost->bsf_ctx);
549 
551  av_frame_free(&ost->last_frame);
552  av_dict_free(&ost->encoder_opts);
553 
554  av_parser_close(ost->parser);
556 
557  av_freep(&ost->forced_keyframes);
559  av_freep(&ost->avfilter);
560  av_freep(&ost->logfile_prefix);
561 
563  ost->audio_channels_mapped = 0;
564 
565  av_dict_free(&ost->sws_dict);
566 
569 
570  if (ost->muxing_queue) {
571  while (av_fifo_size(ost->muxing_queue)) {
572  AVPacket pkt;
573  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
574  av_packet_unref(&pkt);
575  }
577  }
578 
579  av_freep(&output_streams[i]);
580  }
581 #if HAVE_PTHREADS
582  free_input_threads();
583 #endif
584  for (i = 0; i < nb_input_files; i++) {
585  avformat_close_input(&input_files[i]->ctx);
586  av_freep(&input_files[i]);
587  }
588  for (i = 0; i < nb_input_streams; i++) {
589  InputStream *ist = input_streams[i];
590 
593  av_dict_free(&ist->decoder_opts);
596  av_freep(&ist->filters);
597  av_freep(&ist->hwaccel_device);
598  av_freep(&ist->dts_buffer);
599 
601 
602  av_freep(&input_streams[i]);
603  }
604 
605  if (vstats_file) {
606  if (fclose(vstats_file))
607  av_log(NULL, AV_LOG_ERROR,
608  "Error closing vstats file, loss of information possible: %s\n",
609  av_err2str(AVERROR(errno)));
610  }
612 
613  av_freep(&input_streams);
614  av_freep(&input_files);
615  av_freep(&output_streams);
616  av_freep(&output_files);
617 
618  uninit_opts();
619 
621 
622  if (received_sigterm) {
623  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
624  (int) received_sigterm);
625  } else if (ret && atomic_load(&transcode_init_done)) {
626  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
627  }
628  term_exit();
629  ffmpeg_exited = 1;
630 }
631 
632 static void remove_avoptions(AVDictionary **a, AVDictionary *b)
633 {
634  AVDictionaryEntry *t = NULL;
635 
636  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
637  av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
638  }
639 }
640 
641 void assert_avoptions(AVDictionary *m)
642 {
643  AVDictionaryEntry *t;
644  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
645  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
646  exit_program(1);
647  }
648 }
649 
650 static void abort_codec_experimental(AVCodec *c, int encoder)
651 {
652  exit_program(1);
653 }
654 
655 static void update_benchmark(const char *fmt, ...)
656 {
657  if (do_benchmark_all) {
658  int64_t t = getutime();
659  va_list va;
660  char buf[1024];
661 
662  if (fmt) {
663  va_start(va, fmt);
664  vsnprintf(buf, sizeof(buf), fmt, va);
665  va_end(va);
666  av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
667  }
668  current_time = t;
669  }
670 }
671 
672 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
673 {
674  int i;
675  for (i = 0; i < nb_output_streams; i++) {
676  OutputStream *ost2 = output_streams[i];
677  ost2->finished |= ost == ost2 ? this_stream : others;
678  }
679 }
680 
681 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
682 {
683  AVFormatContext *s = of->ctx;
684  AVStream *st = ost->st;
685  int ret;
686 
687  /*
688  * Audio encoders may split the packets -- #frames in != #packets out.
689  * But there is no reordering, so we can limit the number of output packets
690  * by simply dropping them here.
691  * Counting encoded video frames needs to be done separately because of
692  * reordering, see do_video_out().
693  * Do not count the packet when unqueued because it has been counted when queued.
694  */
695  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
696  if (ost->frame_number >= ost->max_frames) {
697  av_packet_unref(pkt);
698  return;
699  }
700  ost->frame_number++;
701  }
702 
703  if (!of->header_written) {
704  AVPacket tmp_pkt = {0};
705  /* the muxer is not initialized yet, buffer the packet */
706  if (!av_fifo_space(ost->muxing_queue)) {
707  int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
708  ost->max_muxing_queue_size);
709  if (new_size <= av_fifo_size(ost->muxing_queue)) {
710  av_log(NULL, AV_LOG_ERROR,
711  "Too many packets buffered for output stream %d:%d.\n",
712  ost->file_index, ost->st->index);
713  exit_program(1);
714  }
715  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
716  if (ret < 0)
717  exit_program(1);
718  }
719  ret = av_packet_ref(&tmp_pkt, pkt);
720  if (ret < 0)
721  exit_program(1);
722  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
723  av_packet_unref(pkt);
724  return;
725  }
726 
727  if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
728  (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
729  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
730 
731  if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
732  int i;
733  uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
734  NULL);
735  ost->quality = sd ? AV_RL32(sd) : -1;
736  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
737 
738  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
739  if (sd && i < sd[5])
740  ost->error[i] = AV_RL64(sd + 8 + 8*i);
741  else
742  ost->error[i] = -1;
743  }
744 
745  if (ost->frame_rate.num && ost->is_cfr) {
746  if (pkt->duration > 0)
747  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
748  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
749  ost->mux_timebase);
750  }
751  }
752 
753  av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
754 
755  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
756  if (pkt->dts != AV_NOPTS_VALUE &&
757  pkt->pts != AV_NOPTS_VALUE &&
758  pkt->dts > pkt->pts) {
759  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
760  pkt->dts, pkt->pts,
761  ost->file_index, ost->st->index);
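 /* a + b + c - min(a,b,c) - max(a,b,c) leaves the middle value, so the
    replacement timestamp computed below is the median of pkt->pts, pkt->dts
    and last_mux_dts + 1. */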
762  pkt->pts =
763  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
764  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
765  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
766  }
767  if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) &&
768  pkt->dts != AV_NOPTS_VALUE &&
769  !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
770  ost->last_mux_dts != AV_NOPTS_VALUE) {
771  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
772  if (pkt->dts < max) {
773  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
774  av_log(s, loglevel, "Non-monotonous DTS in output stream "
775  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
776  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
777  if (exit_on_error) {
778  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
779  exit_program(1);
780  }
781  av_log(s, loglevel, "changing to %"PRId64". This may result "
782  "in incorrect timestamps in the output file.\n",
783  max);
784  if (pkt->pts >= pkt->dts)
785  pkt->pts = FFMAX(pkt->pts, max);
786  pkt->dts = max;
787  }
788  }
789  }
790  ost->last_mux_dts = pkt->dts;
791 
792  ost->data_size += pkt->size;
793  ost->packets_written++;
794 
795  pkt->stream_index = ost->index;
796 
797  if (debug_ts) {
798  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
799  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
801  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
802  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
803  pkt->size
804  );
805  }
806 
807  ret = av_interleaved_write_frame(s, pkt);
808  if (ret < 0) {
809  print_error("av_interleaved_write_frame()", ret);
810  main_return_code = 1;
811  close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
812  }
813  av_packet_unref(pkt);
814 }
815 
816 static void close_output_stream(OutputStream *ost)
817 {
818  OutputFile *of = output_files[ost->file_index];
819 
820  ost->finished |= ENCODER_FINISHED;
821  if (of->shortest) {
822  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
823  of->recording_time = FFMIN(of->recording_time, end);
824  }
825 }
826 
827 /*
828  * Send a single packet to the output, applying any bitstream filters
829  * associated with the output stream. This may result in any number
830  * of packets actually being written, depending on what bitstream
831  * filters are applied. The supplied packet is consumed and will be
832  * blank (as if newly-allocated) when this function returns.
833  *
834  * If eof is set, instead indicate EOF to all bitstream filters and
835  * therefore flush any delayed packets to the output. A blank packet
836  * must be supplied in this case.
837  */
838 static void output_packet(OutputFile *of, AVPacket *pkt,
839  OutputStream *ost, int eof)
840 {
841  int ret = 0;
842 
843  /* apply the output bitstream filters, if any */
844  if (ost->nb_bitstream_filters) {
845  int idx;
846 
847  ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
848  if (ret < 0)
849  goto finish;
850 
851  eof = 0;
852  idx = 1;
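 /* idx is the position in the bitstream filter chain currently being
    drained: packets are received from bsf_ctx[idx - 1] and fed either to
    bsf_ctx[idx] or, once idx reaches the end of the chain, to the muxer.
    EAGAIN from a filter means it needs more input, so we step back up the
    chain; EOF is propagated downstream to flush delayed packets. */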
853  while (idx) {
854  /* get a packet from the previous filter up the chain */
855  ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
856  if (ret == AVERROR(EAGAIN)) {
857  ret = 0;
858  idx--;
859  continue;
860  } else if (ret == AVERROR_EOF) {
861  eof = 1;
862  } else if (ret < 0)
863  goto finish;
864 
865  /* send it to the next filter down the chain or to the muxer */
866  if (idx < ost->nb_bitstream_filters) {
867  ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
868  if (ret < 0)
869  goto finish;
870  idx++;
871  eof = 0;
872  } else if (eof)
873  goto finish;
874  else
875  write_packet(of, pkt, ost, 0);
876  }
877  } else if (!eof)
878  write_packet(of, pkt, ost, 0);
879 
880 finish:
881  if (ret < 0 && ret != AVERROR_EOF) {
882  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
883  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
884  if(exit_on_error)
885  exit_program(1);
886  }
887 }
888 
889 static int check_recording_time(OutputStream *ost)
890 {
891  OutputFile *of = output_files[ost->file_index];
892 
893  if (of->recording_time != INT64_MAX &&
894  av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
895  AV_TIME_BASE_Q) >= 0) {
896  close_output_stream(ost);
897  return 0;
898  }
899  return 1;
900 }
901 
902 static void do_audio_out(OutputFile *of, OutputStream *ost,
903  AVFrame *frame)
904 {
905  AVCodecContext *enc = ost->enc_ctx;
906  AVPacket pkt;
907  int ret;
908 
909  av_init_packet(&pkt);
910  pkt.data = NULL;
911  pkt.size = 0;
912 
913  if (!check_recording_time(ost))
914  return;
915 
916  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
917  frame->pts = ost->sync_opts;
918  ost->sync_opts = frame->pts + frame->nb_samples;
919  ost->samples_encoded += frame->nb_samples;
920  ost->frames_encoded++;
921 
922  av_assert0(pkt.size || !pkt.data);
924  if (debug_ts) {
925  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
926  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
927  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
928  enc->time_base.num, enc->time_base.den);
929  }
930 
931  ret = avcodec_send_frame(enc, frame);
932  if (ret < 0)
933  goto error;
934 
935  while (1) {
936  ret = avcodec_receive_packet(enc, &pkt);
937  if (ret == AVERROR(EAGAIN))
938  break;
939  if (ret < 0)
940  goto error;
941 
942  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
943 
944  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
945 
946  if (debug_ts) {
947  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
948  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
949  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
950  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
951  }
952 
953  output_packet(of, &pkt, ost, 0);
954  }
955 
956  return;
957 error:
958  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
959  exit_program(1);
960 }
961 
962 static void do_subtitle_out(OutputFile *of,
963  OutputStream *ost,
964  AVSubtitle *sub)
965 {
966  int subtitle_out_max_size = 1024 * 1024;
967  int subtitle_out_size, nb, i;
968  AVCodecContext *enc;
969  AVPacket pkt;
970  int64_t pts;
971 
972  if (sub->pts == AV_NOPTS_VALUE) {
973  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
974  if (exit_on_error)
975  exit_program(1);
976  return;
977  }
978 
979  enc = ost->enc_ctx;
980 
981  if (!subtitle_out) {
982  subtitle_out = av_malloc(subtitle_out_max_size);
983  if (!subtitle_out) {
984  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
985  exit_program(1);
986  }
987  }
988 
989  /* Note: DVB subtitle need one packet to draw them and one other
990  packet to clear them */
991  /* XXX: signal it in the codec context ? */
992  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
993  nb = 2;
994  else
995  nb = 1;
996 
997  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
998  pts = sub->pts;
999  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
1000  pts -= output_files[ost->file_index]->start_time;
1001  for (i = 0; i < nb; i++) {
1002  unsigned save_num_rects = sub->num_rects;
1003 
1004  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1005  if (!check_recording_time(ost))
1006  return;
1007 
1008  sub->pts = pts;
1009  // start_display_time is required to be 0
1010  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1011  sub->end_display_time -= sub->start_display_time;
1012  sub->start_display_time = 0;
1013  if (i == 1)
1014  sub->num_rects = 0;
1015 
1016  ost->frames_encoded++;
1017 
1018  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1019  subtitle_out_max_size, sub);
1020  if (i == 1)
1021  sub->num_rects = save_num_rects;
1022  if (subtitle_out_size < 0) {
1023  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1024  exit_program(1);
1025  }
1026 
1027  av_init_packet(&pkt);
1028  pkt.data = subtitle_out;
1029  pkt.size = subtitle_out_size;
1030  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1031  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1032  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1033  /* XXX: the pts correction is handled here. Maybe handling
1034  it in the codec would be better */
1035  if (i == 0)
1036  pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1037  else
1038  pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1039  }
1040  pkt.dts = pkt.pts;
1041  output_packet(of, &pkt, ost, 0);
1042  }
1043 }
1044 
1045 static void do_video_out(OutputFile *of,
1046  OutputStream *ost,
1047  AVFrame *next_picture,
1048  double sync_ipts)
1049 {
1050  int ret, format_video_sync;
1051  AVPacket pkt;
1052  AVCodecContext *enc = ost->enc_ctx;
1053  AVCodecParameters *mux_par = ost->st->codecpar;
1054  AVRational frame_rate;
1055  int nb_frames, nb0_frames, i;
1056  double delta, delta0;
1057  double duration = 0;
1058  int frame_size = 0;
1059  InputStream *ist = NULL;
1060  AVFilterContext *filter = ost->filter->filter;
1061 
1062  if (ost->source_index >= 0)
1063  ist = input_streams[ost->source_index];
1064 
1065  frame_rate = av_buffersink_get_frame_rate(filter);
1066  if (frame_rate.num > 0 && frame_rate.den > 0)
1067  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1068 
1069  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1070  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1071 
1072  if (!ost->filters_script &&
1073  !ost->filters &&
1074  next_picture &&
1075  ist &&
1076  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1077  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1078  }
1079 
1080  if (!next_picture) {
1081  //end, flushing
1082  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1083  ost->last_nb0_frames[1],
1084  ost->last_nb0_frames[2]);
1085  } else {
1086  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1087  delta = delta0 + duration;
1088 
1089  /* by default, we output a single frame */
1090  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1091  nb_frames = 1;
1092 
1093  format_video_sync = video_sync_method;
1094  if (format_video_sync == VSYNC_AUTO) {
1095  if(!strcmp(of->ctx->oformat->name, "avi")) {
1096  format_video_sync = VSYNC_VFR;
1097  } else
1098  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1099  if ( ist
1100  && format_video_sync == VSYNC_CFR
1101  && input_files[ist->file_index]->ctx->nb_streams == 1
1102  && input_files[ist->file_index]->input_ts_offset == 0) {
1103  format_video_sync = VSYNC_VSCFR;
1104  }
1105  if (format_video_sync == VSYNC_CFR && copy_ts) {
1106  format_video_sync = VSYNC_VSCFR;
1107  }
1108  }
1109  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1110 
1111  if (delta0 < 0 &&
1112  delta > 0 &&
1113  format_video_sync != VSYNC_PASSTHROUGH &&
1114  format_video_sync != VSYNC_DROP) {
1115  if (delta0 < -0.6) {
1116  av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1117  } else
1118  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1119  sync_ipts = ost->sync_opts;
1120  duration += delta0;
1121  delta0 = 0;
1122  }
1123 
1124  switch (format_video_sync) {
1125  case VSYNC_VSCFR:
1126  if (ost->frame_number == 0 && delta0 >= 0.5) {
1127  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1128  delta = duration;
1129  delta0 = 0;
1130  ost->sync_opts = lrint(sync_ipts);
1131  }
1132  case VSYNC_CFR:
1133  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1134  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1135  nb_frames = 0;
1136  } else if (delta < -1.1)
1137  nb_frames = 0;
1138  else if (delta > 1.1) {
1139  nb_frames = lrintf(delta);
1140  if (delta0 > 1.1)
1141  nb0_frames = lrintf(delta0 - 0.6);
1142  }
1143  break;
1144  case VSYNC_VFR:
1145  if (delta <= -0.6)
1146  nb_frames = 0;
1147  else if (delta > 0.6)
1148  ost->sync_opts = lrint(sync_ipts);
1149  break;
1150  case VSYNC_DROP:
1151  case VSYNC_PASSTHROUGH:
1152  ost->sync_opts = lrint(sync_ipts);
1153  break;
1154  default:
1155  av_assert0(0);
1156  }
1157  }
1158 
1159  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1160  nb0_frames = FFMIN(nb0_frames, nb_frames);
1161 
1162  memmove(ost->last_nb0_frames + 1,
1163  ost->last_nb0_frames,
1164  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1165  ost->last_nb0_frames[0] = nb0_frames;
1166 
1167  if (nb0_frames == 0 && ost->last_dropped) {
1168  nb_frames_drop++;
1169  av_log(NULL, AV_LOG_VERBOSE,
1170  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1171  ost->frame_number, ost->st->index, ost->last_frame->pts);
1172  }
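 /* Editorial note: the right-hand side of the test below counts at most one
    re-emission of a previously dropped frame plus the single normal output
    frame; anything beyond that is genuine duplication introduced by the
    framerate conversion and is reported as "dup". */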
1173  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1174  if (nb_frames > dts_error_threshold * 30) {
1175  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1176  nb_frames_drop++;
1177  return;
1178  }
1179  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1180  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1181  if (nb_frames_dup > dup_warning) {
1182  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1183  dup_warning *= 10;
1184  }
1185  }
1186  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1187 
1188  /* duplicates frame if needed */
1189  for (i = 0; i < nb_frames; i++) {
1190  AVFrame *in_picture;
1191  av_init_packet(&pkt);
1192  pkt.data = NULL;
1193  pkt.size = 0;
1194 
1195  if (i < nb0_frames && ost->last_frame) {
1196  in_picture = ost->last_frame;
1197  } else
1198  in_picture = next_picture;
1199 
1200  if (!in_picture)
1201  return;
1202 
1203  in_picture->pts = ost->sync_opts;
1204 
1205 #if 1
1206  if (!check_recording_time(ost))
1207 #else
1208  if (ost->frame_number >= ost->max_frames)
1209 #endif
1210  return;
1211 
1212  {
1213  int forced_keyframe = 0;
1214  double pts_time;
1215 
1216  if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1217  ost->top_field_first >= 0)
1218  in_picture->top_field_first = !!ost->top_field_first;
1219 
1220  if (in_picture->interlaced_frame) {
1221  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1222  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1223  else
1224  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1225  } else
1226  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1227 
1228  in_picture->quality = enc->global_quality;
1229  in_picture->pict_type = 0;
1230 
1231  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1232  in_picture->pts * av_q2d(enc->time_base) : NAN;
1233  if (ost->forced_kf_index < ost->forced_kf_count &&
1234  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1235  ost->forced_kf_index++;
1236  forced_keyframe = 1;
1237  } else if (ost->forced_keyframes_pexpr) {
1238  double res;
1239  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1240  res = av_expr_eval(ost->forced_keyframes_pexpr,
1241  ost->forced_keyframes_expr_const_values, NULL);
1242  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1243  ost->forced_keyframes_expr_const_values[FKF_N],
1244  ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1245  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1246  ost->forced_keyframes_expr_const_values[FKF_T],
1247  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1248  res);
1249  if (res) {
1250  forced_keyframe = 1;
1251  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1252  ost->forced_keyframes_expr_const_values[FKF_N];
1253  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1254  ost->forced_keyframes_expr_const_values[FKF_T];
1255  ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1256  }
1257 
1258  ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1259  } else if ( ost->forced_keyframes
1260  && !strncmp(ost->forced_keyframes, "source", 6)
1261  && in_picture->key_frame==1) {
1262  forced_keyframe = 1;
1263  }
1264 
1265  if (forced_keyframe) {
1266  in_picture->pict_type = AV_PICTURE_TYPE_I;
1267  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1268  }
1269 
1271  if (debug_ts) {
1272  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1273  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1274  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1275  enc->time_base.num, enc->time_base.den);
1276  }
1277 
1278  ost->frames_encoded++;
1279 
1280  ret = avcodec_send_frame(enc, in_picture);
1281  if (ret < 0)
1282  goto error;
1283 
1284  while (1) {
1285  ret = avcodec_receive_packet(enc, &pkt);
1286  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1287  if (ret == AVERROR(EAGAIN))
1288  break;
1289  if (ret < 0)
1290  goto error;
1291 
1292  if (debug_ts) {
1293  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1294  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1295  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1296  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1297  }
1298 
1299  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1300  pkt.pts = ost->sync_opts;
1301 
1302  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1303 
1304  if (debug_ts) {
1305  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1306  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1307  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1308  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1309  }
1310 
1311  frame_size = pkt.size;
1312  output_packet(of, &pkt, ost, 0);
1313 
1314  /* if two pass, output log */
1315  if (ost->logfile && enc->stats_out) {
1316  fprintf(ost->logfile, "%s", enc->stats_out);
1317  }
1318  }
1319  }
1320  ost->sync_opts++;
1321  /*
1322  * For video, number of frames in == number of packets out.
1323  * But there may be reordering, so we can't throw away frames on encoder
1324  * flush, we need to limit them here, before they go into encoder.
1325  */
1326  ost->frame_number++;
1327 
1328  if (vstats_filename && frame_size)
1329  do_video_stats(ost, frame_size);
1330  }
1331 
1332  if (!ost->last_frame)
1333  ost->last_frame = av_frame_alloc();
1334  av_frame_unref(ost->last_frame);
1335  if (next_picture && ost->last_frame)
1336  av_frame_ref(ost->last_frame, next_picture);
1337  else
1338  av_frame_free(&ost->last_frame);
1339 
1340  return;
1341 error:
1342  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1343  exit_program(1);
1344 }
1345 
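/* Editorial note: psnr() converts a mean squared error that the callers have
   already normalized to the [0,1] range (divided by width*height*255^2) into
   a PSNR value in dB: PSNR = -10 * log10(normalized MSE). */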
1346 static double psnr(double d)
1347 {
1348  return -10.0 * log10(d);
1349 }
1350 
1351 static void do_video_stats(OutputStream *ost, int frame_size)
1352 {
1353  AVCodecContext *enc;
1354  int frame_number;
1355  double ti1, bitrate, avg_bitrate;
1356 
1357  /* this is executed just the first time do_video_stats is called */
1358  if (!vstats_file) {
1359  vstats_file = fopen(vstats_filename, "w");
1360  if (!vstats_file) {
1361  perror("fopen");
1362  exit_program(1);
1363  }
1364  }
1365 
1366  enc = ost->enc_ctx;
1367  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1368  frame_number = ost->st->nb_frames;
1369  if (vstats_version <= 1) {
1370  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1371  ost->quality / (float)FF_QP2LAMBDA);
1372  } else {
1373  fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1374  ost->quality / (float)FF_QP2LAMBDA);
1375  }
1376 
1377  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1378  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1379 
1380  fprintf(vstats_file,"f_size= %6d ", frame_size);
1381  /* compute pts value */
1382  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1383  if (ti1 < 0.01)
1384  ti1 = 0.01;
1385 
1386  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1387  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1388  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1389  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1390  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1391  }
1392 }
1393 
1394 static int init_output_stream(OutputStream *ost, char *error, int error_len);
1395 
1396 static void finish_output_stream(OutputStream *ost)
1397 {
1398  OutputFile *of = output_files[ost->file_index];
1399  int i;
1400 
1401  ost->finished |= ENCODER_FINISHED | MUXER_FINISHED;
1402 
1403  if (of->shortest) {
1404  for (i = 0; i < of->ctx->nb_streams; i++)
1405  output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1406  }
1407 }
1408 
1409 /**
1410  * Get and encode new output from any of the filtergraphs, without causing
1411  * activity.
1412  *
1413  * @return 0 for success, <0 for severe errors
1414  */
1415 static int reap_filters(int flush)
1416 {
1417  AVFrame *filtered_frame = NULL;
1418  int i;
1419 
1420  /* Reap all buffers present in the buffer sinks */
1421  for (i = 0; i < nb_output_streams; i++) {
1422  OutputStream *ost = output_streams[i];
1423  OutputFile *of = output_files[ost->file_index];
1424  AVFilterContext *filter;
1425  AVCodecContext *enc = ost->enc_ctx;
1426  int ret = 0;
1427 
1428  if (!ost->filter || !ost->filter->graph->graph)
1429  continue;
1430  filter = ost->filter->filter;
1431 
1432  if (!ost->initialized) {
1433  char error[1024] = "";
1434  ret = init_output_stream(ost, error, sizeof(error));
1435  if (ret < 0) {
1436  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1437  ost->file_index, ost->index, error);
1438  exit_program(1);
1439  }
1440  }
1441 
1442  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1443  return AVERROR(ENOMEM);
1444  }
1445  filtered_frame = ost->filtered_frame;
1446 
1447  while (1) {
1448  double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1449  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1450  AV_BUFFERSINK_FLAG_NO_REQUEST);
1451  if (ret < 0) {
1452  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1453  av_log(NULL, AV_LOG_WARNING,
1454  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1455  } else if (flush && ret == AVERROR_EOF) {
1456  if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1457  do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1458  }
1459  break;
1460  }
1461  if (ost->finished) {
1462  av_frame_unref(filtered_frame);
1463  continue;
1464  }
1465  if (filtered_frame->pts != AV_NOPTS_VALUE) {
1466  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1467  AVRational filter_tb = av_buffersink_get_time_base(filter);
1468  AVRational tb = enc->time_base;
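 /* Editorial note: the time base is temporarily scaled up by as many as 16
    extra bits so the rescaled pts keeps fractional precision; float_pts is
    then the frame pts expressed in encoder time base units as a double,
    while filtered_frame->pts keeps the integer value. */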
1469  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1470 
1471  tb.den <<= extra_bits;
1472  float_pts =
1473  av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1474  av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1475  float_pts /= 1 << extra_bits;
1476  // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1477  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1478 
1479  filtered_frame->pts =
1480  av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1481  av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1482  }
1483  //if (ost->source_index >= 0)
1484  // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1485 
1486  switch (av_buffersink_get_type(filter)) {
1487  case AVMEDIA_TYPE_VIDEO:
1488  if (!ost->frame_aspect_ratio.num)
1489  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1490 
1491  if (debug_ts) {
1492  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1493  av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1494  float_pts,
1495  enc->time_base.num, enc->time_base.den);
1496  }
1497 
1498  do_video_out(of, ost, filtered_frame, float_pts);
1499  break;
1500  case AVMEDIA_TYPE_AUDIO:
1501  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1502  enc->channels != filtered_frame->channels) {
1503  av_log(NULL, AV_LOG_ERROR,
1504  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1505  break;
1506  }
1507  do_audio_out(of, ost, filtered_frame);
1508  break;
1509  default:
1510  // TODO support subtitle filters
1511  av_assert0(0);
1512  }
1513 
1514  av_frame_unref(filtered_frame);
1515  }
1516  }
1517 
1518  return 0;
1519 }
1520 
1521 static void print_final_stats(int64_t total_size)
1522 {
1523  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1524  uint64_t subtitle_size = 0;
1525  uint64_t data_size = 0;
1526  float percent = -1.0;
1527  int i, j;
1528  int pass1_used = 1;
1529 
1530  for (i = 0; i < nb_output_streams; i++) {
1531  OutputStream *ost = output_streams[i];
1532  switch (ost->enc_ctx->codec_type) {
1533  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1534  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1535  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1536  default: other_size += ost->data_size; break;
1537  }
1538  extra_size += ost->enc_ctx->extradata_size;
1539  data_size += ost->data_size;
1540  if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1541  != AV_CODEC_FLAG_PASS1)
1542  pass1_used = 0;
1543  }
1544 
1545  if (data_size && total_size>0 && total_size >= data_size)
1546  percent = 100.0 * (total_size - data_size) / data_size;
1547 
1548  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1549  video_size / 1024.0,
1550  audio_size / 1024.0,
1551  subtitle_size / 1024.0,
1552  other_size / 1024.0,
1553  extra_size / 1024.0);
1554  if (percent >= 0.0)
1555  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1556  else
1557  av_log(NULL, AV_LOG_INFO, "unknown");
1558  av_log(NULL, AV_LOG_INFO, "\n");
1559 
1560  /* print verbose per-stream stats */
1561  for (i = 0; i < nb_input_files; i++) {
1562  InputFile *f = input_files[i];
1563  uint64_t total_packets = 0, total_size = 0;
1564 
1565  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1566  i, f->ctx->filename);
1567 
1568  for (j = 0; j < f->nb_streams; j++) {
1569  InputStream *ist = input_streams[f->ist_index + j];
1570  enum AVMediaType type = ist->dec_ctx->codec_type;
1571 
1572  total_size += ist->data_size;
1573  total_packets += ist->nb_packets;
1574 
1575  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1576  i, j, media_type_string(type));
1577  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1578  ist->nb_packets, ist->data_size);
1579 
1580  if (ist->decoding_needed) {
1581  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1582  ist->frames_decoded);
1583  if (type == AVMEDIA_TYPE_AUDIO)
1584  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1585  av_log(NULL, AV_LOG_VERBOSE, "; ");
1586  }
1587 
1588  av_log(NULL, AV_LOG_VERBOSE, "\n");
1589  }
1590 
1591  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1592  total_packets, total_size);
1593  }
1594 
1595  for (i = 0; i < nb_output_files; i++) {
1596  OutputFile *of = output_files[i];
1597  uint64_t total_packets = 0, total_size = 0;
1598 
1599  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1600  i, of->ctx->filename);
1601 
1602  for (j = 0; j < of->ctx->nb_streams; j++) {
1603  OutputStream *ost = output_streams[of->ost_index + j];
1604  enum AVMediaType type = ost->enc_ctx->codec_type;
1605 
1606  total_size += ost->data_size;
1607  total_packets += ost->packets_written;
1608 
1609  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1610  i, j, media_type_string(type));
1611  if (ost->encoding_needed) {
1612  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1613  ost->frames_encoded);
1614  if (type == AVMEDIA_TYPE_AUDIO)
1615  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1616  av_log(NULL, AV_LOG_VERBOSE, "; ");
1617  }
1618 
1619  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1620  ost->packets_written, ost->data_size);
1621 
1622  av_log(NULL, AV_LOG_VERBOSE, "\n");
1623  }
1624 
1625  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1626  total_packets, total_size);
1627  }
1628  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1629  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1630  if (pass1_used) {
1631  av_log(NULL, AV_LOG_WARNING, "\n");
1632  } else {
1633  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1634  }
1635  }
1636 }
1637 
1638 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1639 {
1640  char buf[1024];
1641  AVBPrint buf_script;
1642  OutputStream *ost;
1643  AVFormatContext *oc;
1644  int64_t total_size;
1645  AVCodecContext *enc;
1646  int frame_number, vid, i;
1647  double bitrate;
1648  double speed;
1649  int64_t pts = INT64_MIN + 1;
1650  static int64_t last_time = -1;
1651  static int qp_histogram[52];
1652  int hours, mins, secs, us;
1653  int ret;
1654  float t;
1655 
1656  if (!print_stats && !is_last_report && !progress_avio)
1657  return;
1658 
1659  if (!is_last_report) {
1660  if (last_time == -1) {
1661  last_time = cur_time;
1662  return;
1663  }
1664  if ((cur_time - last_time) < 500000)
1665  return;
1666  last_time = cur_time;
1667  }
1668 
1669  t = (cur_time-timer_start) / 1000000.0;
1670 
1671 
1672  oc = output_files[0]->ctx;
1673 
1674  total_size = avio_size(oc->pb);
1675  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1676  total_size = avio_tell(oc->pb);
1677 
1678  buf[0] = '\0';
1679  vid = 0;
1680  av_bprint_init(&buf_script, 0, 1);
1681  for (i = 0; i < nb_output_streams; i++) {
1682  float q = -1;
1683  ost = output_streams[i];
1684  enc = ost->enc_ctx;
1685  if (!ost->stream_copy)
1686  q = ost->quality / (float) FF_QP2LAMBDA;
1687 
1688  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1689  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1690  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1691  ost->file_index, ost->index, q);
1692  }
1693  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1694  float fps;
1695 
1696  frame_number = ost->frame_number;
1697  fps = t > 1 ? frame_number / t : 0;
1698  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1699  frame_number, fps < 9.95, fps, q);
1700  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1701  av_bprintf(&buf_script, "fps=%.1f\n", fps);
1702  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1703  ost->file_index, ost->index, q);
1704  if (is_last_report)
1705  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1706  if (qp_hist) {
1707  int j;
1708  int qp = lrintf(q);
1709  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1710  qp_histogram[qp]++;
1711  for (j = 0; j < 32; j++)
1712  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1713  }
1714 
1715  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1716  int j;
1717  double error, error_sum = 0;
1718  double scale, scale_sum = 0;
1719  double p;
1720  char type[3] = { 'Y','U','V' };
1721  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1722  for (j = 0; j < 3; j++) {
1723  if (is_last_report) {
1724  error = enc->error[j];
1725  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1726  } else {
1727  error = ost->error[j];
1728  scale = enc->width * enc->height * 255.0 * 255.0;
1729  }
1730  if (j)
1731  scale /= 4;
1732  error_sum += error;
1733  scale_sum += scale;
1734  p = psnr(error / scale);
1735  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1736  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1737  ost->file_index, ost->index, type[j] | 32, p);
1738  }
1739  p = psnr(error_sum / scale_sum);
1740  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1741  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1742  ost->file_index, ost->index, p);
1743  }
1744  vid = 1;
1745  }
1746  /* compute min output value */
1747  if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1748  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1749  ost->st->time_base, AV_TIME_BASE_Q));
1750  if (is_last_report)
1751  nb_frames_drop += ost->last_dropped;
1752  }
1753 
1754  secs = FFABS(pts) / AV_TIME_BASE;
1755  us = FFABS(pts) % AV_TIME_BASE;
1756  mins = secs / 60;
1757  secs %= 60;
1758  hours = mins / 60;
1759  mins %= 60;
1760 
1761  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1762  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1763 
1764  if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1765  "size=N/A time=");
1766  else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1767  "size=%8.0fkB time=", total_size / 1024.0);
1768  if (pts < 0)
1769  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1770  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1771  "%02d:%02d:%02d.%02d ", hours, mins, secs,
1772  (100 * us) / AV_TIME_BASE);
1773 
1774  if (bitrate < 0) {
1775  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1776  av_bprintf(&buf_script, "bitrate=N/A\n");
1777  }else{
1778  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1779  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1780  }
1781 
1782  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1783  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1784  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1785  av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1786  hours, mins, secs, us);
1787 
1788  if (nb_frames_dup || nb_frames_drop)
1789  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1790  nb_frames_dup, nb_frames_drop);
1791  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1792  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1793 
1794  if (speed < 0) {
1795  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1796  av_bprintf(&buf_script, "speed=N/A\n");
1797  } else {
1798  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1799  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1800  }
1801 
1802  if (print_stats || is_last_report) {
1803  const char end = is_last_report ? '\n' : '\r';
1804  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1805  fprintf(stderr, "%s %c", buf, end);
1806  } else
1807  av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1808 
1809  fflush(stderr);
1810  }
1811 
1812  if (progress_avio) {
1813  av_bprintf(&buf_script, "progress=%s\n",
1814  is_last_report ? "end" : "continue");
1815  avio_write(progress_avio, buf_script.str,
1816  FFMIN(buf_script.len, buf_script.size - 1));
1817  avio_flush(progress_avio);
1818  av_bprint_finalize(&buf_script, NULL);
1819  if (is_last_report) {
1820  if ((ret = avio_closep(&progress_avio)) < 0)
1821  av_log(NULL, AV_LOG_ERROR,
1822  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1823  }
1824  }
1825 
1826  if (is_last_report)
1827  print_final_stats(total_size);
1828 }
1829 
1830 static void flush_encoders(void)
1831 {
1832  int i, ret;
1833 
1834  for (i = 0; i < nb_output_streams; i++) {
1835  OutputStream *ost = output_streams[i];
1836  AVCodecContext *enc = ost->enc_ctx;
1837  OutputFile *of = output_files[ost->file_index];
1838 
1839  if (!ost->encoding_needed)
1840  continue;
1841 
1842  // Try to enable encoding with no input frames.
1843  // Maybe we should just let encoding fail instead.
1844  if (!ost->initialized) {
1845  FilterGraph *fg = ost->filter->graph;
1846  char error[1024] = "";
1847 
1848  av_log(NULL, AV_LOG_WARNING,
1849  "Finishing stream %d:%d without any data written to it.\n",
1850  ost->file_index, ost->st->index);
1851 
1852  if (ost->filter && !fg->graph) {
1853  int x;
1854  for (x = 0; x < fg->nb_inputs; x++) {
1855  InputFilter *ifilter = fg->inputs[x];
1856  if (ifilter->format < 0) {
1857  AVCodecParameters *par = ifilter->ist->st->codecpar;
1858  // We never got any input. Set a fake format, which will
1859  // come from libavformat.
1860  ifilter->format = par->format;
1861  ifilter->sample_rate = par->sample_rate;
1862  ifilter->channels = par->channels;
1863  ifilter->channel_layout = par->channel_layout;
1864  ifilter->width = par->width;
1865  ifilter->height = par->height;
1866  ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1867  }
1868  }
1869 
1871  continue;
1872 
1873  ret = configure_filtergraph(fg);
1874  if (ret < 0) {
1875  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1876  exit_program(1);
1877  }
1878 
1879  finish_output_stream(ost);
1880  }
1881 
1882  ret = init_output_stream(ost, error, sizeof(error));
1883  if (ret < 0) {
1884  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1885  ost->file_index, ost->index, error);
1886  exit_program(1);
1887  }
1888  }
1889 
1890  if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1891  continue;
1892 
1893  if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
1894  continue;
1895 
1896  for (;;) {
1897  const char *desc = NULL;
1898  AVPacket pkt;
1899  int pkt_size;
1900 
1901  switch (enc->codec_type) {
1902  case AVMEDIA_TYPE_AUDIO:
1903  desc = "audio";
1904  break;
1905  case AVMEDIA_TYPE_VIDEO:
1906  desc = "video";
1907  break;
1908  default:
1909  av_assert0(0);
1910  }
1911 
1912  av_init_packet(&pkt);
1913  pkt.data = NULL;
1914  pkt.size = 0;
1915 
1916  update_benchmark(NULL);
1917 
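 /* Drain the encoder: avcodec_receive_packet() keeps returning EAGAIN
  * until a NULL (flush) frame has been sent, so send it and retry. */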
1918  while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1919  ret = avcodec_send_frame(enc, NULL);
1920  if (ret < 0) {
1921  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1922  desc,
1923  av_err2str(ret));
1924  exit_program(1);
1925  }
1926  }
1927 
1928  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1929  if (ret < 0 && ret != AVERROR_EOF) {
1930  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1931  desc,
1932  av_err2str(ret));
1933  exit_program(1);
1934  }
1935  if (ost->logfile && enc->stats_out) {
1936  fprintf(ost->logfile, "%s", enc->stats_out);
1937  }
1938  if (ret == AVERROR_EOF) {
1939  output_packet(of, &pkt, ost, 1);
1940  break;
1941  }
1942  if (ost->finished & MUXER_FINISHED) {
1943  av_packet_unref(&pkt);
1944  continue;
1945  }
1946  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1947  pkt_size = pkt.size;
1948  output_packet(of, &pkt, ost, 0);
1949  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1950  do_video_stats(ost, pkt_size);
1951  }
1952  }
1953  }
1954 }
1955 
1956 /*
1957  * Check whether a packet from ist should be written into ost at this time
1958  */
1959 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1960 {
1961  OutputFile *of = output_files[ost->file_index];
1962  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1963 
1964  if (ost->source_index != ist_index)
1965  return 0;
1966 
1967  if (ost->finished)
1968  return 0;
1969 
1970  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1971  return 0;
1972 
1973  return 1;
1974 }
1975 
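/* Forward a packet from ist to ost without re-encoding: rescale its timestamps
 * into the muxer time base, subtract the output start time and hand the packet
 * (optionally run through av_parser_change()) to the muxer. */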
1976 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1977 {
1978  OutputFile *of = output_files[ost->file_index];
1979  InputFile *f = input_files [ist->file_index];
1980  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1981  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1982  AVPacket opkt = { 0 };
1983 
1984  av_init_packet(&opkt);
1985 
1986  // EOF: flush output bitstream filters.
1987  if (!pkt) {
1988  output_packet(of, &opkt, ost, 1);
1989  return;
1990  }
1991 
1992  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1993  !ost->copy_initial_nonkeyframes)
1994  return;
1995 
1996  if (!ost->frame_number && !ost->copy_prior_start) {
1997  int64_t comp_start = start_time;
1998  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1999  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2000  if (pkt->pts == AV_NOPTS_VALUE ?
2001  ist->pts < comp_start :
2002  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
2003  return;
2004  }
2005 
2006  if (of->recording_time != INT64_MAX &&
2007  ist->pts >= of->recording_time + start_time) {
2008  close_output_stream(ost);
2009  return;
2010  }
2011 
2012  if (f->recording_time != INT64_MAX) {
2013  start_time = f->ctx->start_time;
2014  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2015  start_time += f->start_time;
2016  if (ist->pts >= f->recording_time + start_time) {
2017  close_output_stream(ost);
2018  return;
2019  }
2020  }
2021 
2022  /* force the input stream PTS */
2023  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2024  ost->sync_opts++;
2025 
2026  if (pkt->pts != AV_NOPTS_VALUE)
2027  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2028  else
2029  opkt.pts = AV_NOPTS_VALUE;
2030 
2031  if (pkt->dts == AV_NOPTS_VALUE)
2032  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2033  else
2034  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2035  opkt.dts -= ost_tb_start_time;
2036 
2037  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
2038  int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2039  if(!duration)
2040  duration = ist->dec_ctx->frame_size;
2041  opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2042  (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
2043  ost->mux_timebase) - ost_tb_start_time;
2044  }
2045 
2046  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2047 
2048  opkt.flags = pkt->flags;
2049  // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
2050  if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
2051  && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
2052  && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
2053  && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
2054  ) {
2055  int ret = av_parser_change(ost->parser, ost->parser_avctx,
2056  &opkt.data, &opkt.size,
2057  pkt->data, pkt->size,
2058  pkt->flags & AV_PKT_FLAG_KEY);
2059  if (ret < 0) {
2060  av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
2061  av_err2str(ret));
2062  exit_program(1);
2063  }
2064  if (ret) {
2065  opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
2066  if (!opkt.buf)
2067  exit_program(1);
2068  }
2069  } else {
2070  opkt.data = pkt->data;
2071  opkt.size = pkt->size;
2072  }
2073  av_copy_packet_side_data(&opkt, pkt);
2074 
2075  output_packet(of, &opkt, ost, 0);
2076 }
2077 
2078 static int guess_input_channel_layout(InputStream *ist)
2079 {
2080  AVCodecContext *dec = ist->dec_ctx;
2081 
2082  if (!dec->channel_layout) {
2083  char layout_name[256];
2084 
2085  if (dec->channels > ist->guess_layout_max)
2086  return 0;
2087  dec->channel_layout = av_get_default_channel_layout(dec->channels);
2088  if (!dec->channel_layout)
2089  return 0;
2090  av_get_channel_layout_string(layout_name, sizeof(layout_name),
2091  dec->channels, dec->channel_layout);
2092  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2093  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2094  }
2095  return 1;
2096 }
2097 
2098 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2099 {
2100  if (*got_output || ret<0)
2101  decode_error_stat[ret<0] ++;
2102 
2103  if (ret < 0 && exit_on_error)
2104  exit_program(1);
2105 
2106  if (exit_on_error && *got_output && ist) {
2107  if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2108  av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
2109  exit_program(1);
2110  }
2111  }
2112 }
2113 
2114 // Filters can be configured only if the formats of all inputs are known.
2115 static int ifilter_has_all_input_formats(FilterGraph *fg)
2116 {
2117  int i;
2118  for (i = 0; i < fg->nb_inputs; i++) {
2119  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2120  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2121  return 0;
2122  }
2123  return 1;
2124 }
2125 
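/* Feed one decoded frame to a filtergraph input. If the frame parameters differ
 * from what the input was configured with, the graph is reconfigured; frames are
 * buffered while other inputs of the graph still have unknown formats. */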
2126 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2127 {
2128  FilterGraph *fg = ifilter->graph;
2129  int need_reinit, ret, i;
2130 
2131  /* determine if the parameters for this input changed */
2132  need_reinit = ifilter->format != frame->format;
2133  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2134  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2135  need_reinit = 1;
2136 
2137  switch (ifilter->ist->st->codecpar->codec_type) {
2138  case AVMEDIA_TYPE_AUDIO:
2139  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2140  ifilter->channels != frame->channels ||
2141  ifilter->channel_layout != frame->channel_layout;
2142  break;
2143  case AVMEDIA_TYPE_VIDEO:
2144  need_reinit |= ifilter->width != frame->width ||
2145  ifilter->height != frame->height;
2146  break;
2147  }
2148 
2149  if (need_reinit) {
2150  ret = ifilter_parameters_from_frame(ifilter, frame);
2151  if (ret < 0)
2152  return ret;
2153  }
2154 
2155  /* (re)init the graph if possible, otherwise buffer the frame and return */
2156  if (need_reinit || !fg->graph) {
2157  for (i = 0; i < fg->nb_inputs; i++) {
2158  if (!ifilter_has_all_input_formats(fg)) {
2159  AVFrame *tmp = av_frame_clone(frame);
2160  if (!tmp)
2161  return AVERROR(ENOMEM);
2162  av_frame_unref(frame);
2163 
2164  if (!av_fifo_space(ifilter->frame_queue)) {
2165  ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2166  if (ret < 0) {
2167  av_frame_free(&tmp);
2168  return ret;
2169  }
2170  }
2171  av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
2172  return 0;
2173  }
2174  }
2175 
2176  ret = reap_filters(1);
2177  if (ret < 0 && ret != AVERROR_EOF) {
2178  char errbuf[128];
2179  av_strerror(ret, errbuf, sizeof(errbuf));
2180 
2181  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
2182  return ret;
2183  }
2184 
2185  ret = configure_filtergraph(fg);
2186  if (ret < 0) {
2187  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2188  return ret;
2189  }
2190  }
2191 
2192  ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2193  if (ret < 0) {
2194  if (ret != AVERROR_EOF)
2195  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2196  return ret;
2197  }
2198 
2199  return 0;
2200 }
2201 
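/* Signal EOF on a filtergraph input; if the graph was never configured,
 * mark its output streams as finished once all inputs have reached EOF. */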
2202 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2203 {
2204  int i, j, ret;
2205 
2206  ifilter->eof = 1;
2207 
2208  if (ifilter->filter) {
2209  ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2210  if (ret < 0)
2211  return ret;
2212  } else {
2213  // the filtergraph was never configured
2214  FilterGraph *fg = ifilter->graph;
2215  for (i = 0; i < fg->nb_inputs; i++)
2216  if (!fg->inputs[i]->eof)
2217  break;
2218  if (i == fg->nb_inputs) {
2219  // All the input streams have finished without the filtergraph
2220  // ever being configured.
2221  // Mark the output streams as finished.
2222  for (j = 0; j < fg->nb_outputs; j++)
2223  finish_output_stream(fg->outputs[j]->ost);
2224  }
2225  }
2226 
2227  return 0;
2228 }
2229 
2230 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2231 // There is the following difference: if you got a frame, you must call
2232 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2233 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2234 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2235 {
2236  int ret;
2237 
2238  *got_frame = 0;
2239 
2240  if (pkt) {
2241  ret = avcodec_send_packet(avctx, pkt);
2242  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2243  // decoded frames with avcodec_receive_frame() until done.
2244  if (ret < 0 && ret != AVERROR_EOF)
2245  return ret;
2246  }
2247 
2248  ret = avcodec_receive_frame(avctx, frame);
2249  if (ret < 0 && ret != AVERROR(EAGAIN))
2250  return ret;
2251  if (ret >= 0)
2252  *got_frame = 1;
2253 
2254  return 0;
2255 }
2256 
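/* Pass a decoded frame to every filtergraph input fed by this input stream,
 * using a new reference for all but the last filter. */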
2257 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2258 {
2259  int i, ret;
2260  AVFrame *f;
2261 
2262  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2263  for (i = 0; i < ist->nb_filters; i++) {
2264  if (i < ist->nb_filters - 1) {
2265  f = ist->filter_frame;
2266  ret = av_frame_ref(f, decoded_frame);
2267  if (ret < 0)
2268  break;
2269  } else
2270  f = decoded_frame;
2271  ret = ifilter_send_frame(ist->filters[i], f);
2272  if (ret == AVERROR_EOF)
2273  ret = 0; /* ignore */
2274  if (ret < 0) {
2275  av_log(NULL, AV_LOG_ERROR,
2276  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2277  break;
2278  }
2279  }
2280  return ret;
2281 }
2282 
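/* Decode one audio packet and push the resulting frame into the filters,
 * deriving a timestamp from the packet or the stream DTS when the frame
 * carries none. */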
2283 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2284  int *decode_failed)
2285 {
2286  AVFrame *decoded_frame;
2287  AVCodecContext *avctx = ist->dec_ctx;
2288  int ret, err = 0;
2289  AVRational decoded_frame_tb;
2290 
2291  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2292  return AVERROR(ENOMEM);
2293  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2294  return AVERROR(ENOMEM);
2295  decoded_frame = ist->decoded_frame;
2296 
2297  update_benchmark(NULL);
2298  ret = decode(avctx, decoded_frame, got_output, pkt);
2299  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2300  if (ret < 0)
2301  *decode_failed = 1;
2302 
2303  if (ret >= 0 && avctx->sample_rate <= 0) {
2304  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2305  ret = AVERROR_INVALIDDATA;
2306  }
2307 
2308  if (ret != AVERROR_EOF)
2309  check_decode_result(ist, got_output, ret);
2310 
2311  if (!*got_output || ret < 0)
2312  return ret;
2313 
2314  ist->samples_decoded += decoded_frame->nb_samples;
2315  ist->frames_decoded++;
2316 
2317 #if 1
2318  /* increment next_dts to use for the case where the input stream does not
2319  have timestamps or there are multiple frames in the packet */
2320  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2321  avctx->sample_rate;
2322  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2323  avctx->sample_rate;
2324 #endif
2325 
2326  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2327  decoded_frame_tb = ist->st->time_base;
2328  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2329  decoded_frame->pts = pkt->pts;
2330  decoded_frame_tb = ist->st->time_base;
2331  }else {
2332  decoded_frame->pts = ist->dts;
2333  decoded_frame_tb = AV_TIME_BASE_Q;
2334  }
2335  if (decoded_frame->pts != AV_NOPTS_VALUE)
2336  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2337  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2338  (AVRational){1, avctx->sample_rate});
2339  ist->nb_samples = decoded_frame->nb_samples;
2340  err = send_frame_to_filters(ist, decoded_frame);
2341 
2342  av_frame_unref(ist->filter_frame);
2343  av_frame_unref(decoded_frame);
2344  return err < 0 ? err : ret;
2345 }
2346 
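/* Decode one video packet. The best-effort timestamp of the decoded frame (or,
 * at EOF, a buffered packet DTS) is used to advance the input stream clock
 * before the frame is sent to the filters. */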
2347 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2348  int *decode_failed)
2349 {
2350  AVFrame *decoded_frame;
2351  int i, ret = 0, err = 0;
2352  int64_t best_effort_timestamp;
2353  int64_t dts = AV_NOPTS_VALUE;
2354  AVPacket avpkt;
2355 
2356  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2357  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2358  // skip the packet.
2359  if (!eof && pkt && pkt->size == 0)
2360  return 0;
2361 
2362  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2363  return AVERROR(ENOMEM);
2364  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2365  return AVERROR(ENOMEM);
2366  decoded_frame = ist->decoded_frame;
2367  if (ist->dts != AV_NOPTS_VALUE)
2368  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2369  if (pkt) {
2370  avpkt = *pkt;
2371  avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2372  }
2373 
2374  // The old code used to set dts on the drain packet, which does not work
2375  // with the new API anymore.
2376  if (eof) {
2377  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2378  if (!new)
2379  return AVERROR(ENOMEM);
2380  ist->dts_buffer = new;
2381  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2382  }
2383 
2384  update_benchmark(NULL);
2385  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2386  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2387  if (ret < 0)
2388  *decode_failed = 1;
2389 
2390  // The following line may be required in some cases where there is no parser
2391  // or the parser does not set has_b_frames correctly
2392  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2393  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2394  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2395  } else
2396  av_log(ist->dec_ctx, AV_LOG_WARNING,
2397  "video_delay is larger in decoder than demuxer %d > %d.\n"
2398  "If you want to help, upload a sample "
2399  "of this file to ftp://upload.ffmpeg.org/incoming/ "
2400  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2401  ist->dec_ctx->has_b_frames,
2402  ist->st->codecpar->video_delay);
2403  }
2404 
2405  if (ret != AVERROR_EOF)
2406  check_decode_result(ist, got_output, ret);
2407 
2408  if (*got_output && ret >= 0) {
2409  if (ist->dec_ctx->width != decoded_frame->width ||
2410  ist->dec_ctx->height != decoded_frame->height ||
2411  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2412  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2413  decoded_frame->width,
2414  decoded_frame->height,
2415  decoded_frame->format,
2416  ist->dec_ctx->width,
2417  ist->dec_ctx->height,
2418  ist->dec_ctx->pix_fmt);
2419  }
2420  }
2421 
2422  if (!*got_output || ret < 0)
2423  return ret;
2424 
2425  if(ist->top_field_first>=0)
2426  decoded_frame->top_field_first = ist->top_field_first;
2427 
2428  ist->frames_decoded++;
2429 
2430  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2431  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2432  if (err < 0)
2433  goto fail;
2434  }
2435  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2436 
2437  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2438  *duration_pts = decoded_frame->pkt_duration;
2439 
2440  if (ist->framerate.num)
2441  best_effort_timestamp = ist->cfr_next_pts++;
2442 
2443  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2444  best_effort_timestamp = ist->dts_buffer[0];
2445 
2446  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2447  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2448  ist->nb_dts_buffer--;
2449  }
2450 
2451  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2452  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2453 
2454  if (ts != AV_NOPTS_VALUE)
2455  ist->next_pts = ist->pts = ts;
2456  }
2457 
2458  if (debug_ts) {
2459  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2460  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2461  ist->st->index, av_ts2str(decoded_frame->pts),
2462  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2463  best_effort_timestamp,
2464  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2465  decoded_frame->key_frame, decoded_frame->pict_type,
2466  ist->st->time_base.num, ist->st->time_base.den);
2467  }
2468 
2469  if (ist->st->sample_aspect_ratio.num)
2470  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2471 
2472  err = send_frame_to_filters(ist, decoded_frame);
2473 
2474 fail:
2475  av_frame_unref(ist->filter_frame);
2476  av_frame_unref(decoded_frame);
2477  return err < 0 ? err : ret;
2478 }
2479 
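/* Decode a subtitle packet, optionally fixing its duration against the next
 * subtitle, feed it to sub2video when needed and encode it for all matching
 * subtitle output streams. */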
2480 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2481  int *decode_failed)
2482 {
2483  AVSubtitle subtitle;
2484  int free_sub = 1;
2485  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2486  &subtitle, got_output, pkt);
2487 
2488  check_decode_result(NULL, got_output, ret);
2489 
2490  if (ret < 0 || !*got_output) {
2491  *decode_failed = 1;
2492  if (!pkt->size)
2493  sub2video_flush(ist);
2494  return ret;
2495  }
2496 
2497  if (ist->fix_sub_duration) {
2498  int end = 1;
2499  if (ist->prev_sub.got_output) {
2500  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2501  1000, AV_TIME_BASE);
2502  if (end < ist->prev_sub.subtitle.end_display_time) {
2503  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2504  "Subtitle duration reduced from %"PRId32" to %d%s\n",
2505  ist->prev_sub.subtitle.end_display_time, end,
2506  end <= 0 ? ", dropping it" : "");
2507  ist->prev_sub.subtitle.end_display_time = end;
2508  }
2509  }
2510  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2511  FFSWAP(int, ret, ist->prev_sub.ret);
2512  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2513  if (end <= 0)
2514  goto out;
2515  }
2516 
2517  if (!*got_output)
2518  return ret;
2519 
2520  if (ist->sub2video.frame) {
2521  sub2video_update(ist, &subtitle);
2522  } else if (ist->nb_filters) {
2523  if (!ist->sub2video.sub_queue)
2524  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2525  if (!ist->sub2video.sub_queue)
2526  exit_program(1);
2527  if (!av_fifo_space(ist->sub2video.sub_queue)) {
2528  ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2529  if (ret < 0)
2530  exit_program(1);
2531  }
2532  av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2533  free_sub = 0;
2534  }
2535 
2536  if (!subtitle.num_rects)
2537  goto out;
2538 
2539  ist->frames_decoded++;
2540 
2541  for (i = 0; i < nb_output_streams; i++) {
2542  OutputStream *ost = output_streams[i];
2543 
2544  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2545  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2546  continue;
2547 
2548  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2549  }
2550 
2551 out:
2552  if (free_sub)
2553  avsubtitle_free(&subtitle);
2554  return ret;
2555 }
2556 
2557 static int send_filter_eof(InputStream *ist)
2558 {
2559  int i, ret;
2560  /* TODO keep pts also in stream time base to avoid converting back */
2561  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2562  AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
2563 
2564  for (i = 0; i < ist->nb_filters; i++) {
2565  ret = ifilter_send_eof(ist->filters[i], pts);
2566  if (ret < 0)
2567  return ret;
2568  }
2569  return 0;
2570 }
2571 
2572 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2573 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2574 {
2575  int ret = 0, i;
2576  int repeating = 0;
2577  int eof_reached = 0;
2578 
2579  AVPacket avpkt;
2580  if (!ist->saw_first_ts) {
2581  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2582  ist->pts = 0;
2583  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2584  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2585  ist->pts = ist->dts; //unused but better to set it to a value that's not totally wrong
2586  }
2587  ist->saw_first_ts = 1;
2588  }
2589 
2590  if (ist->next_dts == AV_NOPTS_VALUE)
2591  ist->next_dts = ist->dts;
2592  if (ist->next_pts == AV_NOPTS_VALUE)
2593  ist->next_pts = ist->pts;
2594 
2595  if (!pkt) {
2596  /* EOF handling */
2597  av_init_packet(&avpkt);
2598  avpkt.data = NULL;
2599  avpkt.size = 0;
2600  } else {
2601  avpkt = *pkt;
2602  }
2603 
2604  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2605  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2606  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2607  ist->next_pts = ist->pts = ist->dts;
2608  }
2609 
2610  // while we have more to decode or while the decoder did output something on EOF
2611  while (ist->decoding_needed) {
2612  int64_t duration_dts = 0;
2613  int64_t duration_pts = 0;
2614  int got_output = 0;
2615  int decode_failed = 0;
2616 
2617  ist->pts = ist->next_pts;
2618  ist->dts = ist->next_dts;
2619 
2620  switch (ist->dec_ctx->codec_type) {
2621  case AVMEDIA_TYPE_AUDIO:
2622  ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2623  &decode_failed);
2624  break;
2625  case AVMEDIA_TYPE_VIDEO:
2626  ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
2627  &decode_failed);
2628  if (!repeating || !pkt || got_output) {
2629  if (pkt && pkt->duration) {
2630  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2631  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2632  int ticks = av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2633  duration_dts = ((int64_t)AV_TIME_BASE *
2634  ist->dec_ctx->framerate.den * ticks) /
2635  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2636  }
2637 
2638  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2639  ist->next_dts += duration_dts;
2640  }else
2641  ist->next_dts = AV_NOPTS_VALUE;
2642  }
2643 
2644  if (got_output) {
2645  if (duration_pts > 0) {
2646  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2647  } else {
2648  ist->next_pts += duration_dts;
2649  }
2650  }
2651  break;
2652  case AVMEDIA_TYPE_SUBTITLE:
2653  if (repeating)
2654  break;
2655  ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2656  if (!pkt && ret >= 0)
2657  ret = AVERROR_EOF;
2658  break;
2659  default:
2660  return -1;
2661  }
2662 
2663  if (ret == AVERROR_EOF) {
2664  eof_reached = 1;
2665  break;
2666  }
2667 
2668  if (ret < 0) {
2669  if (decode_failed) {
2670  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2671  ist->file_index, ist->st->index, av_err2str(ret));
2672  } else {
2673  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2674  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2675  }
2676  if (!decode_failed || exit_on_error)
2677  exit_program(1);
2678  break;
2679  }
2680 
2681  if (got_output)
2682  ist->got_output = 1;
2683 
2684  if (!got_output)
2685  break;
2686 
2687  // During draining, we might get multiple output frames in this loop.
2688  // ffmpeg.c does not drain the filter chain on configuration changes,
2689  // which means if we send multiple frames at once to the filters, and
2690  // one of those frames changes configuration, the buffered frames will
2691  // be lost. This can upset certain FATE tests.
2692  // Decode only 1 frame per call on EOF to appease these FATE tests.
2693  // The ideal solution would be to rewrite decoding to use the new
2694  // decoding API in a better way.
2695  if (!pkt)
2696  break;
2697 
2698  repeating = 1;
2699  }
2700 
2701  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2702  /* except when looping we need to flush but not to send an EOF */
2703  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2704  int ret = send_filter_eof(ist);
2705  if (ret < 0) {
2706  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2707  exit_program(1);
2708  }
2709  }
2710 
2711  /* handle stream copy */
2712  if (!ist->decoding_needed && pkt) {
2713  ist->dts = ist->next_dts;
2714  switch (ist->dec_ctx->codec_type) {
2715  case AVMEDIA_TYPE_AUDIO:
2716  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2717  ist->dec_ctx->sample_rate;
2718  break;
2719  case AVMEDIA_TYPE_VIDEO:
2720  if (ist->framerate.num) {
2721  // TODO: Remove work-around for c99-to-c89 issue 7
2722  AVRational time_base_q = AV_TIME_BASE_Q;
2723  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2724  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2725  } else if (pkt->duration) {
2726  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2727  } else if(ist->dec_ctx->framerate.num != 0) {
2728  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2729  ist->next_dts += ((int64_t)AV_TIME_BASE *
2730  ist->dec_ctx->framerate.den * ticks) /
2731  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2732  }
2733  break;
2734  }
2735  ist->pts = ist->dts;
2736  ist->next_pts = ist->next_dts;
2737  }
2738  for (i = 0; i < nb_output_streams; i++) {
2739  OutputStream *ost = output_streams[i];
2740 
2741  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2742  continue;
2743 
2744  do_streamcopy(ist, ost, pkt);
2745  }
2746 
2747  return !eof_reached;
2748 }
2749 
2750 static void print_sdp(void)
2751 {
2752  char sdp[16384];
2753  int i;
2754  int j;
2755  AVIOContext *sdp_pb;
2756  AVFormatContext **avc;
2757 
2758  for (i = 0; i < nb_output_files; i++) {
2759  if (!output_files[i]->header_written)
2760  return;
2761  }
2762 
2763  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2764  if (!avc)
2765  exit_program(1);
2766  for (i = 0, j = 0; i < nb_output_files; i++) {
2767  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2768  avc[j] = output_files[i]->ctx;
2769  j++;
2770  }
2771  }
2772 
2773  if (!j)
2774  goto fail;
2775 
2776  av_sdp_create(avc, j, sdp, sizeof(sdp));
2777 
2778  if (!sdp_filename) {
2779  printf("SDP:\n%s\n", sdp);
2780  fflush(stdout);
2781  } else {
2782  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2783  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2784  } else {
2785  avio_printf(sdp_pb, "SDP:\n%s", sdp);
2786  avio_closep(&sdp_pb);
2787  av_freep(&sdp_filename);
2788  }
2789  }
2790 
2791 fail:
2792  av_freep(&avc);
2793 }
2794 
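/* AVCodecContext.get_format() callback: pick the first offered pixel format for
 * which the requested hwaccel can be initialized, otherwise fall back to the
 * first non-hwaccel (software) format in the list. */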
2795 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2796 {
2797  InputStream *ist = s->opaque;
2798  const enum AVPixelFormat *p;
2799  int ret;
2800 
2801  for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2802  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2803  const AVCodecHWConfig *config = NULL;
2804  int i;
2805 
2806  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2807  break;
2808 
2809  if (ist->hwaccel_id == HWACCEL_GENERIC ||
2810  ist->hwaccel_id == HWACCEL_AUTO) {
2811  for (i = 0;; i++) {
2812  config = avcodec_get_hw_config(s->codec, i);
2813  if (!config)
2814  break;
2815  if (!(config->methods &
2816  AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
2817  continue;
2818  if (config->pix_fmt == *p)
2819  break;
2820  }
2821  }
2822  if (config) {
2823  if (config->device_type != ist->hwaccel_device_type) {
2824  // Different hwaccel offered, ignore.
2825  continue;
2826  }
2827 
2828  ret = hwaccel_decode_init(s);
2829  if (ret < 0) {
2830  if (ist->hwaccel_id == HWACCEL_GENERIC) {
2831  av_log(NULL, AV_LOG_FATAL,
2832  "%s hwaccel requested for input stream #%d:%d, "
2833  "but cannot be initialized.\n",
2834  av_hwdevice_get_type_name(config->device_type),
2835  ist->file_index, ist->st->index);
2836  return AV_PIX_FMT_NONE;
2837  }
2838  continue;
2839  }
2840  } else {
2841  const HWAccel *hwaccel = NULL;
2842  int i;
2843  for (i = 0; hwaccels[i].name; i++) {
2844  if (hwaccels[i].pix_fmt == *p) {
2845  hwaccel = &hwaccels[i];
2846  break;
2847  }
2848  }
2849  if (!hwaccel) {
2850  // No hwaccel supporting this pixfmt.
2851  continue;
2852  }
2853  if (hwaccel->id != ist->hwaccel_id) {
2854  // Does not match requested hwaccel.
2855  continue;
2856  }
2857 
2858  ret = hwaccel->init(s);
2859  if (ret < 0) {
2860  av_log(NULL, AV_LOG_FATAL,
2861  "%s hwaccel requested for input stream #%d:%d, "
2862  "but cannot be initialized.\n", hwaccel->name,
2863  ist->file_index, ist->st->index);
2864  return AV_PIX_FMT_NONE;
2865  }
2866  }
2867 
2868  if (ist->hw_frames_ctx) {
2869  s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2870  if (!s->hw_frames_ctx)
2871  return AV_PIX_FMT_NONE;
2872  }
2873 
2874  ist->hwaccel_pix_fmt = *p;
2875  break;
2876  }
2877 
2878  return *p;
2879 }
2880 
2881 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2882 {
2883  InputStream *ist = s->opaque;
2884 
2885  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2886  return ist->hwaccel_get_buffer(s, frame, flags);
2887 
2888  return avcodec_default_get_buffer2(s, frame, flags);
2889 }
2890 
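/* Open the decoder for one input stream (when decoding is needed) and install
 * the hwaccel callbacks and decoder options configured on the command line. */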
2891 static int init_input_stream(int ist_index, char *error, int error_len)
2892 {
2893  int ret;
2894  InputStream *ist = input_streams[ist_index];
2895 
2896  if (ist->decoding_needed) {
2897  AVCodec *codec = ist->dec;
2898  if (!codec) {
2899  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2900  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2901  return AVERROR(EINVAL);
2902  }
2903 
2904  ist->dec_ctx->opaque = ist;
2905  ist->dec_ctx->get_format = get_format;
2906  ist->dec_ctx->get_buffer2 = get_buffer;
2907  ist->dec_ctx->thread_safe_callbacks = 1;
2908 
2909  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2910  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2911  (ist->decoding_needed & DECODING_FOR_OST)) {
2912  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2913  if (ist->decoding_needed & DECODING_FOR_FILTER)
2914  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2915  }
2916 
2917  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2918 
2919  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2920  * audio, and video decoders such as cuvid or mediacodec */
2921  ist->dec_ctx->pkt_timebase = ist->st->time_base;
2922 
2923  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2924  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2925  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2926  if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2927  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2928 
2929  ret = hw_device_setup_for_decode(ist);
2930  if (ret < 0) {
2931  snprintf(error, error_len, "Device setup failed for "
2932  "decoder on input stream #%d:%d : %s",
2933  ist->file_index, ist->st->index, av_err2str(ret));
2934  return ret;
2935  }
2936 
2937  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2938  if (ret == AVERROR_EXPERIMENTAL)
2939  abort_codec_experimental(codec, 0);
2940 
2941  snprintf(error, error_len,
2942  "Error while opening decoder for input stream "
2943  "#%d:%d : %s",
2944  ist->file_index, ist->st->index, av_err2str(ret));
2945  return ret;
2946  }
2947  assert_avoptions(ist->decoder_opts);
2948  }
2949 
2950  ist->next_pts = AV_NOPTS_VALUE;
2951  ist->next_dts = AV_NOPTS_VALUE;
2952 
2953  return 0;
2954 }
2955 
2956 static InputStream *get_input_stream(OutputStream *ost)
2957 {
2958  if (ost->source_index >= 0)
2959  return input_streams[ost->source_index];
2960  return NULL;
2961 }
2962 
2963 static int compare_int64(const void *a, const void *b)
2964 {
2965  return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
2966 }
2967 
2968 /* open the muxer when all the streams are initialized */
2969 static int check_init_output_file(OutputFile *of, int file_index)
2970 {
2971  int ret, i;
2972 
2973  for (i = 0; i < of->ctx->nb_streams; i++) {
2974  OutputStream *ost = output_streams[of->ost_index + i];
2975  if (!ost->initialized)
2976  return 0;
2977  }
2978 
2979  of->ctx->interrupt_callback = int_cb;
2980 
2981  ret = avformat_write_header(of->ctx, &of->opts);
2982  if (ret < 0) {
2983  av_log(NULL, AV_LOG_ERROR,
2984  "Could not write header for output file #%d "
2985  "(incorrect codec parameters ?): %s\n",
2986  file_index, av_err2str(ret));
2987  return ret;
2988  }
2989  //assert_avoptions(of->opts);
2990  of->header_written = 1;
2991 
2992  av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2993 
2994  if (sdp_filename || want_sdp)
2995  print_sdp();
2996 
2997  /* flush the muxing queues */
2998  for (i = 0; i < of->ctx->nb_streams; i++) {
2999  OutputStream *ost = output_streams[of->ost_index + i];
3000 
3001  /* try to improve muxing time_base (only possible if nothing has been written yet) */
3002  if (!av_fifo_size(ost->muxing_queue))
3003  ost->mux_timebase = ost->st->time_base;
3004 
3005  while (av_fifo_size(ost->muxing_queue)) {
3006  AVPacket pkt;
3007  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3008  write_packet(of, &pkt, ost, 1);
3009  }
3010  }
3011 
3012  return 0;
3013 }
3014 
3015 static int init_output_bsfs(OutputStream *ost)
3016 {
3017  AVBSFContext *ctx;
3018  int i, ret;
3019 
3020  if (!ost->nb_bitstream_filters)
3021  return 0;
3022 
3023  for (i = 0; i < ost->nb_bitstream_filters; i++) {
3024  ctx = ost->bsf_ctx[i];
3025 
3026  ret = avcodec_parameters_copy(ctx->par_in,
3027  i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
3028  if (ret < 0)
3029  return ret;
3030 
3031  ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
3032 
3033  ret = av_bsf_init(ctx);
3034  if (ret < 0) {
3035  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3036  ost->bsf_ctx[i]->filter->name);
3037  return ret;
3038  }
3039  }
3040 
3041  ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3042  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3043  if (ret < 0)
3044  return ret;
3045 
3046  ost->st->time_base = ctx->time_base_out;
3047 
3048  return 0;
3049 }
3050 
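/* Set up an output stream that is stream-copied from its input: copy the codec
 * parameters, pick a codec tag the container accepts and carry over timing,
 * disposition and side data. */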
3051 static int init_output_stream_streamcopy(OutputStream *ost)
3052 {
3053  OutputFile *of = output_files[ost->file_index];
3054  InputStream *ist = get_input_stream(ost);
3055  AVCodecParameters *par_dst = ost->st->codecpar;
3056  AVCodecParameters *par_src = ost->ref_par;
3057  AVRational sar;
3058  int i, ret;
3059  uint32_t codec_tag = par_dst->codec_tag;
3060 
3061  av_assert0(ist && !ost->filter);
3062 
3063  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3064  if (ret >= 0)
3065  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3066  if (ret < 0) {
3067  av_log(NULL, AV_LOG_FATAL,
3068  "Error setting up codec context options.\n");
3069  return ret;
3070  }
3071  avcodec_parameters_from_context(par_src, ost->enc_ctx);
3072 
3073  if (!codec_tag) {
3074  unsigned int codec_tag_tmp;
3075  if (!of->ctx->oformat->codec_tag ||
3076  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3077  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3078  codec_tag = par_src->codec_tag;
3079  }
3080 
3081  ret = avcodec_parameters_copy(par_dst, par_src);
3082  if (ret < 0)
3083  return ret;
3084 
3085  par_dst->codec_tag = codec_tag;
3086 
3087  if (!ost->frame_rate.num)
3088  ost->frame_rate = ist->framerate;
3089  ost->st->avg_frame_rate = ost->frame_rate;
3090 
3091  ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3092  if (ret < 0)
3093  return ret;
3094 
3095  // copy timebase while removing common factors
3096  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3097  ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3098 
3099  // copy estimated duration as a hint to the muxer
3100  if (ost->st->duration <= 0 && ist->st->duration > 0)
3101  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3102 
3103  // copy disposition
3104  ost->st->disposition = ist->st->disposition;
3105 
3106  if (ist->st->nb_side_data) {
3107  for (i = 0; i < ist->st->nb_side_data; i++) {
3108  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3109  uint8_t *dst_data;
3110 
3111  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3112  if (!dst_data)
3113  return AVERROR(ENOMEM);
3114  memcpy(dst_data, sd_src->data, sd_src->size);
3115  }
3116  }
3117 
3118  if (ost->rotate_overridden) {
3119  uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3120  sizeof(int32_t) * 9);
3121  if (sd)
3122  av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3123  }
3124 
3125  ost->parser = av_parser_init(par_dst->codec_id);
3126  ost->parser_avctx = avcodec_alloc_context3(NULL);
3127  if (!ost->parser_avctx)
3128  return AVERROR(ENOMEM);
3129 
3130  switch (par_dst->codec_type) {
3131  case AVMEDIA_TYPE_AUDIO:
3132  if (audio_volume != 256) {
3133  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3134  exit_program(1);
3135  }
3136  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3137  par_dst->block_align= 0;
3138  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3139  par_dst->block_align= 0;
3140  break;
3141  case AVMEDIA_TYPE_VIDEO:
3142  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3143  sar =
3144  av_mul_q(ost->frame_aspect_ratio,
3145  (AVRational){ par_dst->height, par_dst->width });
3146  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3147  "with stream copy may produce invalid files\n");
3148  }
3149  else if (ist->st->sample_aspect_ratio.num)
3150  sar = ist->st->sample_aspect_ratio;
3151  else
3152  sar = par_src->sample_aspect_ratio;
3153  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3154  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3155  ost->st->r_frame_rate = ist->st->r_frame_rate;
3156  break;
3157  }
3158 
3159  ost->mux_timebase = ist->st->time_base;
3160 
3161  return 0;
3162 }
3163 
3164 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3165 {
3166  AVDictionaryEntry *e;
3167 
3168  uint8_t *encoder_string;
3169  int encoder_string_len;
3170  int format_flags = 0;
3171  int codec_flags = ost->enc_ctx->flags;
3172 
3173  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3174  return;
3175 
3176  e = av_dict_get(of->opts, "fflags", NULL, 0);
3177  if (e) {
3178  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3179  if (!o)
3180  return;
3181  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3182  }
3183  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3184  if (e) {
3185  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3186  if (!o)
3187  return;
3188  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3189  }
3190 
3191  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3192  encoder_string = av_mallocz(encoder_string_len);
3193  if (!encoder_string)
3194  exit_program(1);
3195 
3196  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3197  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3198  else
3199  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3200  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3201  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3202  AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
3203 }
3204 
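/* Parse the -force_key_frames argument: a comma-separated list of timestamps,
 * where an entry starting with "chapters" expands to the chapter start times
 * (plus an optional offset); the result is sorted into ost->forced_kf_pts. */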
3205 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3206  AVCodecContext *avctx)
3207 {
3208  char *p;
3209  int n = 1, i, size, index = 0;
3210  int64_t t, *pts;
3211 
3212  for (p = kf; *p; p++)
3213  if (*p == ',')
3214  n++;
3215  size = n;
3216  pts = av_malloc_array(size, sizeof(*pts));
3217  if (!pts) {
3218  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3219  exit_program(1);
3220  }
3221 
3222  p = kf;
3223  for (i = 0; i < n; i++) {
3224  char *next = strchr(p, ',');
3225 
3226  if (next)
3227  *next++ = 0;
3228 
3229  if (!memcmp(p, "chapters", 8)) {
3230 
3231  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3232  int j;
3233 
3234  if (avf->nb_chapters > INT_MAX - size ||
3235  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3236  sizeof(*pts)))) {
3237  av_log(NULL, AV_LOG_FATAL,
3238  "Could not allocate forced key frames array.\n");
3239  exit_program(1);
3240  }
3241  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3242  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3243 
3244  for (j = 0; j < avf->nb_chapters; j++) {
3245  AVChapter *c = avf->chapters[j];
3246  av_assert1(index < size);
3247  pts[index++] = av_rescale_q(c->start, c->time_base,
3248  avctx->time_base) + t;
3249  }
3250 
3251  } else {
3252 
3253  t = parse_time_or_die("force_key_frames", p, 1);
3254  av_assert1(index < size);
3255  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3256 
3257  }
3258 
3259  p = next;
3260  }
3261 
3262  av_assert0(index == size);
3263  qsort(pts, size, sizeof(*pts), compare_int64);
3264  ost->forced_kf_count = size;
3265  ost->forced_kf_pts = pts;
3266 }
3267 
3268 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3269 {
3270  InputStream *ist = get_input_stream(ost);
3271  AVCodecContext *enc_ctx = ost->enc_ctx;
3272  AVFormatContext *oc;
3273 
3274  if (ost->enc_timebase.num > 0) {
3275  enc_ctx->time_base = ost->enc_timebase;
3276  return;
3277  }
3278 
3279  if (ost->enc_timebase.num < 0) {
3280  if (ist) {
3281  enc_ctx->time_base = ist->st->time_base;
3282  return;
3283  }
3284 
3285  oc = output_files[ost->file_index]->ctx;
3286  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3287  }
3288 
3289  enc_ctx->time_base = default_time_base;
3290 }
3291 
3292 static int init_output_stream_encode(OutputStream *ost)
3293 {
3294  InputStream *ist = get_input_stream(ost);
3295  AVCodecContext *enc_ctx = ost->enc_ctx;
3296  AVCodecContext *dec_ctx = NULL;
3297  AVFormatContext *oc = output_files[ost->file_index]->ctx;
3298  int j, ret;
3299 
3300  set_encoder_id(output_files[ost->file_index], ost);
3301 
3302  // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3303  // hand, the legacy API makes demuxers set "rotate" metadata entries,
3304  // which have to be filtered out to prevent leaking them to output files.
3305  av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3306 
3307  if (ist) {
3308  ost->st->disposition = ist->st->disposition;
3309 
3310  dec_ctx = ist->dec_ctx;
3311 
3312  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3313  } else {
3314  for (j = 0; j < oc->nb_streams; j++) {
3315  AVStream *st = oc->streams[j];
3316  if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3317  break;
3318  }
3319  if (j == oc->nb_streams)
3320  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3321  ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3322  ost->st->disposition = AV_DISPOSITION_DEFAULT;
3323  }
3324 
3325  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3326  if (!ost->frame_rate.num)
3327  ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3328  if (ist && !ost->frame_rate.num)
3329  ost->frame_rate = ist->framerate;
3330  if (ist && !ost->frame_rate.num)
3331  ost->frame_rate = ist->st->r_frame_rate;
3332  if (ist && !ost->frame_rate.num) {
3333  ost->frame_rate = (AVRational){25, 1};
3334  av_log(NULL, AV_LOG_WARNING,
3335  "No information "
3336  "about the input framerate is available. Falling "
3337  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3338  "if you want a different framerate.\n",
3339  ost->file_index, ost->index);
3340  }
3341 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3342  if (ost->enc->supported_framerates && !ost->force_fps) {
3343  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3344  ost->frame_rate = ost->enc->supported_framerates[idx];
3345  }
3346  // reduce frame rate for mpeg4 to be within the spec limits
3347  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3348  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3349  ost->frame_rate.num, ost->frame_rate.den, 65535);
3350  }
3351  }
3352 
3353  switch (enc_ctx->codec_type) {
3354  case AVMEDIA_TYPE_AUDIO:
3355  enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3356  if (dec_ctx)
3357  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3358  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3359  enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3360  enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3361  enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3362 
3363  init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3364  break;
3365 
3366  case AVMEDIA_TYPE_VIDEO:
3367  init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3368 
3369  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3370  enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3371  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3372  && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3373  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3374  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3375  }
3376  for (j = 0; j < ost->forced_kf_count; j++)
3377  ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3378  av_buffersink_get_time_base(ost->filter->filter),
3379  enc_ctx->time_base);
3380 
3381  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3382  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3383  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3384  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3385  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3386  av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3387 
3388  enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3389  if (dec_ctx)
3390  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3391  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3392 
3393  enc_ctx->framerate = ost->frame_rate;
3394 
3395  ost->st->avg_frame_rate = ost->frame_rate;
3396 
3397  if (!dec_ctx ||
3398  enc_ctx->width != dec_ctx->width ||
3399  enc_ctx->height != dec_ctx->height ||
3400  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3401  enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3402  }
3403 
3404  if (ost->forced_keyframes) {
3405  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3406  ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3407  forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3408  if (ret < 0) {
3409  av_log(NULL, AV_LOG_ERROR,
3410  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3411  return ret;
3412  }
3413  ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3414  ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3415  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3416  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3417 
3418  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3419  // parse it only for static kf timings
3420  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3421  parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3422  }
3423  }
3424  break;
3425  case AVMEDIA_TYPE_SUBTITLE:
3426  enc_ctx->time_base = AV_TIME_BASE_Q;
3427  if (!enc_ctx->width) {
3428  enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3429  enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3430  }
3431  break;
3432  case AVMEDIA_TYPE_DATA:
3433  break;
3434  default:
3435  abort();
3436  break;
3437  }
3438 
3439  ost->mux_timebase = enc_ctx->time_base;
3440 
3441  return 0;
3442 }
3443 
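/* Finish setting up one output stream: open the encoder or prepare stream copy,
 * apply user-specified dispositions, initialize bitstream filters and, once all
 * streams of the file are ready, write the output header. */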
3444 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3445 {
3446  int ret = 0;
3447 
3448  if (ost->encoding_needed) {
3449  AVCodec *codec = ost->enc;
3450  AVCodecContext *dec = NULL;
3451  InputStream *ist;
3452 
3453  ret = init_output_stream_encode(ost);
3454  if (ret < 0)
3455  return ret;
3456 
3457  if ((ist = get_input_stream(ost)))
3458  dec = ist->dec_ctx;
3459  if (dec && dec->subtitle_header) {
3460  /* ASS code assumes this buffer is null terminated so add extra byte. */
3461  ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3462  if (!ost->enc_ctx->subtitle_header)
3463  return AVERROR(ENOMEM);
3464  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3465  ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3466  }
3467  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3468  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3469  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3470  !codec->defaults &&
3471  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3472  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3473  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3474 
3475  if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3476  ((AVHWFramesContext*)av_buffersink_get_hw_frames_ctx(ost->filter->filter)->data)->format ==
3477  av_buffersink_get_format(ost->filter->filter)) {
3478  ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
3479  if (!ost->enc_ctx->hw_frames_ctx)
3480  return AVERROR(ENOMEM);
3481  } else {
3482  ret = hw_device_setup_for_encode(ost);
3483  if (ret < 0) {
3484  snprintf(error, error_len, "Device setup failed for "
3485  "encoder on output stream #%d:%d : %s",
3486  ost->file_index, ost->index, av_err2str(ret));
3487  return ret;
3488  }
3489  }
3490 
3491  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3492  if (ret == AVERROR_EXPERIMENTAL)
3493  abort_codec_experimental(codec, 1);
3494  snprintf(error, error_len,
3495  "Error while opening encoder for output stream #%d:%d - "
3496  "maybe incorrect parameters such as bit_rate, rate, width or height",
3497  ost->file_index, ost->index);
3498  return ret;
3499  }
3500  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3501  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3502  av_buffersink_set_frame_size(ost->filter->filter,
3503  ost->enc_ctx->frame_size);
3504  assert_avoptions(ost->encoder_opts);
3505  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3506  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3507  " It takes bits/s as argument, not kbits/s\n");
3508 
3509  ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3510  if (ret < 0) {
3511  av_log(NULL, AV_LOG_FATAL,
3512  "Error initializing the output stream codec context.\n");
3513  exit_program(1);
3514  }
3515  /*
3516  * FIXME: ost->st->codec shouldn't be needed here anymore.
3517  */
3518  ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3519  if (ret < 0)
3520  return ret;
3521 
3522  if (ost->enc_ctx->nb_coded_side_data) {
3523  int i;
3524 
3525  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3526  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3527  uint8_t *dst_data;
3528 
3529  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3530  if (!dst_data)
3531  return AVERROR(ENOMEM);
3532  memcpy(dst_data, sd_src->data, sd_src->size);
3533  }
3534  }
3535 
3536  /*
3537  * Add global input side data. For now this is naive, and copies it
3538  * from the input stream's global side data. All side data should
3539  * really be funneled over AVFrame and libavfilter, then added back to
3540  * packet side data, and then potentially using the first packet for
3541  * global side data.
3542  */
3543  if (ist) {
3544  int i;
3545  for (i = 0; i < ist->st->nb_side_data; i++) {
3546  AVPacketSideData *sd = &ist->st->side_data[i];
3547  uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3548  if (!dst)
3549  return AVERROR(ENOMEM);
3550  memcpy(dst, sd->data, sd->size);
3551  if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3552  av_display_rotation_set((uint32_t *)dst, 0);
3553  }
3554  }
3555 
3556  // copy timebase while removing common factors
3557  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3558  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3559 
3560  // copy estimated duration as a hint to the muxer
3561  if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3562  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3563 
3564  ost->st->codec->codec= ost->enc_ctx->codec;
3565  } else if (ost->stream_copy) {
3566  ret = init_output_stream_streamcopy(ost);
3567  if (ret < 0)
3568  return ret;
3569 
3570  /*
3571  * FIXME: will the codec context used by the parser during streamcopy
3572  * stay up to date? This should go away with the new parser API.
3573  */
3574  ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3575  if (ret < 0)
3576  return ret;
3577  }
3578 
3579  // parse user provided disposition, and update stream values
3580  if (ost->disposition) {
3581  static const AVOption opts[] = {
3582  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3583  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3584  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3585  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3586  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3587  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3588  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3589  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3590  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3591  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3592  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3593  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3594  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3595  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3596  { NULL },
3597  };
3598  static const AVClass class = {
3599  .class_name = "",
3600  .item_name = av_default_item_name,
3601  .option = opts,
3602  .version = LIBAVUTIL_VERSION_INT,
3603  };
3604  const AVClass *pclass = &class;
3605 
3606  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3607  if (ret < 0)
3608  return ret;
3609  }
3610 
3611  /* initialize bitstream filters for the output stream
3612  * needs to be done here, because the codec id for streamcopy is not
3613  * known until now */
3614  ret = init_output_bsfs(ost);
3615  if (ret < 0)
3616  return ret;
3617 
3618  ost->initialized = 1;
3619 
3620  ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3621  if (ret < 0)
3622  return ret;
3623 
3624  return ret;
3625 }
3626 
3627 static void report_new_stream(int input_index, AVPacket *pkt)
3628 {
3629  InputFile *file = input_files[input_index];
3630  AVStream *st = file->ctx->streams[pkt->stream_index];
3631 
3632  if (pkt->stream_index < file->nb_streams_warn)
3633  return;
3634  av_log(file->ctx, AV_LOG_WARNING,
3635  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3636  av_get_media_type_string(st->codecpar->codec_type),
3637  input_index, pkt->stream_index,
3638  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3639  file->nb_streams_warn = pkt->stream_index + 1;
3640 }
3641 
3642 static int transcode_init(void)
3643 {
3644  int ret = 0, i, j, k;
3645  AVFormatContext *oc;
3646  OutputStream *ost;
3647  InputStream *ist;
3648  char error[1024] = {0};
3649 
3650  for (i = 0; i < nb_filtergraphs; i++) {
3651  FilterGraph *fg = filtergraphs[i];
3652  for (j = 0; j < fg->nb_outputs; j++) {
3653  OutputFilter *ofilter = fg->outputs[j];
3654  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3655  continue;
3656  if (fg->nb_inputs != 1)
3657  continue;
3658  for (k = nb_input_streams-1; k >= 0 ; k--)
3659  if (fg->inputs[0]->ist == input_streams[k])
3660  break;
3661  ofilter->ost->source_index = k;
3662  }
3663  }
3664 
3665  /* init framerate emulation */
3666  for (i = 0; i < nb_input_files; i++) {
3667  InputFile *ifile = input_files[i];
3668  if (ifile->rate_emu)
3669  for (j = 0; j < ifile->nb_streams; j++)
3670  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3671  }
3672 
3673  /* init input streams */
3674  for (i = 0; i < nb_input_streams; i++)
3675  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3676  for (i = 0; i < nb_output_streams; i++) {
3677  ost = output_streams[i];
3678  avcodec_close(ost->enc_ctx);
3679  }
3680  goto dump_format;
3681  }
3682 
3683  /* open each encoder */
3684  for (i = 0; i < nb_output_streams; i++) {
3685  // skip streams fed from filtergraphs until we have a frame for them
3686  if (output_streams[i]->filter)
3687  continue;
3688 
3689  ret = init_output_stream(output_streams[i], error, sizeof(error));
3690  if (ret < 0)
3691  goto dump_format;
3692  }
3693 
3694  /* discard unused programs */
3695  for (i = 0; i < nb_input_files; i++) {
3696  InputFile *ifile = input_files[i];
3697  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3698  AVProgram *p = ifile->ctx->programs[j];
3699  int discard = AVDISCARD_ALL;
3700 
3701  for (k = 0; k < p->nb_stream_indexes; k++)
3702  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3703  discard = AVDISCARD_DEFAULT;
3704  break;
3705  }
3706  p->discard = discard;
3707  }
3708  }
3709 
3710  /* write headers for files with no streams */
3711  for (i = 0; i < nb_output_files; i++) {
3712  oc = output_files[i]->ctx;
3713  if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3714  ret = check_init_output_file(output_files[i], i);
3715  if (ret < 0)
3716  goto dump_format;
3717  }
3718  }
3719 
3720  dump_format:
3721  /* dump the stream mapping */
3722  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3723  for (i = 0; i < nb_input_streams; i++) {
3724  ist = input_streams[i];
3725 
3726  for (j = 0; j < ist->nb_filters; j++) {
3727  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3728  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3729  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3730  ist->filters[j]->name);
3731  if (nb_filtergraphs > 1)
3732  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3733  av_log(NULL, AV_LOG_INFO, "\n");
3734  }
3735  }
3736  }
3737 
3738  for (i = 0; i < nb_output_streams; i++) {
3739  ost = output_streams[i];
3740 
3741  if (ost->attachment_filename) {
3742  /* an attached file */
3743  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3744  ost->attachment_filename, ost->file_index, ost->index);
3745  continue;
3746  }
3747 
3748  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3749  /* output from a complex graph */
3750  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3751  if (nb_filtergraphs > 1)
3752  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3753 
3754  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3755  ost->index, ost->enc ? ost->enc->name : "?");
3756  continue;
3757  }
3758 
3759  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3760  input_streams[ost->source_index]->file_index,
3761  input_streams[ost->source_index]->st->index,
3762  ost->file_index,
3763  ost->index);
3764  if (ost->sync_ist != input_streams[ost->source_index])
3765  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3766  ost->sync_ist->file_index,
3767  ost->sync_ist->st->index);
3768  if (ost->stream_copy)
3769  av_log(NULL, AV_LOG_INFO, " (copy)");
3770  else {
3771  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3772  const AVCodec *out_codec = ost->enc;
3773  const char *decoder_name = "?";
3774  const char *in_codec_name = "?";
3775  const char *encoder_name = "?";
3776  const char *out_codec_name = "?";
3777  const AVCodecDescriptor *desc;
3778 
3779  if (in_codec) {
3780  decoder_name = in_codec->name;
3781  desc = avcodec_descriptor_get(in_codec->id);
3782  if (desc)
3783  in_codec_name = desc->name;
3784  if (!strcmp(decoder_name, in_codec_name))
3785  decoder_name = "native";
3786  }
3787 
3788  if (out_codec) {
3789  encoder_name = out_codec->name;
3790  desc = avcodec_descriptor_get(out_codec->id);
3791  if (desc)
3792  out_codec_name = desc->name;
3793  if (!strcmp(encoder_name, out_codec_name))
3794  encoder_name = "native";
3795  }
3796 
3797  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3798  in_codec_name, decoder_name,
3799  out_codec_name, encoder_name);
3800  }
3801  av_log(NULL, AV_LOG_INFO, "\n");
3802  }
3803 
3804  if (ret) {
3805  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3806  return ret;
3807  }
3808 
3809  atomic_store(&transcode_init_done, 1);
3810 
3811  return 0;
3812 }
3813 
3814 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3815 static int need_output(void)
3816 {
3817  int i;
3818 
3819  for (i = 0; i < nb_output_streams; i++) {
3820  OutputStream *ost = output_streams[i];
3821  OutputFile *of = output_files[ost->file_index];
3822  AVFormatContext *os = output_files[ost->file_index]->ctx;
3823 
3824  if (ost->finished ||
3825  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3826  continue;
3827  if (ost->frame_number >= ost->max_frames) {
3828  int j;
3829  for (j = 0; j < of->ctx->nb_streams; j++)
3830  close_output_stream(output_streams[of->ost_index + j]);
3831  continue;
3832  }
3833 
3834  return 1;
3835  }
3836 
3837  return 0;
3838 }
3839 
3840 /**
3841  * Select the output stream to process.
3842  *
3843  * @return selected output stream, or NULL if none available
3844  */
3845 static OutputStream *choose_output(void)
3846 {
3847  int i;
3848  int64_t opts_min = INT64_MAX;
3849  OutputStream *ost_min = NULL;
3850 
3851  for (i = 0; i < nb_output_streams; i++) {
3852  OutputStream *ost = output_streams[i];
3853  int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3854  av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3855  AV_TIME_BASE_Q);
3856  if (ost->st->cur_dts == AV_NOPTS_VALUE)
3857  av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3858 
3859  if (!ost->initialized && !ost->inputs_done)
3860  return ost;
3861 
3862  if (!ost->finished && opts < opts_min) {
3863  opts_min = opts;
3864  ost_min = ost->unavailable ? NULL : ost;
3865  }
3866  }
3867  return ost_min;
3868 }
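/* Illustrative sketch (not part of ffmpeg.c): choose_output() orders streams by
 * their last muxed DTS after rescaling each value to the shared AV_TIME_BASE_Q, so
 * streams with different time bases can be compared directly. The comparison key in
 * isolation (helper name is hypothetical): */
static int64_t example_output_sort_key(int64_t cur_dts, AVRational stream_tb)
{
    /* AV_NOPTS_VALUE maps to INT64_MIN so a stream that has not started yet is served next */
    return cur_dts == AV_NOPTS_VALUE ? INT64_MIN
                                     : av_rescale_q(cur_dts, stream_tb, AV_TIME_BASE_Q);
}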
3869 
3870 static void set_tty_echo(int on)
3871 {
3872 #if HAVE_TERMIOS_H
3873  struct termios tty;
3874  if (tcgetattr(0, &tty) == 0) {
3875  if (on) tty.c_lflag |= ECHO;
3876  else tty.c_lflag &= ~ECHO;
3877  tcsetattr(0, TCSANOW, &tty);
3878  }
3879 #endif
3880 }
3881 
3882 static int check_keyboard_interaction(int64_t cur_time)
3883 {
3884  int i, ret, key;
3885  static int64_t last_time;
3886  if (received_nb_signals)
3887  return AVERROR_EXIT;
3888  /* read_key() returns 0 on EOF */
3889  if(cur_time - last_time >= 100000 && !run_as_daemon){
3890  key = read_key();
3891  last_time = cur_time;
3892  }else
3893  key = -1;
3894  if (key == 'q')
3895  return AVERROR_EXIT;
3896  if (key == '+') av_log_set_level(av_log_get_level()+10);
3897  if (key == '-') av_log_set_level(av_log_get_level()-10);
3898  if (key == 's') qp_hist ^= 1;
3899  if (key == 'h'){
3900  if (do_hex_dump){
3901  do_hex_dump = do_pkt_dump = 0;
3902  } else if(do_pkt_dump){
3903  do_hex_dump = 1;
3904  } else
3905  do_pkt_dump = 1;
3906  av_log_set_level(AV_LOG_DEBUG);
3907  }
3908  if (key == 'c' || key == 'C'){
3909  char buf[4096], target[64], command[256], arg[256] = {0};
3910  double time;
3911  int k, n = 0;
3912  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3913  i = 0;
3914  set_tty_echo(1);
3915  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3916  if (k > 0)
3917  buf[i++] = k;
3918  buf[i] = 0;
3919  set_tty_echo(0);
3920  fprintf(stderr, "\n");
3921  if (k > 0 &&
3922  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3923  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3924  target, time, command, arg);
3925  for (i = 0; i < nb_filtergraphs; i++) {
3926  FilterGraph *fg = filtergraphs[i];
3927  if (fg->graph) {
3928  if (time < 0) {
3929  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3930  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3931  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3932  } else if (key == 'c') {
3933  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3934  ret = AVERROR_PATCHWELCOME;
3935  } else {
3936  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3937  if (ret < 0)
3938  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3939  }
3940  }
3941  }
3942  } else {
3943  av_log(NULL, AV_LOG_ERROR,
3944  "Parse error, at least 3 arguments were expected, "
3945  "only %d given in string '%s'\n", n, buf);
3946  }
3947  }
3948  if (key == 'd' || key == 'D'){
3949  int debug=0;
3950  if(key == 'D') {
3951  debug = input_streams[0]->st->codec->debug<<1;
3952  if(!debug) debug = 1;
3953  while(debug & (FF_DEBUG_DCT_COEFF
3954 #if FF_API_DEBUG_MV
3955  |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
3956 #endif
3957  )) //unsupported, would just crash
3958  debug += debug;
3959  }else{
3960  char buf[32];
3961  int k = 0;
3962  i = 0;
3963  set_tty_echo(1);
3964  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3965  if (k > 0)
3966  buf[i++] = k;
3967  buf[i] = 0;
3968  set_tty_echo(0);
3969  fprintf(stderr, "\n");
3970  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3971  fprintf(stderr,"error parsing debug value\n");
3972  }
3973  for(i=0;i<nb_input_streams;i++) {
3974  input_streams[i]->st->codec->debug = debug;
3975  }
3976  for(i=0;i<nb_output_streams;i++) {
3977  OutputStream *ost = output_streams[i];
3978  ost->enc_ctx->debug = debug;
3979  }
3980  if(debug) av_log_set_level(AV_LOG_DEBUG);
3981  fprintf(stderr,"debug=%d\n", debug);
3982  }
3983  if (key == '?'){
3984  fprintf(stderr, "key function\n"
3985  "? show this help\n"
3986  "+ increase verbosity\n"
3987  "- decrease verbosity\n"
3988  "c Send command to first matching filter supporting it\n"
3989  "C Send/Queue command to all matching filters\n"
3990  "D cycle through available debug modes\n"
3991  "h dump packets/hex press to cycle through the 3 states\n"
3992  "q quit\n"
3993  "s Show QP histogram\n"
3994  );
3995  }
3996  return 0;
3997 }
3998 
3999 #if HAVE_PTHREADS
4000 static void *input_thread(void *arg)
4001 {
4002  InputFile *f = arg;
4003  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4004  int ret = 0;
4005 
4006  while (1) {
4007  AVPacket pkt;
4008  ret = av_read_frame(f->ctx, &pkt);
4009 
4010  if (ret == AVERROR(EAGAIN)) {
4011  av_usleep(10000);
4012  continue;
4013  }
4014  if (ret < 0) {
4015  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4016  break;
4017  }
4018  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4019  if (flags && ret == AVERROR(EAGAIN)) {
4020  flags = 0;
4021  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4022  av_log(f->ctx, AV_LOG_WARNING,
4023  "Thread message queue blocking; consider raising the "
4024  "thread_queue_size option (current value: %d)\n",
4025  f->thread_queue_size);
4026  }
4027  if (ret < 0) {
4028  if (ret != AVERROR_EOF)
4029  av_log(f->ctx, AV_LOG_ERROR,
4030  "Unable to send packet to main thread: %s\n",
4031  av_err2str(ret));
4032  av_packet_unref(&pkt);
4033  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4034  break;
4035  }
4036  }
4037 
4038  return NULL;
4039 }
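/* Illustrative sketch (not part of ffmpeg.c): the demuxer thread above is a plain
 * producer on an AVThreadMessageQueue, with the main thread consuming packets via
 * get_input_packet_mt() below. A minimal producer loop following the same pattern,
 * with hypothetical arguments and assuming libavutil/threadmessage.h is available
 * through ffmpeg.h: */
static void example_packet_producer(AVFormatContext *ctx, AVThreadMessageQueue *queue)
{
    AVPacket pkt;
    while (av_read_frame(ctx, &pkt) >= 0) {
        /* blocking send; on success ownership of pkt's buffers moves to the queue */
        if (av_thread_message_queue_send(queue, &pkt, 0) < 0) {
            av_packet_unref(&pkt);
            break;
        }
    }
    /* wake the consumer with EOF once the input is exhausted */
    av_thread_message_queue_set_err_recv(queue, AVERROR_EOF);
}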
4040 
4041 static void free_input_threads(void)
4042 {
4043  int i;
4044 
4045  for (i = 0; i < nb_input_files; i++) {
4046  InputFile *f = input_files[i];
4047  AVPacket pkt;
4048 
4049  if (!f || !f->in_thread_queue)
4050  continue;
4051  av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
4052  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4053  av_packet_unref(&pkt);
4054 
4055  pthread_join(f->thread, NULL);
4056  f->joined = 1;
4057  av_thread_message_queue_free(&f->in_thread_queue);
4058  }
4059 }
4060 
4061 static int init_input_threads(void)
4062 {
4063  int i, ret;
4064 
4065  if (nb_input_files == 1)
4066  return 0;
4067 
4068  for (i = 0; i < nb_input_files; i++) {
4069  InputFile *f = input_files[i];
4070 
4071  if (f->ctx->pb ? !f->ctx->pb->seekable :
4072  strcmp(f->ctx->iformat->name, "lavfi"))
4073  f->non_blocking = 1;
4074  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4075  f->thread_queue_size, sizeof(AVPacket));
4076  if (ret < 0)
4077  return ret;
4078 
4079  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4080  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4081  av_thread_message_queue_free(&f->in_thread_queue);
4082  return AVERROR(ret);
4083  }
4084  }
4085  return 0;
4086 }
4087 
4088 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4089 {
4090  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4091  f->non_blocking ?
4092  AV_THREAD_MESSAGE_NONBLOCK : 0);
4093 }
4094 #endif
4095 
4096 static int get_input_packet(InputFile *f, AVPacket *pkt)
4097 {
4098  if (f->rate_emu) {
4099  int i;
4100  for (i = 0; i < f->nb_streams; i++) {
4101  InputStream *ist = input_streams[f->ist_index + i];
4102  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4103  int64_t now = av_gettime_relative() - ist->start;
4104  if (pts > now)
4105  return AVERROR(EAGAIN);
4106  }
4107  }
4108 
4109 #if HAVE_PTHREADS
4110  if (nb_input_files > 1)
4111  return get_input_packet_mt(f, pkt);
4112 #endif
4113  return av_read_frame(f->ctx, pkt);
4114 }
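/* Illustrative sketch (not part of ffmpeg.c): with -re (rate_emu), get_input_packet()
 * rescales the stream's DTS to microseconds and compares it against the wall-clock
 * time elapsed since the stream started, returning EAGAIN while the packet is still
 * "in the future". The same arithmetic in isolation (helper name is hypothetical): */
static int example_rate_emu_ready(int64_t dts, AVRational stream_tb, int64_t start_us)
{
    int64_t pts_us = av_rescale_q(dts, stream_tb, AV_TIME_BASE_Q); /* DTS in microseconds */
    int64_t now_us = av_gettime_relative() - start_us;             /* elapsed wall clock */
    return pts_us <= now_us; /* 1: emit the packet now, 0: caller should retry later */
}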
4115 
4116 static int got_eagain(void)
4117 {
4118  int i;
4119  for (i = 0; i < nb_output_streams; i++)
4120  if (output_streams[i]->unavailable)
4121  return 1;
4122  return 0;
4123 }
4124 
4125 static void reset_eagain(void)
4126 {
4127  int i;
4128  for (i = 0; i < nb_input_files; i++)
4129  input_files[i]->eagain = 0;
4130  for (i = 0; i < nb_output_streams; i++)
4131  output_streams[i]->unavailable = 0;
4132 }
4133 
4134 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4135 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4136  AVRational time_base)
4137 {
4138  int ret;
4139 
4140  if (!*duration) {
4141  *duration = tmp;
4142  return tmp_time_base;
4143  }
4144 
4145  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4146  if (ret < 0) {
4147  *duration = tmp;
4148  return tmp_time_base;
4149  }
4150 
4151  return time_base;
4152 }
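/* Illustrative sketch (not part of ffmpeg.c): duration_max() keeps whichever of two
 * durations is longer even when they use different time bases, by comparing them
 * with av_compare_ts(). Worked example with hypothetical values: 3000 units at
 * 1/1000 (3.0 s) loses to 150 units at 1/48 (3.125 s). */
static void example_duration_max(void)
{
    int64_t    duration = 3000;                     /* 3.0 s   */
    AVRational tb       = (AVRational){ 1, 1000 };
    int64_t    cand     = 150;                      /* 3.125 s */
    AVRational cand_tb  = (AVRational){ 1, 48 };

    if (av_compare_ts(duration, tb, cand, cand_tb) < 0) {
        duration = cand;                            /* taken: 3.0 s < 3.125 s */
        tb       = cand_tb;
    }
}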
4153 
4154 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4155 {
4156  InputStream *ist;
4157  AVCodecContext *avctx;
4158  int i, ret, has_audio = 0;
4159  int64_t duration = 0;
4160 
4161  ret = av_seek_frame(is, -1, is->start_time, 0);
4162  if (ret < 0)
4163  return ret;
4164 
4165  for (i = 0; i < ifile->nb_streams; i++) {
4166  ist = input_streams[ifile->ist_index + i];
4167  avctx = ist->dec_ctx;
4168 
4169  // flush decoders
4170  if (ist->decoding_needed) {
4171  process_input_packet(ist, NULL, 1);
4172  avcodec_flush_buffers(avctx);
4173  }
4174 
4175  /* duration is the length of the last frame in a stream
4176  * when audio stream is present we don't care about
4177  * last video frame length because it's not defined exactly */
4178  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4179  has_audio = 1;
4180  }
4181 
4182  for (i = 0; i < ifile->nb_streams; i++) {
4183  ist = input_streams[ifile->ist_index + i];
4184  avctx = ist->dec_ctx;
4185 
4186  if (has_audio) {
4187  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4188  AVRational sample_rate = {1, avctx->sample_rate};
4189 
4190  duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
4191  } else {
4192  continue;
4193  }
4194  } else {
4195  if (ist->framerate.num) {
4196  duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4197  } else if (ist->st->avg_frame_rate.num) {
4198  duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4199  } else {
4200  duration = 1;
4201  }
4202  }
4203  if (!ifile->duration)
4204  ifile->time_base = ist->st->time_base;
4205  /* the total duration of the stream, max_pts - min_pts is
4206  * the duration of the stream without the last frame */
4207  duration += ist->max_pts - ist->min_pts;
4208  ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
4209  ifile->time_base);
4210  }
4211 
4212  if (ifile->loop > 0)
4213  ifile->loop--;
4214 
4215  return ret;
4216 }
4217 
4218 /*
4219  * Return
4220  * - 0 -- one packet was read and processed
4221  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4222  * this function should be called again
4223  * - AVERROR_EOF -- this function should not be called again
4224  */
4225 static int process_input(int file_index)
4226 {
4227  InputFile *ifile = input_files[file_index];
4228  AVFormatContext *is;
4229  InputStream *ist;
4230  AVPacket pkt;
4231  int ret, i, j;
4232  int64_t duration;
4233  int64_t pkt_dts;
4234 
4235  is = ifile->ctx;
4236  ret = get_input_packet(ifile, &pkt);
4237 
4238  if (ret == AVERROR(EAGAIN)) {
4239  ifile->eagain = 1;
4240  return ret;
4241  }
4242  if (ret < 0 && ifile->loop) {
4243  ret = seek_to_start(ifile, is);
4244  if (ret < 0)
4245  av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4246  else
4247  ret = get_input_packet(ifile, &pkt);
4248  if (ret == AVERROR(EAGAIN)) {
4249  ifile->eagain = 1;
4250  return ret;
4251  }
4252  }
4253  if (ret < 0) {
4254  if (ret != AVERROR_EOF) {
4255  print_error(is->filename, ret);
4256  if (exit_on_error)
4257  exit_program(1);
4258  }
4259 
4260  for (i = 0; i < ifile->nb_streams; i++) {
4261  ist = input_streams[ifile->ist_index + i];
4262  if (ist->decoding_needed) {
4263  ret = process_input_packet(ist, NULL, 0);
4264  if (ret>0)
4265  return 0;
4266  }
4267 
4268  /* mark all outputs that don't go through lavfi as finished */
4269  for (j = 0; j < nb_output_streams; j++) {
4270  OutputStream *ost = output_streams[j];
4271 
4272  if (ost->source_index == ifile->ist_index + i &&
4273  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4274  finish_output_stream(ost);
4275  }
4276  }
4277 
4278  ifile->eof_reached = 1;
4279  return AVERROR(EAGAIN);
4280  }
4281 
4282  reset_eagain();
4283 
4284  if (do_pkt_dump) {
4285  av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4286  is->streams[pkt.stream_index]);
4287  }
4288  /* the following test is needed in case new streams appear
4289  dynamically in stream : we ignore them */
4290  if (pkt.stream_index >= ifile->nb_streams) {
4291  report_new_stream(file_index, &pkt);
4292  goto discard_packet;
4293  }
4294 
4295  ist = input_streams[ifile->ist_index + pkt.stream_index];
4296 
4297  ist->data_size += pkt.size;
4298  ist->nb_packets++;
4299 
4300  if (ist->discard)
4301  goto discard_packet;
4302 
4303  if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4304  av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4305  exit_program(1);
4306  }
4307 
4308  if (debug_ts) {
4309  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4310  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4311  ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4312  av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4313  av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4314  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4315  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4316  av_ts2str(input_files[ist->file_index]->ts_offset),
4317  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4318  }
4319 
4320  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4321  int64_t stime, stime2;
4322  // Correcting starttime based on the enabled streams
4323  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4324  // so we instead do it here as part of discontinuity handling
4325  if ( ist->next_dts == AV_NOPTS_VALUE
4326  && ifile->ts_offset == -is->start_time
4327  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4328  int64_t new_start_time = INT64_MAX;
4329  for (i=0; i<is->nb_streams; i++) {
4330  AVStream *st = is->streams[i];
4331  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4332  continue;
4333  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4334  }
4335  if (new_start_time > is->start_time) {
4336  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4337  ifile->ts_offset = -new_start_time;
4338  }
4339  }
4340 
4341  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4342  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4343  ist->wrap_correction_done = 1;
4344 
4345  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4346  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4347  ist->wrap_correction_done = 0;
4348  }
4349  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4350  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4351  ist->wrap_correction_done = 0;
4352  }
4353  }
4354 
4355  /* add the stream-global side data to the first packet */
4356  if (ist->nb_packets == 1) {
4357  for (i = 0; i < ist->st->nb_side_data; i++) {
4358  AVPacketSideData *src_sd = &ist->st->side_data[i];
4359  uint8_t *dst_data;
4360 
4361  if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4362  continue;
4363 
4364  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4365  continue;
4366 
4367  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4368  if (!dst_data)
4369  exit_program(1);
4370 
4371  memcpy(dst_data, src_sd->data, src_sd->size);
4372  }
4373  }
4374 
4375  if (pkt.dts != AV_NOPTS_VALUE)
4376  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4377  if (pkt.pts != AV_NOPTS_VALUE)
4378  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4379 
4380  if (pkt.pts != AV_NOPTS_VALUE)
4381  pkt.pts *= ist->ts_scale;
4382  if (pkt.dts != AV_NOPTS_VALUE)
4383  pkt.dts *= ist->ts_scale;
4384 
4385  pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4386  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4387  ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4388  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4389  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4390  int64_t delta = pkt_dts - ifile->last_ts;
4391  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4392  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4393  ifile->ts_offset -= delta;
4394  av_log(NULL, AV_LOG_DEBUG,
4395  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4396  delta, ifile->ts_offset);
4397  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4398  if (pkt.pts != AV_NOPTS_VALUE)
4399  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4400  }
4401  }
4402 
4403  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4404  if (pkt.pts != AV_NOPTS_VALUE) {
4405  pkt.pts += duration;
4406  ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4407  ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4408  }
4409 
4410  if (pkt.dts != AV_NOPTS_VALUE)
4411  pkt.dts += duration;
4412 
4413  pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4414  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4415  ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4416  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4417  !copy_ts) {
4418  int64_t delta = pkt_dts - ist->next_dts;
4419  if (is->iformat->flags & AVFMT_TS_DISCONT) {
4420  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4421  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4422  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4423  ifile->ts_offset -= delta;
4424  av_log(NULL, AV_LOG_DEBUG,
4425  "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4426  delta, ifile->ts_offset);
4427  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4428  if (pkt.pts != AV_NOPTS_VALUE)
4429  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4430  }
4431  } else {
4432  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4433  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4434  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4435  pkt.dts = AV_NOPTS_VALUE;
4436  }
4437  if (pkt.pts != AV_NOPTS_VALUE){
4438  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4439  delta = pkt_pts - ist->next_dts;
4440  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4441  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4442  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4443  pkt.pts = AV_NOPTS_VALUE;
4444  }
4445  }
4446  }
4447  }
4448 
4449  if (pkt.dts != AV_NOPTS_VALUE)
4450  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4451 
4452  if (debug_ts) {
4453  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4454  ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4455  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4456  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4457  av_ts2str(input_files[ist->file_index]->ts_offset),
4458  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4459  }
4460 
4461  sub2video_heartbeat(ist, pkt.pts);
4462 
4463  process_input_packet(ist, &pkt, 0);
4464 
4465 discard_packet:
4466  av_packet_unref(&pkt);
4467 
4468  return 0;
4469 }
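/* Illustrative sketch (not part of ffmpeg.c): the discontinuity handling above folds
 * a DTS jump larger than dts_delta_threshold seconds into the per-file ts_offset and
 * subtracts the same delta from the packet's timestamps, keeping the output timeline
 * monotonic. The core adjustment on hypothetical values, all in AV_TIME_BASE units: */
static void example_discont_adjust(int64_t *ts_offset, int64_t *pkt_dts_us,
                                   int64_t expected_dts_us, double threshold_s)
{
    int64_t delta = *pkt_dts_us - expected_dts_us;
    if (delta < -threshold_s * AV_TIME_BASE || delta > threshold_s * AV_TIME_BASE) {
        *ts_offset  -= delta;   /* later packets from this file are shifted as well */
        *pkt_dts_us -= delta;   /* this packet lands right after the expected DTS */
    }
}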
4470 
4471 /**
4472  * Perform a step of transcoding for the specified filter graph.
4473  *
4474  * @param[in] graph filter graph to consider
4475  * @param[out] best_ist input stream where a frame would allow to continue
4476  * @return 0 for success, <0 for error
4477  */
4478 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4479 {
4480  int i, ret;
4481  int nb_requests, nb_requests_max = 0;
4482  InputFilter *ifilter;
4483  InputStream *ist;
4484 
4485  *best_ist = NULL;
4486  ret = avfilter_graph_request_oldest(graph->graph);
4487  if (ret >= 0)
4488  return reap_filters(0);
4489 
4490  if (ret == AVERROR_EOF) {
4491  ret = reap_filters(1);
4492  for (i = 0; i < graph->nb_outputs; i++)
4493  close_output_stream(graph->outputs[i]->ost);
4494  return ret;
4495  }
4496  if (ret != AVERROR(EAGAIN))
4497  return ret;
4498 
4499  for (i = 0; i < graph->nb_inputs; i++) {
4500  ifilter = graph->inputs[i];
4501  ist = ifilter->ist;
4502  if (input_files[ist->file_index]->eagain ||
4503  input_files[ist->file_index]->eof_reached)
4504  continue;
4505  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4506  if (nb_requests > nb_requests_max) {
4507  nb_requests_max = nb_requests;
4508  *best_ist = ist;
4509  }
4510  }
4511 
4512  if (!*best_ist)
4513  for (i = 0; i < graph->nb_outputs; i++)
4514  graph->outputs[i]->ost->unavailable = 1;
4515 
4516  return 0;
4517 }
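/* Illustrative sketch (not part of ffmpeg.c): when avfilter_graph_request_oldest()
 * reports EAGAIN, the graph is starved and transcode_from_filter() feeds the input
 * whose buffersrc has rejected the most frame requests. The selection loop in
 * isolation, over a hypothetical array of buffersrc filter contexts: */
static int example_pick_starved_input(AVFilterContext **buffersrcs, int n)
{
    int i, best = -1;
    unsigned best_requests = 0;
    for (i = 0; i < n; i++) {
        unsigned req = av_buffersrc_get_nb_failed_requests(buffersrcs[i]);
        if (req > best_requests) {
            best_requests = req;
            best = i;
        }
    }
    return best; /* index of the hungriest input, or -1 if none failed a request */
}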
4518 
4519 /**
4520  * Run a single step of transcoding.
4521  *
4522  * @return 0 for success, <0 for error
4523  */
4524 static int transcode_step(void)
4525 {
4526  OutputStream *ost;
4527  InputStream *ist = NULL;
4528  int ret;
4529 
4530  ost = choose_output();
4531  if (!ost) {
4532  if (got_eagain()) {
4533  reset_eagain();
4534  av_usleep(10000);
4535  return 0;
4536  }
4537  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4538  return AVERROR_EOF;
4539  }
4540 
4541  if (ost->filter && !ost->filter->graph->graph) {
4542  if (ifilter_has_all_input_formats(ost->filter->graph)) {
4543  ret = configure_filtergraph(ost->filter->graph);
4544  if (ret < 0) {
4545  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4546  return ret;
4547  }
4548  }
4549  }
4550 
4551  if (ost->filter && ost->filter->graph->graph) {
4552  if (!ost->initialized) {
4553  char error[1024] = {0};
4554  ret = init_output_stream(ost, error, sizeof(error));
4555  if (ret < 0) {
4556  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
4557  ost->file_index, ost->index, error);
4558  exit_program(1);
4559  }
4560  }
4561  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4562  return ret;
4563  if (!ist)
4564  return 0;
4565  } else if (ost->filter) {
4566  int i;
4567  for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4568  InputFilter *ifilter = ost->filter->graph->inputs[i];
4569  if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4570  ist = ifilter->ist;
4571  break;
4572  }
4573  }
4574  if (!ist) {
4575  ost->inputs_done = 1;
4576  return 0;
4577  }
4578  } else {
4579  av_assert0(ost->source_index >= 0);
4580  ist = input_streams[ost->source_index];
4581  }
4582 
4583  ret = process_input(ist->file_index);
4584  if (ret == AVERROR(EAGAIN)) {
4585  if (input_files[ist->file_index]->eagain)
4586  ost->unavailable = 1;
4587  return 0;
4588  }
4589 
4590  if (ret < 0)
4591  return ret == AVERROR_EOF ? 0 : ret;
4592 
4593  return reap_filters(0);
4594 }
4595 
4596 /*
4597  * The following code is the main loop of the file converter
4598  */
4599 static int transcode(void)
4600 {
4601  int ret, i;
4602  AVFormatContext *os;
4603  OutputStream *ost;
4604  InputStream *ist;
4605  int64_t timer_start;
4606  int64_t total_packets_written = 0;
4607 
4608  ret = transcode_init();
4609  if (ret < 0)
4610  goto fail;
4611 
4612  if (stdin_interaction) {
4613  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4614  }
4615 
4616  timer_start = av_gettime_relative();
4617 
4618 #if HAVE_PTHREADS
4619  if ((ret = init_input_threads()) < 0)
4620  goto fail;
4621 #endif
4622 
4623  while (!received_sigterm) {
4624  int64_t cur_time= av_gettime_relative();
4625 
4626  /* if 'q' pressed, exits */
4627  if (stdin_interaction)
4628  if (check_keyboard_interaction(cur_time) < 0)
4629  break;
4630 
4631  /* check if there's any stream where output is still needed */
4632  if (!need_output()) {
4633  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4634  break;
4635  }
4636 
4637  ret = transcode_step();
4638  if (ret < 0 && ret != AVERROR_EOF) {
4639  char errbuf[128];
4640  av_strerror(ret, errbuf, sizeof(errbuf));
4641 
4642  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4643  break;
4644  }
4645 
4646  /* dump report by using the output first video and audio streams */
4647  print_report(0, timer_start, cur_time);
4648  }
4649 #if HAVE_PTHREADS
4650  free_input_threads();
4651 #endif
4652 
4653  /* at the end of stream, we must flush the decoder buffers */
4654  for (i = 0; i < nb_input_streams; i++) {
4655  ist = input_streams[i];
4656  if (!input_files[ist->file_index]->eof_reached) {
4657  process_input_packet(ist, NULL, 0);
4658  }
4659  }
4660  flush_encoders();
4661 
4662  term_exit();
4663 
4664  /* write the trailer if needed and close file */
4665  for (i = 0; i < nb_output_files; i++) {
4666  os = output_files[i]->ctx;
4667  if (!output_files[i]->header_written) {
4668  av_log(NULL, AV_LOG_WARNING,
4669  "Nothing was written into output file %d (%s), because "
4670  "at least one of its streams received no packets.\n",
4671  i, os->filename);
4672  continue;
4673  }
4674  if ((ret = av_write_trailer(os)) < 0) {
4675  av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->filename, av_err2str(ret));
4676  if (exit_on_error)
4677  exit_program(1);
4678  }
4679  }
4680 
4681  /* dump report by using the first video and audio streams */
4682  print_report(1, timer_start, av_gettime_relative());
4683 
4684  /* close each encoder */
4685  for (i = 0; i < nb_output_streams; i++) {
4686  ost = output_streams[i];
4687  if (ost->encoding_needed) {
4688  av_freep(&ost->enc_ctx->stats_in);
4689  }
4690  total_packets_written += ost->packets_written;