FFmpeg
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavcodec/version.h"
68 #include "libavformat/os_support.h"
69 
70 # include "libavfilter/avfilter.h"
71 # include "libavfilter/buffersrc.h"
72 # include "libavfilter/buffersink.h"
73 
74 #if HAVE_SYS_RESOURCE_H
75 #include <sys/time.h>
76 #include <sys/types.h>
77 #include <sys/resource.h>
78 #elif HAVE_GETPROCESSTIMES
79 #include <windows.h>
80 #endif
81 #if HAVE_GETPROCESSMEMORYINFO
82 #include <windows.h>
83 #include <psapi.h>
84 #endif
85 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <windows.h>
87 #endif
88 
89 
90 #if HAVE_SYS_SELECT_H
91 #include <sys/select.h>
92 #endif
93 
94 #if HAVE_TERMIOS_H
95 #include <fcntl.h>
96 #include <sys/ioctl.h>
97 #include <sys/time.h>
98 #include <termios.h>
99 #elif HAVE_KBHIT
100 #include <conio.h>
101 #endif
102 
103 #include <time.h>
104 
105 #include "ffmpeg.h"
106 #include "cmdutils.h"
107 #include "sync_queue.h"
108 
109 #include "libavutil/avassert.h"
110 
/* Program identification used by cmdutils for banner/version output. */
const char program_name[] = "ffmpeg";
const int program_birth_year = 2000;

/* Log file for per-frame video statistics (-vstats); opened lazily on
   first use in update_video_stats(). */
static FILE *vstats_file;

/* Names of the constants usable inside the forced-keyframes expression
   (frame counts and times of previous forced keyframes); NULL-terminated. */
const char *const forced_keyframes_const_names[] = {
    "n",
    "n_forced",
    "prev_forced_n",
    "prev_forced_t",
    "t",
    NULL
};
124 
125 typedef struct BenchmarkTimeStamps {
126  int64_t real_usec;
127  int64_t user_usec;
128  int64_t sys_usec;
130 
132 static int64_t getmaxrss(void);
134 
135 static int64_t nb_frames_dup = 0;
136 static uint64_t dup_warning = 1000;
137 static int64_t nb_frames_drop = 0;
138 static int64_t decode_error_stat[2];
139 unsigned nb_output_dumped = 0;
140 
143 
148 
153 
156 
#if HAVE_TERMIOS_H

/* init terminal so that we can grab keys */
static struct termios oldtty;   /* terminal attributes saved by term_init() */
static int restore_tty;         /* nonzero once oldtty holds a valid saved state */
#endif
163 
164 /* sub2video hack:
165  Convert subtitles to video with alpha to insert them in filter graphs.
166  This is a temporary solution until libavfilter gets real subtitles support.
167  */
168 
170 {
171  int ret;
172  AVFrame *frame = ist->sub2video.frame;
173 
175  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
176  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
178  if ((ret = av_frame_get_buffer(frame, 0)) < 0)
179  return ret;
180  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
181  return 0;
182 }
183 
184 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
185  AVSubtitleRect *r)
186 {
187  uint32_t *pal, *dst2;
188  uint8_t *src, *src2;
189  int x, y;
190 
191  if (r->type != SUBTITLE_BITMAP) {
192  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
193  return;
194  }
195  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
196  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
197  r->x, r->y, r->w, r->h, w, h
198  );
199  return;
200  }
201 
202  dst += r->y * dst_linesize + r->x * 4;
203  src = r->data[0];
204  pal = (uint32_t *)r->data[1];
205  for (y = 0; y < r->h; y++) {
206  dst2 = (uint32_t *)dst;
207  src2 = src;
208  for (x = 0; x < r->w; x++)
209  *(dst2++) = pal[*(src2++)];
210  dst += dst_linesize;
211  src += r->linesize[0];
212  }
213 }
214 
215 static void sub2video_push_ref(InputStream *ist, int64_t pts)
216 {
217  AVFrame *frame = ist->sub2video.frame;
218  int i;
219  int ret;
220 
221  av_assert1(frame->data[0]);
222  ist->sub2video.last_pts = frame->pts = pts;
223  for (i = 0; i < ist->nb_filters; i++) {
227  if (ret != AVERROR_EOF && ret < 0)
228  av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
229  av_err2str(ret));
230  }
231 }
232 
233 void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
234 {
235  AVFrame *frame = ist->sub2video.frame;
236  int8_t *dst;
237  int dst_linesize;
238  int num_rects, i;
239  int64_t pts, end_pts;
240 
241  if (!frame)
242  return;
243  if (sub) {
244  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
245  AV_TIME_BASE_Q, ist->st->time_base);
246  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
247  AV_TIME_BASE_Q, ist->st->time_base);
248  num_rects = sub->num_rects;
249  } else {
250  /* If we are initializing the system, utilize current heartbeat
251  PTS as the start time, and show until the following subpicture
252  is received. Otherwise, utilize the previous subpicture's end time
253  as the fall-back value. */
254  pts = ist->sub2video.initialize ?
255  heartbeat_pts : ist->sub2video.end_pts;
256  end_pts = INT64_MAX;
257  num_rects = 0;
258  }
259  if (sub2video_get_blank_frame(ist) < 0) {
261  "Impossible to get a blank canvas.\n");
262  return;
263  }
264  dst = frame->data [0];
265  dst_linesize = frame->linesize[0];
266  for (i = 0; i < num_rects; i++)
267  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
268  sub2video_push_ref(ist, pts);
269  ist->sub2video.end_pts = end_pts;
270  ist->sub2video.initialize = 0;
271 }
272 
273 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
274 {
275  InputFile *infile = input_files[ist->file_index];
276  int i, j, nb_reqs;
277  int64_t pts2;
278 
279  /* When a frame is read from a file, examine all sub2video streams in
280  the same file and send the sub2video frame again. Otherwise, decoded
281  video frames could be accumulating in the filter graph while a filter
282  (possibly overlay) is desperately waiting for a subtitle frame. */
283  for (i = 0; i < infile->nb_streams; i++) {
284  InputStream *ist2 = input_streams[infile->ist_index + i];
285  if (!ist2->sub2video.frame)
286  continue;
287  /* subtitles seem to be usually muxed ahead of other streams;
288  if not, subtracting a larger time here is necessary */
289  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
290  /* do not send the heartbeat frame if the subtitle is already ahead */
291  if (pts2 <= ist2->sub2video.last_pts)
292  continue;
293  if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
294  /* if we have hit the end of the current displayed subpicture,
295  or if we need to initialize the system, update the
296  overlayed subpicture and its start/end times */
297  sub2video_update(ist2, pts2 + 1, NULL);
298  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
299  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
300  if (nb_reqs)
301  sub2video_push_ref(ist2, pts2);
302  }
303 }
304 
305 static void sub2video_flush(InputStream *ist)
306 {
307  int i;
308  int ret;
309 
310  if (ist->sub2video.end_pts < INT64_MAX)
311  sub2video_update(ist, INT64_MAX, NULL);
312  for (i = 0; i < ist->nb_filters; i++) {
314  if (ret != AVERROR_EOF && ret < 0)
315  av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
316  }
317 }
318 
319 /* end of sub2video hack */
320 
/* Async-signal-safe part of terminal cleanup: restore the terminal
 * attributes saved in oldtty by term_init().  Only tcsetattr() is used
 * here, which POSIX lists as async-signal-safe. */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if(restore_tty)
        tcsetattr (0, TCSANOW, &oldtty);
#endif
}
328 
329 void term_exit(void)
330 {
331  av_log(NULL, AV_LOG_QUIET, "%s", "");
333 }
334 
335 static volatile int received_sigterm = 0;
336 static volatile int received_nb_signals = 0;
338 static volatile int ffmpeg_exited = 0;
341 
342 static void
344 {
345  int ret;
346  received_sigterm = sig;
349  if(received_nb_signals > 3) {
350  ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
351  strlen("Received > 3 system signals, hard exiting\n"));
352  if (ret < 0) { /* Do nothing */ };
353  exit(123);
354  }
355 }
356 
357 #if HAVE_SETCONSOLECTRLHANDLER
358 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
359 {
360  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
361 
362  switch (fdwCtrlType)
363  {
364  case CTRL_C_EVENT:
365  case CTRL_BREAK_EVENT:
366  sigterm_handler(SIGINT);
367  return TRUE;
368 
369  case CTRL_CLOSE_EVENT:
370  case CTRL_LOGOFF_EVENT:
371  case CTRL_SHUTDOWN_EVENT:
372  sigterm_handler(SIGTERM);
373  /* Basically, with these 3 events, when we return from this method the
374  process is hard terminated, so stall as long as we need to
375  to try and let the main thread(s) clean up and gracefully terminate
376  (we have at most 5 seconds, but should be done far before that). */
377  while (!ffmpeg_exited) {
378  Sleep(0);
379  }
380  return TRUE;
381 
382  default:
383  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
384  return FALSE;
385  }
386 }
387 #endif
388 
/* On Linux install handlers with sigaction() so the mask and flags set up
   in term_init() (block other signals while handling, SA_RESTART) take
   effect; elsewhere fall back to plain signal().  Note: the Linux variant
   expands to code referencing a local `struct sigaction action`, so it is
   only usable where such a variable is in scope (see term_init()). */
#ifdef __linux__
#define SIGNAL(sig, func) \
    do { \
        action.sa_handler = func; \
        sigaction(sig, &action, NULL); \
    } while (0)
#else
#define SIGNAL(sig, func) \
    signal(sig, func)
#endif
399 
/* Install the termination signal handlers and, when stdin interaction is
 * enabled, put the controlling terminal into raw (non-canonical, no-echo)
 * mode so single key presses can be read by read_key().  The previous
 * terminal state is saved in oldtty and restored by term_exit_sigsafe(). */
void term_init(void)
{
#if defined __linux__
    struct sigaction action = {0};
    action.sa_handler = sigterm_handler;

    /* block other interrupts while processing this one */
    sigfillset(&action.sa_mask);

    /* restart interruptible functions (i.e. don't fail with EINTR) */
    action.sa_flags = SA_RESTART;
#endif

#if HAVE_TERMIOS_H
    if (stdin_interaction) {
        struct termios tty;
        if (tcgetattr (0, &tty) == 0) {
            /* remember the original settings so they can be restored */
            oldtty = tty;
            restore_tty = 1;

            /* disable input translations and XON/XOFF flow control,
               keep output post-processing, and switch to non-canonical
               mode without echo (ISIG is kept, so ^C still signals) */
            tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
                            |INLCR|IGNCR|ICRNL|IXON);
            tty.c_oflag |= OPOST;
            tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
            tty.c_cflag &= ~(CSIZE|PARENB);
            tty.c_cflag |= CS8;
            /* read() returns as soon as a single byte is available */
            tty.c_cc[VMIN] = 1;
            tty.c_cc[VTIME] = 0;

            tcsetattr (0, TCSANOW, &tty);
        }
        SIGNAL(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
    }
#endif

    SIGNAL(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
    SIGNAL(SIGTERM, sigterm_handler); /* Termination (ANSI). */
#ifdef SIGXCPU
    SIGNAL(SIGXCPU, sigterm_handler);
#endif
#ifdef SIGPIPE
    signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
#endif
#if HAVE_SETCONSOLECTRLHANDLER
    SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
#endif
}
447 
448 /* read a key without blocking */
449 static int read_key(void)
450 {
451  unsigned char ch;
452 #if HAVE_TERMIOS_H
453  int n = 1;
454  struct timeval tv;
455  fd_set rfds;
456 
457  FD_ZERO(&rfds);
458  FD_SET(0, &rfds);
459  tv.tv_sec = 0;
460  tv.tv_usec = 0;
461  n = select(1, &rfds, NULL, NULL, &tv);
462  if (n > 0) {
463  n = read(0, &ch, 1);
464  if (n == 1)
465  return ch;
466 
467  return n;
468  }
469 #elif HAVE_KBHIT
470 # if HAVE_PEEKNAMEDPIPE
471  static int is_pipe;
472  static HANDLE input_handle;
473  DWORD dw, nchars;
474  if(!input_handle){
475  input_handle = GetStdHandle(STD_INPUT_HANDLE);
476  is_pipe = !GetConsoleMode(input_handle, &dw);
477  }
478 
479  if (is_pipe) {
480  /* When running under a GUI, you will end here. */
481  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
482  // input pipe may have been closed by the program that ran ffmpeg
483  return -1;
484  }
485  //Read it
486  if(nchars != 0) {
487  read(0, &ch, 1);
488  return ch;
489  }else{
490  return -1;
491  }
492  }
493 # endif
494  if(kbhit())
495  return(getch());
496 #endif
497  return -1;
498 }
499 
500 static int decode_interrupt_cb(void *ctx)
501 {
503 }
504 
506 
507 static void ffmpeg_cleanup(int ret)
508 {
509  int i, j;
510 
511  if (do_benchmark) {
512  int maxrss = getmaxrss() / 1024;
513  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
514  }
515 
516  for (i = 0; i < nb_filtergraphs; i++) {
517  FilterGraph *fg = filtergraphs[i];
519  for (j = 0; j < fg->nb_inputs; j++) {
520  InputFilter *ifilter = fg->inputs[j];
521  struct InputStream *ist = ifilter->ist;
522 
523  if (ifilter->frame_queue) {
524  AVFrame *frame;
525  while (av_fifo_read(ifilter->frame_queue, &frame, 1) >= 0)
527  av_fifo_freep2(&ifilter->frame_queue);
528  }
529  av_freep(&ifilter->displaymatrix);
530  if (ist->sub2video.sub_queue) {
531  AVSubtitle sub;
532  while (av_fifo_read(ist->sub2video.sub_queue, &sub, 1) >= 0)
535  }
536  av_buffer_unref(&ifilter->hw_frames_ctx);
537  av_freep(&ifilter->name);
538  av_freep(&fg->inputs[j]);
539  }
540  av_freep(&fg->inputs);
541  for (j = 0; j < fg->nb_outputs; j++) {
542  OutputFilter *ofilter = fg->outputs[j];
543 
544  avfilter_inout_free(&ofilter->out_tmp);
545  av_freep(&ofilter->name);
547  av_freep(&fg->outputs[j]);
548  }
549  av_freep(&fg->outputs);
550  av_freep(&fg->graph_desc);
551 
553  }
555 
556  /* close files */
557  for (i = 0; i < nb_output_files; i++)
559 
560  for (i = 0; i < nb_output_streams; i++) {
562 
563  if (!ost)
564  continue;
565 
566  av_bsf_free(&ost->bsf_ctx);
567 
568  av_frame_free(&ost->filtered_frame);
569  av_frame_free(&ost->sq_frame);
570  av_frame_free(&ost->last_frame);
571  av_packet_free(&ost->pkt);
572  av_dict_free(&ost->encoder_opts);
573 
574  av_freep(&ost->forced_keyframes);
575  av_expr_free(ost->forced_keyframes_pexpr);
576  av_freep(&ost->avfilter);
577  av_freep(&ost->logfile_prefix);
578 
579 #if FFMPEG_OPT_MAP_CHANNEL
580  av_freep(&ost->audio_channels_map);
581  ost->audio_channels_mapped = 0;
582 #endif
583 
584  av_dict_free(&ost->sws_dict);
585  av_dict_free(&ost->swr_opts);
586 
587  if (ost->enc_ctx)
588  av_freep(&ost->enc_ctx->stats_in);
589  avcodec_free_context(&ost->enc_ctx);
590 
592  }
594  for (i = 0; i < nb_input_files; i++) {
597  }
598  for (i = 0; i < nb_input_streams; i++) {
599  InputStream *ist = input_streams[i];
600 
602  av_packet_free(&ist->pkt);
603  av_dict_free(&ist->decoder_opts);
606  av_freep(&ist->filters);
607  av_freep(&ist->hwaccel_device);
608  av_freep(&ist->dts_buffer);
609 
611 
613  }
614 
615  if (vstats_file) {
616  if (fclose(vstats_file))
618  "Error closing vstats file, loss of information possible: %s\n",
619  av_err2str(AVERROR(errno)));
620  }
623 
628 
629  uninit_opts();
630 
632 
633  if (received_sigterm) {
634  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
635  (int) received_sigterm);
636  } else if (ret && atomic_load(&transcode_init_done)) {
637  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
638  }
639  term_exit();
640  ffmpeg_exited = 1;
641 }
642 
644 {
645  const AVDictionaryEntry *t = NULL;
646 
647  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
649  }
650 }
651 
653 {
654  const AVDictionaryEntry *t;
655  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
656  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
657  exit_program(1);
658  }
659 }
660 
/* Terminate the program after a refused attempt to use an experimental
 * codec.  The codec and encoder/decoder-direction arguments are unused
 * here; presumably the detailed diagnostic is logged by the caller
 * before invoking this — NOTE(review): caller not visible in this chunk. */
static void abort_codec_experimental(const AVCodec *c, int encoder)
{
    exit_program(1);
}
665 
666 static void update_benchmark(const char *fmt, ...)
667 {
668  if (do_benchmark_all) {
670  va_list va;
671  char buf[1024];
672 
673  if (fmt) {
674  va_start(va, fmt);
675  vsnprintf(buf, sizeof(buf), fmt, va);
676  va_end(va);
678  "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
681  t.real_usec - current_time.real_usec, buf);
682  }
683  current_time = t;
684  }
685 }
686 
688 {
689  OutputFile *of = output_files[ost->file_index];
690  ost->finished |= ENCODER_FINISHED;
691 
692  if (ost->sq_idx_encode >= 0)
693  sq_send(of->sq_encode, ost->sq_idx_encode, SQFRAME(NULL));
694 }
695 
696 /*
697  * Send a single packet to the output, applying any bitstream filters
698  * associated with the output stream. This may result in any number
699  * of packets actually being written, depending on what bitstream
700  * filters are applied. The supplied packet is consumed and will be
701  * blank (as if newly-allocated) when this function returns.
702  *
703  * If eof is set, instead indicate EOF to all bitstream filters and
704  * therefore flush any delayed packets to the output. A blank packet
705  * must be supplied in this case.
706  */
708  OutputStream *ost, int eof)
709 {
710  const char *err_msg;
711  int ret = 0;
712 
713  if (!eof && pkt->dts != AV_NOPTS_VALUE)
714  ost->last_mux_dts = av_rescale_q(pkt->dts, ost->mux_timebase, AV_TIME_BASE_Q);
715 
716  /* apply the output bitstream filters */
717  if (ost->bsf_ctx) {
718  int bsf_eof = 0;
719 
720  ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
721  if (ret < 0) {
722  err_msg = "submitting a packet for bitstream filtering";
723  goto fail;
724  }
725 
726  while (!bsf_eof) {
727  ret = av_bsf_receive_packet(ost->bsf_ctx, pkt);
728  if (ret == AVERROR(EAGAIN))
729  return;
730  else if (ret == AVERROR_EOF)
731  bsf_eof = 1;
732  else if (ret < 0) {
733  err_msg = "applying bitstream filters to a packet";
734  goto fail;
735  }
736 
737  ret = of_submit_packet(of, bsf_eof ? NULL : pkt, ost);
738  if (ret < 0)
739  goto mux_fail;
740  }
741  } else {
742  ret = of_submit_packet(of, eof ? NULL : pkt, ost);
743  if (ret < 0)
744  goto mux_fail;
745  }
746 
747  return;
748 
749 mux_fail:
750  err_msg = "submitting a packet to the muxer";
751 
752 fail:
753  av_log(NULL, AV_LOG_ERROR, "Error %s for output stream #%d:%d.\n",
754  err_msg, ost->file_index, ost->index);
755  if (exit_on_error)
756  exit_program(1);
757 
758 }
759 
761 {
762  OutputFile *of = output_files[ost->file_index];
763 
764  if (of->recording_time != INT64_MAX &&
765  av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
766  AV_TIME_BASE_Q) >= 0) {
768  return 0;
769  }
770  return 1;
771 }
772 
774  AVFrame *frame)
775 {
776  double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
777  int64_t orig_pts = AV_NOPTS_VALUE;
778  AVCodecContext *enc = ost->enc_ctx;
779  AVRational filter_tb = (AVRational){ -1, -1 };
780  if (!frame || frame->pts == AV_NOPTS_VALUE ||
781  !enc || !ost->filter || !ost->filter->graph->graph)
782  goto early_exit;
783 
784  {
785  AVFilterContext *filter = ost->filter->filter;
786 
787  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
788  AVRational tb = enc->time_base;
789  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
790  filter_tb = av_buffersink_get_time_base(filter);
791  orig_pts = frame->pts;
792 
793  tb.den <<= extra_bits;
794  float_pts =
795  av_rescale_q(frame->pts, filter_tb, tb) -
797  float_pts /= 1 << extra_bits;
798  // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
799  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
800 
801  frame->pts =
802  av_rescale_q(frame->pts, filter_tb, enc->time_base) -
804  }
805 
806 early_exit:
807 
808  if (debug_ts) {
809  av_log(NULL, AV_LOG_INFO, "filter_raw -> pts:%s pts_time:%s time_base:%d/%d\n",
810  frame ? av_ts2str(orig_pts) : "NULL",
811  frame ? av_ts2timestr(orig_pts, &filter_tb) : "NULL",
812  filter_tb.num, filter_tb.den);
813 
814  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
815  frame ? av_ts2str(frame->pts) : "NULL",
816  (enc && frame) ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL",
817  float_pts,
818  enc ? enc->time_base.num : -1,
819  enc ? enc->time_base.den : -1);
820  }
821 
822  return float_pts;
823 }
824 
826  char *error, int error_len);
827 
829  unsigned int fatal)
830 {
831  int ret = AVERROR_BUG;
832  char error[1024] = {0};
833 
834  if (ost->initialized)
835  return 0;
836 
837  ret = init_output_stream(ost, frame, error, sizeof(error));
838  if (ret < 0) {
839  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
840  ost->file_index, ost->index, error);
841 
842  if (fatal)
843  exit_program(1);
844  }
845 
846  return ret;
847 }
848 
849 static double psnr(double d)
850 {
851  return -10.0 * log10(d);
852 }
853 
854 static void update_video_stats(OutputStream *ost, const AVPacket *pkt, int write_vstats)
855 {
857  NULL);
858  AVCodecContext *enc = ost->enc_ctx;
859  int64_t frame_number;
860  double ti1, bitrate, avg_bitrate;
861 
862  ost->quality = sd ? AV_RL32(sd) : -1;
863  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
864 
865  for (int i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
866  if (sd && i < sd[5])
867  ost->error[i] = AV_RL64(sd + 8 + 8*i);
868  else
869  ost->error[i] = -1;
870  }
871 
872  if (!write_vstats)
873  return;
874 
875  /* this is executed just the first time update_video_stats is called */
876  if (!vstats_file) {
877  vstats_file = fopen(vstats_filename, "w");
878  if (!vstats_file) {
879  perror("fopen");
880  exit_program(1);
881  }
882  }
883 
884  frame_number = ost->packets_encoded;
885  if (vstats_version <= 1) {
886  fprintf(vstats_file, "frame= %5"PRId64" q= %2.1f ", frame_number,
887  ost->quality / (float)FF_QP2LAMBDA);
888  } else {
889  fprintf(vstats_file, "out= %2d st= %2d frame= %5"PRId64" q= %2.1f ", ost->file_index, ost->index, frame_number,
890  ost->quality / (float)FF_QP2LAMBDA);
891  }
892 
893  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
894  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
895 
896  fprintf(vstats_file,"f_size= %6d ", pkt->size);
897  /* compute pts value */
898  ti1 = pkt->dts * av_q2d(ost->mux_timebase);
899  if (ti1 < 0.01)
900  ti1 = 0.01;
901 
902  bitrate = (pkt->size * 8) / av_q2d(enc->time_base) / 1000.0;
903  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
904  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
905  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
906  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
907 }
908 
910 {
911  AVCodecContext *enc = ost->enc_ctx;
912  AVPacket *pkt = ost->pkt;
913  const char *type_desc = av_get_media_type_string(enc->codec_type);
914  const char *action = frame ? "encode" : "flush";
915  int ret;
916 
917  if (frame) {
918  ost->frames_encoded++;
919  ost->samples_encoded += frame->nb_samples;
920 
921  if (debug_ts) {
922  av_log(NULL, AV_LOG_INFO, "encoder <- type:%s "
923  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
924  type_desc,
925  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
926  enc->time_base.num, enc->time_base.den);
927  }
928  }
929 
931 
932  ret = avcodec_send_frame(enc, frame);
933  if (ret < 0 && !(ret == AVERROR_EOF && !frame)) {
934  av_log(NULL, AV_LOG_ERROR, "Error submitting %s frame to the encoder\n",
935  type_desc);
936  return ret;
937  }
938 
939  while (1) {
941  update_benchmark("%s_%s %d.%d", action, type_desc,
942  ost->file_index, ost->index);
943 
944  /* if two pass, output log on success and EOF */
945  if ((ret >= 0 || ret == AVERROR_EOF) && ost->logfile && enc->stats_out)
946  fprintf(ost->logfile, "%s", enc->stats_out);
947 
948  if (ret == AVERROR(EAGAIN)) {
949  av_assert0(frame); // should never happen during flushing
950  return 0;
951  } else if (ret == AVERROR_EOF) {
952  output_packet(of, pkt, ost, 1);
953  return ret;
954  } else if (ret < 0) {
955  av_log(NULL, AV_LOG_ERROR, "%s encoding failed\n", type_desc);
956  return ret;
957  }
958 
959  if (debug_ts) {
960  av_log(NULL, AV_LOG_INFO, "encoder -> type:%s "
961  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s "
962  "duration:%s duration_time:%s\n",
963  type_desc,
967  }
968 
969  av_packet_rescale_ts(pkt, enc->time_base, ost->mux_timebase);
970 
971  if (debug_ts) {
972  av_log(NULL, AV_LOG_INFO, "encoder -> type:%s "
973  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s "
974  "duration:%s duration_time:%s\n",
975  type_desc,
979  }
980 
981  if (enc->codec_type == AVMEDIA_TYPE_VIDEO)
983 
984  ost->packets_encoded++;
985 
986  output_packet(of, pkt, ost, 0);
987  }
988 
989  av_assert0(0);
990 }
991 
993  AVFrame *frame)
994 {
995  int ret;
996 
997  if (ost->sq_idx_encode < 0)
998  return encode_frame(of, ost, frame);
999 
1000  if (frame) {
1001  ret = av_frame_ref(ost->sq_frame, frame);
1002  if (ret < 0)
1003  return ret;
1004  frame = ost->sq_frame;
1005  }
1006 
1007  ret = sq_send(of->sq_encode, ost->sq_idx_encode,
1008  SQFRAME(frame));
1009  if (ret < 0) {
1010  if (frame)
1012  if (ret != AVERROR_EOF)
1013  return ret;
1014  }
1015 
1016  while (1) {
1017  AVFrame *enc_frame = ost->sq_frame;
1018 
1019  ret = sq_receive(of->sq_encode, ost->sq_idx_encode,
1020  SQFRAME(enc_frame));
1021  if (ret == AVERROR_EOF) {
1022  enc_frame = NULL;
1023  } else if (ret < 0) {
1024  return (ret == AVERROR(EAGAIN)) ? 0 : ret;
1025  }
1026 
1027  ret = encode_frame(of, ost, enc_frame);
1028  if (enc_frame)
1029  av_frame_unref(enc_frame);
1030  if (ret < 0) {
1031  if (ret == AVERROR_EOF)
1033  return ret;
1034  }
1035  }
1036 }
1037 
1039  AVFrame *frame)
1040 {
1041  int ret;
1042 
1044 
1045  if (!check_recording_time(ost))
1046  return;
1047 
1048  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
1049  frame->pts = ost->sync_opts;
1050  ost->sync_opts = frame->pts + frame->nb_samples;
1051 
1052  ret = submit_encode_frame(of, ost, frame);
1053  if (ret < 0 && ret != AVERROR_EOF)
1054  exit_program(1);
1055 }
1056 
/* Encode one decoded subtitle and send the resulting packet(s) to the
 * muxer.  DVB subtitles are encoded twice: a first pass with the real
 * rectangles (draw the subpicture) and a second pass with num_rects
 * forced to 0 (clear it again); every other codec gets a single pass.
 * Exits the program on allocation or encoding failure. */
static void do_subtitle_out(OutputFile *of,
                            OutputStream *ost,
                            AVSubtitle *sub)
{
    int subtitle_out_max_size = 1024 * 1024;
    int subtitle_out_size, nb, i, ret;
    AVCodecContext *enc;
    AVPacket *pkt = ost->pkt;
    int64_t pts;

    if (sub->pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
        if (exit_on_error)
            exit_program(1);
        return;
    }

    enc = ost->enc_ctx;

    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
        nb = 2;
    else
        nb = 1;

    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    pts = sub->pts;
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
        pts -= output_files[ost->file_index]->start_time;
    for (i = 0; i < nb; i++) {
        /* num_rects is temporarily zeroed for the DVB "clear" pass below;
           keep the real value so it can be restored afterwards */
        unsigned save_num_rects = sub->num_rects;

        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
        if (!check_recording_time(ost))
            return;

        ret = av_new_packet(pkt, subtitle_out_max_size);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle encode buffer\n");
            exit_program(1);
        }

        sub->pts = pts;
        // start_display_time is required to be 0
        sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time -= sub->start_display_time;
        sub->start_display_time = 0;
        if (i == 1)
            sub->num_rects = 0;

        ost->frames_encoded++;

        subtitle_out_size = avcodec_encode_subtitle(enc, pkt->data, pkt->size, sub);
        if (i == 1)
            sub->num_rects = save_num_rects;
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
            exit_program(1);
        }

        av_shrink_packet(pkt, subtitle_out_size);
        pkt->pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
        pkt->duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
            if (i == 0)
                pkt->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
            else
                pkt->pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        }
        pkt->dts = pkt->pts;
        output_packet(of, pkt, ost, 0);
    }
}
1134 
1135 /* May modify/reset next_picture */
1136 static void do_video_out(OutputFile *of,
1137  OutputStream *ost,
1138  AVFrame *next_picture)
1139 {
1140  int ret;
1141  AVCodecContext *enc = ost->enc_ctx;
1142  AVRational frame_rate;
1143  int64_t nb_frames, nb0_frames, i;
1144  double delta, delta0;
1145  double duration = 0;
1146  double sync_ipts = AV_NOPTS_VALUE;
1147  InputStream *ist = NULL;
1148  AVFilterContext *filter = ost->filter->filter;
1149 
1150  init_output_stream_wrapper(ost, next_picture, 1);
1151  sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture);
1152 
1153  if (ost->source_index >= 0)
1154  ist = input_streams[ost->source_index];
1155 
1156  frame_rate = av_buffersink_get_frame_rate(filter);
1157  if (frame_rate.num > 0 && frame_rate.den > 0)
1158  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1159 
1160  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1161  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1162 
1163  if (!ost->filters_script &&
1164  !ost->filters &&
1165  (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1166  next_picture &&
1167  ist &&
1168  lrintf(next_picture->duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1169  duration = lrintf(next_picture->duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1170  }
1171 
1172  if (!next_picture) {
1173  //end, flushing
1174  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1175  ost->last_nb0_frames[1],
1176  ost->last_nb0_frames[2]);
1177  } else {
1178  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1179  delta = delta0 + duration;
1180 
1181  /* by default, we output a single frame */
1182  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1183  nb_frames = 1;
1184 
1185  if (delta0 < 0 &&
1186  delta > 0 &&
1187  ost->vsync_method != VSYNC_PASSTHROUGH &&
1188  ost->vsync_method != VSYNC_DROP) {
1189  if (delta0 < -0.6) {
1190  av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1191  } else
1192  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1193  sync_ipts = ost->sync_opts;
1194  duration += delta0;
1195  delta0 = 0;
1196  }
1197 
1198  switch (ost->vsync_method) {
1199  case VSYNC_VSCFR:
1200  if (ost->vsync_frame_number == 0 && delta0 >= 0.5) {
1201  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1202  delta = duration;
1203  delta0 = 0;
1204  ost->sync_opts = llrint(sync_ipts);
1205  }
1206  case VSYNC_CFR:
1207  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1208  if (frame_drop_threshold && delta < frame_drop_threshold && ost->vsync_frame_number) {
1209  nb_frames = 0;
1210  } else if (delta < -1.1)
1211  nb_frames = 0;
1212  else if (delta > 1.1) {
1213  nb_frames = llrintf(delta);
1214  if (delta0 > 1.1)
1215  nb0_frames = llrintf(delta0 - 0.6);
1216  }
1217  next_picture->duration = 1;
1218  break;
1219  case VSYNC_VFR:
1220  if (delta <= -0.6)
1221  nb_frames = 0;
1222  else if (delta > 0.6)
1223  ost->sync_opts = llrint(sync_ipts);
1224  next_picture->duration = duration;
1225  break;
1226  case VSYNC_DROP:
1227  case VSYNC_PASSTHROUGH:
1228  next_picture->duration = duration;
1229  ost->sync_opts = llrint(sync_ipts);
1230  break;
1231  default:
1232  av_assert0(0);
1233  }
1234  }
1235 
1236  /*
1237  * For video, number of frames in == number of packets out.
1238  * But there may be reordering, so we can't throw away frames on encoder
1239  * flush, we need to limit them here, before they go into encoder.
1240  */
1241  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->vsync_frame_number);
1242  nb0_frames = FFMIN(nb0_frames, nb_frames);
1243 
1244  memmove(ost->last_nb0_frames + 1,
1245  ost->last_nb0_frames,
1246  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1247  ost->last_nb0_frames[0] = nb0_frames;
1248 
1249  if (nb0_frames == 0 && ost->last_dropped) {
1250  nb_frames_drop++;
1252  "*** dropping frame %"PRId64" from stream %d at ts %"PRId64"\n",
1253  ost->vsync_frame_number, ost->st->index, ost->last_frame->pts);
1254  }
1255  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1256  if (nb_frames > dts_error_threshold * 30) {
1257  av_log(NULL, AV_LOG_ERROR, "%"PRId64" frame duplication too large, skipping\n", nb_frames - 1);
1258  nb_frames_drop++;
1259  return;
1260  }
1261  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1262  av_log(NULL, AV_LOG_VERBOSE, "*** %"PRId64" dup!\n", nb_frames - 1);
1263  if (nb_frames_dup > dup_warning) {
1264  av_log(NULL, AV_LOG_WARNING, "More than %"PRIu64" frames duplicated\n", dup_warning);
1265  dup_warning *= 10;
1266  }
1267  }
1268  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1269  ost->dropped_keyframe = ost->last_dropped && next_picture && next_picture->key_frame;
1270 
1271  /* duplicates frame if needed */
1272  for (i = 0; i < nb_frames; i++) {
1273  AVFrame *in_picture;
1274  int forced_keyframe = 0;
1275  double pts_time;
1276 
1277  if (i < nb0_frames && ost->last_frame->buf[0]) {
1278  in_picture = ost->last_frame;
1279  } else
1280  in_picture = next_picture;
1281 
1282  if (!in_picture)
1283  return;
1284 
1285  in_picture->pts = ost->sync_opts;
1286 
1287  if (!check_recording_time(ost))
1288  return;
1289 
1290  in_picture->quality = enc->global_quality;
1291  in_picture->pict_type = 0;
1292 
1293  if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1294  in_picture->pts != AV_NOPTS_VALUE)
1295  ost->forced_kf_ref_pts = in_picture->pts;
1296 
1297  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1298  (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1299  if (ost->forced_kf_index < ost->forced_kf_count &&
1300  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1301  ost->forced_kf_index++;
1302  forced_keyframe = 1;
1303  } else if (ost->forced_keyframes_pexpr) {
1304  double res;
1305  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1306  res = av_expr_eval(ost->forced_keyframes_pexpr,
1307  ost->forced_keyframes_expr_const_values, NULL);
1308  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1309  ost->forced_keyframes_expr_const_values[FKF_N],
1310  ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1311  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1312  ost->forced_keyframes_expr_const_values[FKF_T],
1313  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1314  res);
1315  if (res) {
1316  forced_keyframe = 1;
1317  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1318  ost->forced_keyframes_expr_const_values[FKF_N];
1319  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1320  ost->forced_keyframes_expr_const_values[FKF_T];
1321  ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1322  }
1323 
1324  ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1325  } else if ( ost->forced_keyframes
1326  && !strncmp(ost->forced_keyframes, "source", 6)
1327  && in_picture->key_frame==1
1328  && !i) {
1329  forced_keyframe = 1;
1330  } else if ( ost->forced_keyframes
1331  && !strncmp(ost->forced_keyframes, "source_no_drop", 14)
1332  && !i) {
1333  forced_keyframe = (in_picture->key_frame == 1) || ost->dropped_keyframe;
1334  ost->dropped_keyframe = 0;
1335  }
1336 
1337  if (forced_keyframe) {
1338  in_picture->pict_type = AV_PICTURE_TYPE_I;
1339  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1340  }
1341 
1342  ret = submit_encode_frame(of, ost, in_picture);
1343  if (ret < 0 && ret != AVERROR_EOF)
1344  exit_program(1);
1345 
1346  ost->sync_opts++;
1347  ost->vsync_frame_number++;
1348  }
1349 
1350  av_frame_unref(ost->last_frame);
1351  if (next_picture)
1352  av_frame_move_ref(ost->last_frame, next_picture);
1353 }
1354 
1355 /**
1356  * Get and encode new output from any of the filtergraphs, without causing
1357  * activity.
1358  *
1359  * @return 0 for success, <0 for severe errors
1360  */
1361 static int reap_filters(int flush)
1362 {
1363  AVFrame *filtered_frame = NULL;
1364  int i;
1365 
1366  /* Reap all buffers present in the buffer sinks */
1367  for (i = 0; i < nb_output_streams; i++) {
 /* NOTE(review): this extraction is missing the original lines 1368/1370,
  * which declare `ost` (the current OutputStream) and `filter` used below —
  * verify against the original ffmpeg.c before compiling. */
1369  OutputFile *of = output_files[ost->file_index];
1371  AVCodecContext *enc = ost->enc_ctx;
1372  int ret = 0;
1373 
 /* Streams without a configured filtergraph have nothing to reap. */
1374  if (!ost->filter || !ost->filter->graph->graph)
1375  continue;
1376  filter = ost->filter->filter;
1377 
1378  /*
1379  * Unlike video, with audio the audio frame size matters.
1380  * Currently we are fully reliant on the lavfi filter chain to
1381  * do the buffering deed for us, and thus the frame size parameter
1382  * needs to be set accordingly. Where does one get the required
1383  * frame size? From the initialized AVCodecContext of an audio
1384  * encoder. Thus, if we have gotten to an audio stream, initialize
1385  * the encoder earlier than receiving the first AVFrame.
1386  */
 /* NOTE(review): lines 1387-1389 (presumably the audio-encoder init call)
  * are missing from this extraction. */
1389 
1390  filtered_frame = ost->filtered_frame;
1391 
 /* Drain every frame currently buffered in this stream's sink. */
1392  while (1) {
1393  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
 /* NOTE(review): the flags argument on line 1394 is missing here. */
1395  if (ret < 0) {
1396  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
 /* NOTE(review): the av_log(...) opener (line 1397) is missing. */
1398  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1399  } else if (flush && ret == AVERROR_EOF) {
 /* On EOF during a flush, push a NULL frame so video output drains. */
1401  do_video_out(of, ost, NULL);
1402  }
1403  break;
1404  }
 /* Finished streams discard any late frames instead of encoding them. */
1405  if (ost->finished) {
1406  av_frame_unref(filtered_frame);
1407  continue;
1408  }
1409 
 /* Track the last filter pts in microseconds for progress reporting. */
1410  if (filtered_frame->pts != AV_NOPTS_VALUE) {
 /* NOTE(review): the declaration of `tb` (line 1411) is missing. */
1412  ost->last_filter_pts = av_rescale_q(filtered_frame->pts, tb,
1413  AV_TIME_BASE_Q);
1414  }
1415 
 /* Dispatch the frame to the media-type-specific output path. */
1416  switch (av_buffersink_get_type(filter)) {
1417  case AVMEDIA_TYPE_VIDEO:
1418  if (!ost->frame_aspect_ratio.num)
1419  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1420 
1421  do_video_out(of, ost, filtered_frame);
1422  break;
1423  case AVMEDIA_TYPE_AUDIO:
1424  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1425  enc->ch_layout.nb_channels != filtered_frame->ch_layout.nb_channels) {
 /* NOTE(review): av_log(...) opener (line 1426) missing from extraction. */
1427  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1428  break;
1429  }
1430  do_audio_out(of, ost, filtered_frame);
1431  break;
1432  default:
1433  // TODO support subtitle filters
1434  av_assert0(0);
1435  }
1436 
1437  av_frame_unref(filtered_frame);
1438  }
1439  }
1440 
1441  return 0;
1442 }
1443 
/**
 * Print end-of-run statistics: per-type output byte totals, muxing overhead,
 * and verbose per-stream packet/frame counts for all inputs and outputs.
 *
 * @param total_size total muxed output size in bytes (negative if unknown;
 *                   see the of_filesize() call in print_report()).
 *
 * NOTE(review): this doxygen extraction dropped some lines (the per-loop
 * `ost` declarations at original lines 1454/1528, the closing paren of the
 * pass1 check at 1466, and the av_log argument lines 1501/1535) — verify
 * against the original ffmpeg.c.
 */
1444 static void print_final_stats(int64_t total_size)
1445 {
1446  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1447  uint64_t subtitle_size = 0;
1448  uint64_t data_size = 0;
1449  float percent = -1.0;
1450  int i, j;
1451  int pass1_used = 1;
1452 
 /* Accumulate written bytes per media type across all output streams. */
1453  for (i = 0; i < nb_output_streams; i++) {
1455  AVCodecParameters *par = ost->st->codecpar;
1456  switch (par->codec_type) {
1457  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1458  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1459  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1460  default: other_size += ost->data_size; break;
1461  }
1462  extra_size += par->extradata_size;
1463  data_size += ost->data_size;
1464  if (ost->enc_ctx &&
1465  (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1467  pass1_used = 0;
1468  }
1469 
 /* Muxing overhead: container bytes beyond the raw stream payload. */
1470  if (data_size && total_size>0 && total_size >= data_size)
1471  percent = 100.0 * (total_size - data_size) / data_size;
1472 
1473  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1474  video_size / 1024.0,
1475  audio_size / 1024.0,
1476  subtitle_size / 1024.0,
1477  other_size / 1024.0,
1478  extra_size / 1024.0);
1479  if (percent >= 0.0)
1480  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1481  else
1482  av_log(NULL, AV_LOG_INFO, "unknown");
1483  av_log(NULL, AV_LOG_INFO, "\n");
1484 
1485  /* print verbose per-stream stats */
1486  for (i = 0; i < nb_input_files; i++) {
1487  InputFile *f = input_files[i];
 /* local total_size deliberately shadows the parameter: per-file totals */
1488  uint64_t total_packets = 0, total_size = 0;
1489 
1490  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1491  i, f->ctx->url);
1492 
1493  for (j = 0; j < f->nb_streams; j++) {
1494  InputStream *ist = input_streams[f->ist_index + j];
1495  enum AVMediaType type = ist->st->codecpar->codec_type;
1496 
1497  total_size += ist->data_size;
1498  total_packets += ist->nb_packets;
1499 
1500  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1502  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1503  ist->nb_packets, ist->data_size);
1504 
1505  if (ist->decoding_needed) {
1506  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1507  ist->frames_decoded);
1508  if (type == AVMEDIA_TYPE_AUDIO)
1509  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1510  av_log(NULL, AV_LOG_VERBOSE, "; ");
1511  }
1512 
1513  av_log(NULL, AV_LOG_VERBOSE, "\n");
1514  }
1515 
1516  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1517  total_packets, total_size);
1518  }
1519 
1520  for (i = 0; i < nb_output_files; i++) {
1521  OutputFile *of = output_files[i];
1522  uint64_t total_packets = 0, total_size = 0;
1523 
1524  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1525  i, of->url);
1526 
1527  for (j = 0; j < of->nb_streams; j++) {
1529  enum AVMediaType type = ost->st->codecpar->codec_type;
1530 
1531  total_size += ost->data_size;
1532  total_packets += atomic_load(&ost->packets_written);
1533 
1534  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1536  if (ost->enc_ctx) {
1537  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1538  ost->frames_encoded);
1539  if (type == AVMEDIA_TYPE_AUDIO)
1540  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1541  av_log(NULL, AV_LOG_VERBOSE, "; ");
1542  }
1543 
1544  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1545  atomic_load(&ost->packets_written), ost->data_size);
1546 
1547  av_log(NULL, AV_LOG_VERBOSE, "\n");
1548  }
1549 
1550  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1551  total_packets, total_size);
1552  }
 /* Warn when nothing at all was encoded; in a pass-1 run an empty output
  * can be expected, hence the different message. */
1553  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1554  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1555  if (pass1_used) {
1556  av_log(NULL, AV_LOG_WARNING, "\n");
1557  } else {
1558  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1559  }
1560  }
1561 }
1562 
/**
 * Build and emit the periodic status line ("frame= fps= q= size= time=
 * bitrate= speed=") and, when enabled, the machine-readable key=value
 * progress report written to progress_avio. On the final report it also
 * triggers print_final_stats().
 *
 * @param is_last_report non-zero for the final report at end of run
 * @param timer_start    wall-clock start time in microseconds
 * @param cur_time       current wall-clock time in microseconds
 *
 * NOTE(review): this extraction is missing a few lines (av_bprint_init of
 * `buf` at original line 1595, the copy_ts_first_pts assignment block
 * 1666-1668, and some av_log/avio openers at 1717/1746/1750) — verify
 * against the original ffmpeg.c.
 */
1563 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1564 {
1565  AVBPrint buf, buf_script;
1566  int64_t total_size = of_filesize(output_files[0]);
1567  int vid, i;
1568  double bitrate;
1569  double speed;
1570  int64_t pts = INT64_MIN + 1;
 /* static state: report pacing persists across calls */
1571  static int64_t last_time = -1;
1572  static int first_report = 1;
1573  static int qp_histogram[52];
1574  int hours, mins, secs, us;
1575  const char *hours_sign;
1576  int ret;
1577  float t;
1578 
1579  if (!print_stats && !is_last_report && !progress_avio)
1580  return;
1581 
 /* Rate-limit intermediate reports to stats_period; the very first report
  * additionally waits until every output header has been written. */
1582  if (!is_last_report) {
1583  if (last_time == -1) {
1584  last_time = cur_time;
1585  }
1586  if (((cur_time - last_time) < stats_period && !first_report) ||
1587  (first_report && nb_output_dumped < nb_output_files))
1588  return;
1589  last_time = cur_time;
1590  }
1591 
 /* elapsed wall-clock time in seconds */
1592  t = (cur_time-timer_start) / 1000000.0;
1593 
1594  vid = 0;
1596  av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1597  for (i = 0; i < nb_output_streams; i++) {
1598  OutputStream * const ost = output_streams[i];
1599  const AVCodecContext * const enc = ost->enc_ctx;
1600  const float q = enc ? ost->quality / (float) FF_QP2LAMBDA : -1;
1601 
 /* Secondary video streams only contribute their quantizer value. */
1602  if (vid && ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
1603  av_bprintf(&buf, "q=%2.1f ", q);
1604  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1605  ost->file_index, ost->index, q);
1606  }
 /* First video stream drives frame/fps/q and optional QP/PSNR output. */
1607  if (!vid && ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
1608  float fps;
1609  uint64_t frame_number = atomic_load(&ost->packets_written);
1610 
1611  fps = t > 1 ? frame_number / t : 0;
1612  av_bprintf(&buf, "frame=%5"PRId64" fps=%3.*f q=%3.1f ",
1613  frame_number, fps < 9.95, fps, q);
1614  av_bprintf(&buf_script, "frame=%"PRId64"\n", frame_number);
1615  av_bprintf(&buf_script, "fps=%.2f\n", fps);
1616  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1617  ost->file_index, ost->index, q);
1618  if (is_last_report)
1619  av_bprintf(&buf, "L");
1620  if (qp_hist) {
1621  int j;
1622  int qp = lrintf(q);
1623  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1624  qp_histogram[qp]++;
1625  for (j = 0; j < 32; j++)
1626  av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1627  }
1628 
1629  if (enc && (enc->flags & AV_CODEC_FLAG_PSNR) &&
1630  (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1631  int j;
1632  double error, error_sum = 0;
1633  double scale, scale_sum = 0;
1634  double p;
1635  char type[3] = { 'Y','U','V' };
1636  av_bprintf(&buf, "PSNR=");
1637  for (j = 0; j < 3; j++) {
1638  if (is_last_report) {
1639  error = enc->error[j];
1640  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1641  } else {
1642  error = ost->error[j];
1643  scale = enc->width * enc->height * 255.0 * 255.0;
1644  }
 /* chroma planes are subsampled: quarter the scale */
1645  if (j)
1646  scale /= 4;
1647  error_sum += error;
1648  scale_sum += scale;
1649  p = psnr(error / scale);
1650  av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1651  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1652  ost->file_index, ost->index, type[j] | 32, p);
1653  }
1654  p = psnr(error_sum / scale_sum);
1655  av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1656  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1657  ost->file_index, ost->index, p);
1658  }
1659  vid = 1;
1660  }
1661  /* compute min output value */
1662  if (ost->last_mux_dts != AV_NOPTS_VALUE) {
1663  pts = FFMAX(pts, ost->last_mux_dts);
1664  if (copy_ts) {
1665  if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1)
1669  }
1670  }
1671 
1672  if (is_last_report)
1673  nb_frames_drop += ost->last_dropped;
1674  }
1675 
 /* split |pts| (microseconds) into h:m:s.us for display */
1676  secs = FFABS(pts) / AV_TIME_BASE;
1677  us = FFABS(pts) % AV_TIME_BASE;
1678  mins = secs / 60;
1679  secs %= 60;
1680  hours = mins / 60;
1681  mins %= 60;
1682  hours_sign = (pts < 0) ? "-" : "";
1683 
1684  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1685  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1686 
1687  if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1688  else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1689  if (pts == AV_NOPTS_VALUE) {
1690  av_bprintf(&buf, "N/A ");
1691  } else {
1692  av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1693  hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1694  }
1695 
1696  if (bitrate < 0) {
1697  av_bprintf(&buf, "bitrate=N/A");
1698  av_bprintf(&buf_script, "bitrate=N/A\n");
1699  }else{
1700  av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1701  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1702  }
1703 
1704  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1705  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1706  if (pts == AV_NOPTS_VALUE) {
1707  av_bprintf(&buf_script, "out_time_us=N/A\n");
1708  av_bprintf(&buf_script, "out_time_ms=N/A\n");
1709  av_bprintf(&buf_script, "out_time=N/A\n");
1710  } else {
1711  av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1712  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1713  av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1714  hours_sign, hours, mins, secs, us);
1715  }
1716 
1718  av_bprintf(&buf, " dup=%"PRId64" drop=%"PRId64, nb_frames_dup, nb_frames_drop);
1719  av_bprintf(&buf_script, "dup_frames=%"PRId64"\n", nb_frames_dup);
1720  av_bprintf(&buf_script, "drop_frames=%"PRId64"\n", nb_frames_drop);
1721 
1722  if (speed < 0) {
1723  av_bprintf(&buf, " speed=N/A");
1724  av_bprintf(&buf_script, "speed=N/A\n");
1725  } else {
1726  av_bprintf(&buf, " speed=%4.3gx", speed);
1727  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1728  }
1729 
 /* '\r' keeps intermediate reports on one terminal line; '\n' ends it */
1730  if (print_stats || is_last_report) {
1731  const char end = is_last_report ? '\n' : '\r';
1732  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1733  fprintf(stderr, "%s %c", buf.str, end);
1734  } else
1735  av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1736 
1737  fflush(stderr);
1738  }
1739  av_bprint_finalize(&buf, NULL);
1740 
1741  if (progress_avio) {
1742  av_bprintf(&buf_script, "progress=%s\n",
1743  is_last_report ? "end" : "continue");
1744  avio_write(progress_avio, buf_script.str,
1745  FFMIN(buf_script.len, buf_script.size - 1));
1747  av_bprint_finalize(&buf_script, NULL);
1748  if (is_last_report) {
1749  if ((ret = avio_closep(&progress_avio)) < 0)
1751  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1752  }
1753  }
1754 
1755  first_report = 0;
1756 
1757  if (is_last_report)
1758  print_final_stats(total_size);
1759 }
1760 
/*
 * Seed an InputFilter's format parameters from demuxer-level codec
 * parameters when no decoded frame ever arrived.
 *
 * Returns 0 on success or a negative AVERROR from the channel-layout copy.
 *
 * NOTE(review): the signature line (original line 1761) is missing from
 * this extraction; per the call sites below it is
 *   static int ifilter_parameters_from_codecpar(InputFilter *ifilter,
 *                                               AVCodecParameters *par)
 * — confirm against the original ffmpeg.c.
 */
1762 {
1763  int ret;
1764 
1765  // We never got any input. Set a fake format, which will
1766  // come from libavformat.
1767  ifilter->format = par->format;
1768  ifilter->sample_rate = par->sample_rate;
1769  ifilter->width = par->width;
1770  ifilter->height = par->height;
1771  ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1772  ret = av_channel_layout_copy(&ifilter->ch_layout, &par->ch_layout);
1773  if (ret < 0)
1774  return ret;
1775 
1776  return 0;
1777 }
1778 
/*
 * Drain all encoders at end of input: first signal EOF to every encoding
 * sync queue, then send a NULL frame to each encoder so buffered packets
 * are flushed. Streams that were never initialized get a last-chance
 * filtergraph configuration so at least stream headers can be written.
 *
 * NOTE(review): several lines are missing from this extraction (the per-loop
 * `ost` declarations at original lines 1784/1791, an av_log opener at 1803,
 * and the conditions guarding the two bare `continue`s at 1818/1833) —
 * verify against the original ffmpeg.c.
 */
1779 static void flush_encoders(void)
1780 {
1781  int i, ret;
1782 
 /* Pass 1: signal EOF into every encoder-side sync queue. */
1783  for (i = 0; i < nb_output_streams; i++) {
1785  OutputFile *of = output_files[ost->file_index];
1786  if (ost->sq_idx_encode >= 0)
1787  sq_send(of->sq_encode, ost->sq_idx_encode, SQFRAME(NULL));
1788  }
1789 
 /* Pass 2: flush each encoder. */
1790  for (i = 0; i < nb_output_streams; i++) {
1792  AVCodecContext *enc = ost->enc_ctx;
1793  OutputFile *of = output_files[ost->file_index];
1794 
1795  if (!enc)
1796  continue;
1797 
1798  // Try to enable encoding with no input frames.
1799  // Maybe we should just let encoding fail instead.
1800  if (!ost->initialized) {
1801  FilterGraph *fg = ost->filter->graph;
1802 
1804  "Finishing stream %d:%d without any data written to it.\n",
1805  ost->file_index, ost->st->index);
1806 
 /* Filtergraph never configured: derive input parameters from the
  * demuxer codec parameters, then configure it now. */
1807  if (ost->filter && !fg->graph) {
1808  int x;
1809  for (x = 0; x < fg->nb_inputs; x++) {
1810  InputFilter *ifilter = fg->inputs[x];
1811  if (ifilter->format < 0 &&
 /* NOTE(review): "paramerets" typo below is in the original log string. */
1812  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar) < 0) {
1813  av_log(NULL, AV_LOG_ERROR, "Error copying paramerets from input stream\n");
1814  exit_program(1);
1815  }
1816  }
1817 
1819  continue;
1820 
1821  ret = configure_filtergraph(fg);
1822  if (ret < 0) {
1823  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1824  exit_program(1);
1825  }
1826 
1827  output_packet(of, ost->pkt, ost, 1);
1828  }
1829 
1831  }
1832 
1834  continue;
1835 
 /* NULL frame == flush request; AVERROR_EOF is the expected result. */
1836  ret = submit_encode_frame(of, ost, NULL);
1837  if (ret != AVERROR_EOF)
1838  exit_program(1);
1839  }
1840 }
1841 
1842 /*
1843  * Check whether a packet from ist should be written into ost at this time
1844  */
/*
 * Returns 1 if the packet should be written, 0 otherwise: the stream must
 * map to this output, the muxer must not be finished, and the input pts
 * must have reached the output file's start time.
 *
 * NOTE(review): the signature line (original line 1845) is missing from
 * this extraction; by the body it takes (InputStream *ist,
 * OutputStream *ost) — confirm against the original ffmpeg.c.
 */
1846 {
1847  OutputFile *of = output_files[ost->file_index];
1848  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1849 
1850  if (ost->source_index != ist_index)
1851  return 0;
1852 
1853  if (ost->finished & MUXER_FINISHED)
1854  return 0;
1855 
1856  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1857  return 0;
1858 
1859  return 1;
1860 }
1861 
/*
 * Copy a demuxed packet straight to the output stream (-c copy path):
 * applies start-time / recording-time limits, rescales timestamps from the
 * input stream timebase to the muxer timebase and submits the packet.
 * A NULL pkt flushes the output bitstream filters.
 *
 * NOTE(review): the signature line (original line 1862) and a few interior
 * lines (1890/1901: presumably close_output_stream calls; 1915: the
 * declaration of `duration`) are missing from this extraction — the
 * function presumably is do_streamcopy(InputStream *ist, OutputStream *ost,
 * const AVPacket *pkt); confirm against the original ffmpeg.c.
 */
1863 {
1864  OutputFile *of = output_files[ost->file_index];
1865  InputFile *f = input_files [ist->file_index];
1866  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1867  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1868  AVPacket *opkt = ost->pkt;
1869 
1870  av_packet_unref(opkt);
1871  // EOF: flush output bitstream filters.
1872  if (!pkt) {
1873  output_packet(of, opkt, ost, 1);
1874  return;
1875  }
1876 
 /* Drop leading non-keyframes unless the user asked to keep them. */
1877  if (!ost->streamcopy_started && !(pkt->flags & AV_PKT_FLAG_KEY) &&
1878  !ost->copy_initial_nonkeyframes)
1879  return;
1880 
 /* Drop packets that precede the copy start point unless requested. */
1881  if (!ost->streamcopy_started && !ost->copy_prior_start) {
1882  if (pkt->pts == AV_NOPTS_VALUE ?
1883  ist->pts < ost->ts_copy_start :
1884  pkt->pts < av_rescale_q(ost->ts_copy_start, AV_TIME_BASE_Q, ist->st->time_base))
1885  return;
1886  }
1887 
 /* Output-file recording time (-t on the output) reached. */
1888  if (of->recording_time != INT64_MAX &&
1889  ist->pts >= of->recording_time + start_time) {
1891  return;
1892  }
1893 
 /* Input-file recording time (-t on the input) reached. */
1894  if (f->recording_time != INT64_MAX) {
1895  start_time = 0;
1896  if (copy_ts) {
1897  start_time += f->start_time != AV_NOPTS_VALUE ? f->start_time : 0;
1898  start_time += start_at_zero ? 0 : f->ctx->start_time;
1899  }
1900  if (ist->pts >= f->recording_time + start_time) {
1902  return;
1903  }
1904  }
1905 
1906  if (av_packet_ref(opkt, pkt) < 0)
1907  exit_program(1);
1908 
 /* Rescale pts/dts/duration into the muxer timebase, offset by start time. */
1909  if (pkt->pts != AV_NOPTS_VALUE)
1910  opkt->pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
1911 
1912  if (pkt->dts == AV_NOPTS_VALUE) {
1913  opkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
1914  } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
1916  if(!duration)
1917  duration = ist->st->codecpar->frame_size;
 /* audio: smooth dts drift using av_rescale_delta over sample counts */
1918  opkt->dts = av_rescale_delta(ist->st->time_base, pkt->dts,
1919  (AVRational){1, ist->st->codecpar->sample_rate}, duration,
1920  &ist->filter_in_rescale_delta_last, ost->mux_timebase);
1921  /* dts will be set immediately afterwards to what pts is now */
1922  opkt->pts = opkt->dts - ost_tb_start_time;
1923  } else
1924  opkt->dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
1925  opkt->dts -= ost_tb_start_time;
1926 
1927  opkt->duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
1928 
1929  ost->sync_opts += opkt->duration;
1930 
1931  output_packet(of, opkt, ost, 0);
1932 
1933  ost->streamcopy_started = 1;
1934 }
1935 
/*
 * Update the decode success/failure statistics and enforce -xerror:
 * exits the program on a decode error (or on a corrupt decoded frame)
 * when exit_on_error is set.
 *
 * NOTE(review): original lines 1945-1946 (presumably the corrupt-frame
 * condition and av_log opener) are missing from this extraction — verify
 * against the original ffmpeg.c.
 */
1936 static void check_decode_result(InputStream *ist, int *got_output, int ret)
1937 {
 /* decode_error_stat[0] counts successes, [1] counts failures */
1938  if (*got_output || ret<0)
1939  decode_error_stat[ret<0] ++;
1940 
1941  if (ret < 0 && exit_on_error)
1942  exit_program(1);
1943 
1944  if (*got_output && ist) {
1947  "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
1948  if (exit_on_error)
1949  exit_program(1);
1950  }
1951  }
1952 }
1953 
1954 // Filters can be configured only if the formats of all inputs are known.
/*
 * Returns 1 when every audio/video input of the filtergraph has a known
 * format (format >= 0), 0 otherwise.
 *
 * NOTE(review): the signature line (original line 1955) is missing from
 * this extraction; by its call site it is
 *   static int ifilter_has_all_input_formats(FilterGraph *fg)
 * — confirm against the original ffmpeg.c.
 */
1956 {
1957  int i;
1958  for (i = 0; i < fg->nb_inputs; i++) {
1959  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
1960  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
1961  return 0;
1962  }
1963  return 1;
1964 }
1965 
/*
 * Feed one decoded frame into an input filter. Detects parameter changes
 * (format, size, sample rate, channel layout, hw frames context, display
 * matrix) and reconfigures the filtergraph when needed; frames arriving
 * before all inputs have known formats are queued instead.
 *
 * @param keep_reference non-zero to keep a reference on `frame` for the
 *                       caller (adds AV_BUFFERSRC_FLAG_KEEP_REF).
 * @return 0 on success, a negative AVERROR otherwise.
 *
 * NOTE(review): this extraction is missing original lines 1997 (the
 * side-data lookup `if` feeding `sd`), 2004 (presumably the
 * ifilter_parameters_from_frame call assigning `ret`) and 2012 (the
 * declaration of `tmp`, presumably an av_frame_clone of `frame`) — verify
 * against the original ffmpeg.c.
 */
1966 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame, int keep_reference)
1967 {
1968  FilterGraph *fg = ifilter->graph;
1969  AVFrameSideData *sd;
1970  int need_reinit, ret;
1971  int buffersrc_flags = AV_BUFFERSRC_FLAG_PUSH;
1972 
1973  if (keep_reference)
1974  buffersrc_flags |= AV_BUFFERSRC_FLAG_KEEP_REF;
1975 
1976  /* determine if the parameters for this input changed */
1977  need_reinit = ifilter->format != frame->format;
1978 
1979  switch (ifilter->ist->st->codecpar->codec_type) {
1980  case AVMEDIA_TYPE_AUDIO:
1981  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
1982  av_channel_layout_compare(&ifilter->ch_layout, &frame->ch_layout);
1983  break;
1984  case AVMEDIA_TYPE_VIDEO:
1985  need_reinit |= ifilter->width != frame->width ||
1986  ifilter->height != frame->height;
1987  break;
1988  }
1989 
 /* -reinit_filter 0: keep the existing graph despite parameter changes */
1990  if (!ifilter->ist->reinit_filters && fg->graph)
1991  need_reinit = 0;
1992 
 /* hw frames context appearing/disappearing or changing forces a reinit */
1993  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
1994  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
1995  need_reinit = 1;
1996 
 /* display matrix (rotation) side data changed or disappeared */
1998  if (!ifilter->displaymatrix || memcmp(sd->data, ifilter->displaymatrix, sizeof(int32_t) * 9))
1999  need_reinit = 1;
2000  } else if (ifilter->displaymatrix)
2001  need_reinit = 1;
2002 
2003  if (need_reinit) {
2005  if (ret < 0)
2006  return ret;
2007  }
2008 
2009  /* (re)init the graph if possible, otherwise buffer the frame and return */
2010  if (need_reinit || !fg->graph) {
2011  if (!ifilter_has_all_input_formats(fg)) {
2013  if (!tmp)
2014  return AVERROR(ENOMEM);
2015 
2016  ret = av_fifo_write(ifilter->frame_queue, &tmp, 1);
2017  if (ret < 0)
2018  av_frame_free(&tmp);
2019 
2020  return ret;
2021  }
2022 
 /* drain the old graph before tearing it down and reconfiguring */
2023  ret = reap_filters(1);
2024  if (ret < 0 && ret != AVERROR_EOF) {
2025  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2026  return ret;
2027  }
2028 
2029  ret = configure_filtergraph(fg);
2030  if (ret < 0) {
2031  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2032  return ret;
2033  }
2034  }
2035 
2036  ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, buffersrc_flags);
2037  if (ret < 0) {
2038  if (ret != AVERROR_EOF)
2039  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2040  return ret;
2041  }
2042 
2043  return 0;
2044 }
2045 
/*
 * Signal EOF on one input filter. If the filtergraph was configured, the
 * EOF is forwarded to the buffer source; otherwise the filter's parameters
 * are seeded from codec parameters so the graph can still be configured,
 * failing with AVERROR_INVALIDDATA if the format remains unknown.
 *
 * NOTE(review): original line 2053 (the call assigning `ret` inside the
 * configured-graph branch, presumably the buffersrc close/EOF call using
 * `pts`) is missing from this extraction — verify against ffmpeg.c.
 */
2046 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2047 {
2048  int ret;
2049 
2050  ifilter->eof = 1;
2051 
2052  if (ifilter->filter) {
2054  if (ret < 0)
2055  return ret;
2056  } else {
2057  // the filtergraph was never configured
2058  if (ifilter->format < 0) {
2059  ret = ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2060  if (ret < 0)
2061  return ret;
2062  }
2063  if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2064  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2065  return AVERROR_INVALIDDATA;
2066  }
2067  }
2068 
2069  return 0;
2070 }
2071 
2072 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2073 // There is the following difference: if you got a frame, you must call
2074 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2075 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2076 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2077 {
2078  int ret;
2079 
2080  *got_frame = 0;
2081 
2082  if (pkt) {
2083  ret = avcodec_send_packet(avctx, pkt);
2084  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2085  // decoded frames with avcodec_receive_frame() until done.
2086  if (ret < 0 && ret != AVERROR_EOF)
2087  return ret;
2088  }
2089 
2090  ret = avcodec_receive_frame(avctx, frame);
2091  if (ret < 0 && ret != AVERROR(EAGAIN))
2092  return ret;
2093  if (ret >= 0)
2094  *got_frame = 1;
2095 
2096  return 0;
2097 }
2098 
/*
 * Distribute one decoded frame to every filter fed by this input stream.
 * All but the last filter receive a kept reference; AVERROR_EOF from a
 * filter is ignored, any other error aborts the loop.
 *
 * Returns 0 on success or the first non-EOF negative error.
 *
 * NOTE(review): the signature line (original line 2099) and the av_log
 * opener at line 2109 are missing from this extraction; by the body the
 * function presumably is send_frame_to_filters(InputStream *ist,
 * AVFrame *decoded_frame) — confirm against the original ffmpeg.c.
 */
2100 {
2101  int i, ret;
2102 
2103  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2104  for (i = 0; i < ist->nb_filters; i++) {
 /* keep a reference for every filter except the last one */
2105  ret = ifilter_send_frame(ist->filters[i], decoded_frame, i < ist->nb_filters - 1);
2106  if (ret == AVERROR_EOF)
2107  ret = 0; /* ignore */
2108  if (ret < 0) {
2110  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2111  break;
2112  }
2113  }
2114  return ret;
2115 }
2116 
/*
 * Decode one audio packet for an input stream: runs the decoder, advances
 * the stream's predicted next pts/dts by the decoded sample count,
 * reconstructs a frame pts when the decoder did not provide one, and
 * rescales it into the 1/sample_rate timebase via av_rescale_delta().
 *
 * NOTE(review): this extraction is missing the first signature line
 * (original line 2117, presumably
 *   static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
 * ), the declaration of `decoded_frame` (2120/2125), and several statement
 * lines (2133, 2137, 2142, 2163, 2166, 2170-2173) — verify against the
 * original ffmpeg.c before relying on this listing.
 */
2118  int *decode_failed)
2119 {
2121  AVCodecContext *avctx = ist->dec_ctx;
2122  int ret, err = 0;
2123  AVRational decoded_frame_tb;
2124 
2126  ret = decode(avctx, decoded_frame, got_output, pkt);
2127  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2128  if (ret < 0)
2129  *decode_failed = 1;
2130 
 /* A decoder reporting an invalid sample rate is a hard error. */
2131  if (ret >= 0 && avctx->sample_rate <= 0) {
2132  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2134  }
2135 
2136  if (ret != AVERROR_EOF)
2138 
2139  if (!*got_output || ret < 0)
2140  return ret;
2141 
2143  ist->frames_decoded++;
2144 
2145  /* increment next_dts to use for the case where the input stream does not
2146  have timestamps or there are multiple frames in the packet */
2147  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2148  avctx->sample_rate;
2149  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2150  avctx->sample_rate;
2151 
 /* Pick a pts source: frame pts, else packet pts, else stream dts. */
2152  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2153  decoded_frame_tb = ist->st->time_base;
2154  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2155  decoded_frame->pts = pkt->pts;
2156  decoded_frame_tb = ist->st->time_base;
2157  }else {
2158  decoded_frame->pts = ist->dts;
2159  decoded_frame_tb = AV_TIME_BASE_Q;
2160  }
 /* Detect a pts discontinuity larger than the packet duration. */
2161  if (pkt && pkt->duration && ist->prev_pkt_pts != AV_NOPTS_VALUE &&
2162  pkt->pts != AV_NOPTS_VALUE && pkt->pts - ist->prev_pkt_pts > pkt->duration)
2164  if (pkt)
2165  ist->prev_pkt_pts = pkt->pts;
2167  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2168  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2169  (AVRational){1, avctx->sample_rate});
2172 
2174  return err < 0 ? err : ret;
2175 }
2176 
/*
 * Decode one video packet for *ist (pkt == NULL means drain), hand the frame
 * to the filters, and update the stream's pts/dts bookkeeping.  Sets
 * *got_output and *duration_pts; *decode_failed is set when the decoder
 * itself reported the error (as opposed to post-decode processing).
 * NOTE(review): this listing is a doxygen scrape — several original lines
 * (the decoded_frame declaration, the actual decode call, the av_log call
 * heads, and the send-to-filters call around original lines 2207-2208,
 * 2292-2297) were dropped by the extraction.  Verify against upstream
 * ffmpeg.c before editing.
 */
2177 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2178  int *decode_failed)
2179 {
2181  int i, ret = 0, err = 0;
2182  int64_t best_effort_timestamp;
2183  int64_t dts = AV_NOPTS_VALUE;
2184 
2185  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2186  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2187  // skip the packet.
2188  if (!eof && pkt && pkt->size == 0)
2189  return 0;
2190 
2191  if (ist->dts != AV_NOPTS_VALUE)
2192  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2193  if (pkt) {
2194  pkt->dts = dts; // ffmpeg.c probably shouldn't do this
2195  }
2196 
2197  // The old code used to set dts on the drain packet, which does not work
2198  // with the new API anymore.
2199  if (eof) {
/* Append the last known dts so drained (no-pts) frames can still be stamped below. */
2200  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2201  if (!new)
2202  return AVERROR(ENOMEM);
2203  ist->dts_buffer = new;
2204  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2205  }
2206 
/* NOTE(review): the decode call that sets `ret` (original lines 2207-2208) is missing from this scrape. */
2209  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2210  if (ret < 0)
2211  *decode_failed = 1;
2212 
2213  // The following line may be required in some cases where there is no parser
2214  // or the parser does not has_b_frames correctly
2215  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2216  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2217  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2218  } else
2220  "video_delay is larger in decoder than demuxer %d > %d.\n"
2221  "If you want to help, upload a sample "
2222  "of this file to https://streams.videolan.org/upload/ "
2223  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2224  ist->dec_ctx->has_b_frames,
2225  ist->st->codecpar->video_delay);
2226  }
2227 
2228  if (ret != AVERROR_EOF)
2230 
2231  if (*got_output && ret >= 0) {
2232  if (ist->dec_ctx->width != decoded_frame->width ||
2233  ist->dec_ctx->height != decoded_frame->height ||
2234  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2235  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2239  ist->dec_ctx->width,
2240  ist->dec_ctx->height,
2241  ist->dec_ctx->pix_fmt);
2242  }
2243  }
2244 
2245  if (!*got_output || ret < 0)
2246  return ret;
2247 
2248  if(ist->top_field_first>=0)
2250 
2251  ist->frames_decoded++;
2252 
/* Retrieve the frame from hardware memory when a hwaccel produced it. */
2254  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2255  if (err < 0)
2256  goto fail;
2257  }
2258 
2259  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2260  *duration_pts = decoded_frame->duration;
2261 
2262  if (ist->framerate.num)
2263  best_effort_timestamp = ist->cfr_next_pts++;
2264 
/* On drain, fall back to queued packet dts values saved above when the frame has no timestamp. */
2265  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2266  best_effort_timestamp = ist->dts_buffer[0];
2267 
2268  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2269  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2270  ist->nb_dts_buffer--;
2271  }
2272 
2273  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2274  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2275 
2276  if (ts != AV_NOPTS_VALUE)
2277  ist->next_pts = ist->pts = ts;
2278  }
2279 
2280  if (debug_ts) {
2281  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2282  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2283  ist->st->index, av_ts2str(decoded_frame->pts),
2285  best_effort_timestamp,
2286  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2288  ist->st->time_base.num, ist->st->time_base.den);
2289  }
2290 
2291  if (ist->st->sample_aspect_ratio.num)
2293 
2295 
2296 fail:
2298  return err < 0 ? err : ret;
2299 }
2300 
/*
 * Tail of transcode_subtitles(): decodes one subtitle packet, optionally
 * fixes up the previous subtitle's display duration (-fix_sub_duration),
 * feeds sub2video/filters, and sends the subtitle to every eligible
 * subtitle-encoding output stream.
 * NOTE(review): doxygen scrape — the signature's first line (original 2301)
 * and several statements (e.g. the queueing call near 2349) are missing.
 */
2302  int *decode_failed)
2303 {
2305  int free_sub = 1;
2306  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2307  &subtitle, got_output, pkt);
2308 
2310 
2311  if (ret < 0 || !*got_output) {
2312  *decode_failed = 1;
/* An empty packet at EOF flushes any pending sub2video state. */
2313  if (!pkt->size)
2314  sub2video_flush(ist);
2315  return ret;
2316  }
2317 
2318  if (ist->fix_sub_duration) {
2319  int end = 1;
2320  if (ist->prev_sub.got_output) {
/* Clamp the previous subtitle's end_display_time (ms) to the gap to this one. */
2321  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2322  1000, AV_TIME_BASE);
2323  if (end < ist->prev_sub.subtitle.end_display_time) {
2325  "Subtitle duration reduced from %"PRId32" to %d%s\n",
2327  end <= 0 ? ", dropping it" : "");
2328  ist->prev_sub.subtitle.end_display_time = end;
2329  }
2330  }
/* Emit the buffered previous subtitle this round; stash the current one. */
2331  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2332  FFSWAP(int, ret, ist->prev_sub.ret);
2334  if (end <= 0)
2335  goto out;
2336  }
2337 
2338  if (!*got_output)
2339  return ret;
2340 
2341  if (ist->sub2video.frame) {
2342  sub2video_update(ist, INT64_MIN, &subtitle);
2343  } else if (ist->nb_filters) {
2344  if (!ist->sub2video.sub_queue)
2346  if (!ist->sub2video.sub_queue)
2347  exit_program(1);
2348 
2350  if (ret < 0)
2351  exit_program(1);
/* Ownership of `subtitle` moved to the queue — don't free it below. */
2352  free_sub = 0;
2353  }
2354 
2355  if (!subtitle.num_rects)
2356  goto out;
2357 
2358  ist->frames_decoded++;
2359 
2360  for (i = 0; i < nb_output_streams; i++) {
2362 
2363  if (!check_output_constraints(ist, ost) || !ost->enc_ctx
2364  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2365  continue;
2366 
2367  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2368  }
2369 
2370 out:
2371  if (free_sub)
2373  return ret;
2374 }
2375 
/*
 * Body of send_filter_eof(): propagate EOF (with the stream's final pts,
 * rescaled to stream time base) to every filter input attached to *ist.
 * Returns 0 or the first ifilter_send_eof() error.
 * NOTE(review): signature line (original 2376) and the rounding-flags
 * argument line (2381) are missing from this scrape.
 */
2377 {
2378  int i, ret;
2379  /* TODO keep pts also in stream time base to avoid converting back */
2380  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2382 
2383  for (i = 0; i < ist->nb_filters; i++) {
2384  ret = ifilter_send_eof(ist->filters[i], pts);
2385  if (ret < 0)
2386  return ret;
2387  }
2388  return 0;
2389 }
2390 
2391 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * Central per-packet dispatcher: maintains the input stream's pts/dts
 * estimates, runs the appropriate decoder (audio/video/subtitle) in a loop
 * (looping only while draining on EOF), signals filter EOF after a flush,
 * advances timestamps for the stream-copy path, and finally forwards the
 * packet to all stream-copy outputs.  Returns 0 once EOF was reached,
 * non-zero otherwise.
 * NOTE(review): doxygen scrape — a few statement lines are missing (e.g. the
 * `ticks` computation at original 2454 and the framerate.num divisors at
 * 2457/2560, the pkt->duration rescale at 2555, the ost lookup at 2570).
 */
2392 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2393 {
2394  const AVCodecParameters *par = ist->st->codecpar;
2395  int ret = 0, i;
2396  int repeating = 0;
2397  int eof_reached = 0;
2398 
2399  AVPacket *avpkt = ist->pkt;
2400 
/* Seed first_dts/dts/pts once, before the first packet is processed. */
2401  if (!ist->saw_first_ts) {
2402  ist->first_dts =
2403  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2404  ist->pts = 0;
2405  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2406  ist->first_dts =
2407  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2408  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2409  }
2410  ist->saw_first_ts = 1;
2411  }
2412 
2413  if (ist->next_dts == AV_NOPTS_VALUE)
2414  ist->next_dts = ist->dts;
2415  if (ist->next_pts == AV_NOPTS_VALUE)
2416  ist->next_pts = ist->pts;
2417 
2418  if (pkt) {
2419  av_packet_unref(avpkt);
2420  ret = av_packet_ref(avpkt, pkt);
2421  if (ret < 0)
2422  return ret;
2423  }
2424 
2425  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2426  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2427  if (par->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2428  ist->next_pts = ist->pts = ist->dts;
2429  }
2430 
2431  // while we have more to decode or while the decoder did output something on EOF
2432  while (ist->decoding_needed) {
2433  int64_t duration_dts = 0;
2434  int64_t duration_pts = 0;
2435  int got_output = 0;
2436  int decode_failed = 0;
2437 
2438  ist->pts = ist->next_pts;
2439  ist->dts = ist->next_dts;
2440 
2441  switch (par->codec_type) {
2442  case AVMEDIA_TYPE_AUDIO:
2443  ret = decode_audio (ist, repeating ? NULL : avpkt, &got_output,
2444  &decode_failed);
2445  av_packet_unref(avpkt);
2446  break;
2447  case AVMEDIA_TYPE_VIDEO:
2448  ret = decode_video (ist, repeating ? NULL : avpkt, &got_output, &duration_pts, !pkt,
2449  &decode_failed);
2450  if (!repeating || !pkt || got_output) {
2451  if (pkt && pkt->duration) {
2452  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2453  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2455  duration_dts = ((int64_t)AV_TIME_BASE *
2456  ist->dec_ctx->framerate.den * ticks) /
2458  }
2459 
2460  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2461  ist->next_dts += duration_dts;
2462  }else
2463  ist->next_dts = AV_NOPTS_VALUE;
2464  }
2465 
/* Prefer the decoder-reported frame duration; fall back to the dts estimate. */
2466  if (got_output) {
2467  if (duration_pts > 0) {
2468  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2469  } else {
2470  ist->next_pts += duration_dts;
2471  }
2472  }
2473  av_packet_unref(avpkt);
2474  break;
2475  case AVMEDIA_TYPE_SUBTITLE:
2476  if (repeating)
2477  break;
2478  ret = transcode_subtitles(ist, avpkt, &got_output, &decode_failed);
2479  if (!pkt && ret >= 0)
2480  ret = AVERROR_EOF;
2481  av_packet_unref(avpkt);
2482  break;
2483  default:
2484  return -1;
2485  }
2486 
2487  if (ret == AVERROR_EOF) {
2488  eof_reached = 1;
2489  break;
2490  }
2491 
2492  if (ret < 0) {
2493  if (decode_failed) {
2494  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2495  ist->file_index, ist->st->index, av_err2str(ret));
2496  } else {
2497  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2498  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2499  }
2500  if (!decode_failed || exit_on_error)
2501  exit_program(1);
2502  break;
2503  }
2504 
2505  if (got_output)
2506  ist->got_output = 1;
2507 
2508  if (!got_output)
2509  break;
2510 
2511  // During draining, we might get multiple output frames in this loop.
2512  // ffmpeg.c does not drain the filter chain on configuration changes,
2513  // which means if we send multiple frames at once to the filters, and
2514  // one of those frames changes configuration, the buffered frames will
2515  // be lost. This can upset certain FATE tests.
2516  // Decode only 1 frame per call on EOF to appease these FATE tests.
2517  // The ideal solution would be to rewrite decoding to use the new
2518  // decoding API in a better way.
2519  if (!pkt)
2520  break;
2521 
2522  repeating = 1;
2523  }
2524 
2525  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2526  /* except when looping we need to flush but not to send an EOF */
2527  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2528  int ret = send_filter_eof(ist);
2529  if (ret < 0) {
2530  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2531  exit_program(1);
2532  }
2533  }
2534 
2535  /* handle stream copy */
2536  if (!ist->decoding_needed && pkt) {
2537  ist->dts = ist->next_dts;
2538  switch (par->codec_type) {
2539  case AVMEDIA_TYPE_AUDIO:
2540  av_assert1(pkt->duration >= 0);
2541  if (par->sample_rate) {
2542  ist->next_dts += ((int64_t)AV_TIME_BASE * par->frame_size) /
2543  par->sample_rate;
2544  } else {
2546  }
2547  break;
2548  case AVMEDIA_TYPE_VIDEO:
2549  if (ist->framerate.num) {
2550  // TODO: Remove work-around for c99-to-c89 issue 7
2551  AVRational time_base_q = AV_TIME_BASE_Q;
2552  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2553  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2554  } else if (pkt->duration) {
2556  } else if(ist->dec_ctx->framerate.num != 0) {
2557  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2558  ist->next_dts += ((int64_t)AV_TIME_BASE *
2559  ist->dec_ctx->framerate.den * ticks) /
2561  }
2562  break;
2563  }
2564  ist->pts = ist->dts;
2565  ist->next_pts = ist->next_dts;
2566  } else if (!ist->decoding_needed)
2567  eof_reached = 1;
2568 
2569  for (i = 0; i < nb_output_streams; i++) {
2571 
2572  if (!check_output_constraints(ist, ost) || ost->enc_ctx ||
2573  (!pkt && no_eof))
2574  continue;
2575 
2576  do_streamcopy(ist, ost, pkt);
2577  }
2578 
2579  return !eof_reached;
2580 }
2581 
/*
 * Body of get_format(): the AVCodecContext get_format callback installed in
 * init_input_stream().  Walks the decoder-offered pixel-format list, picks
 * the first software format, or — for HWACCEL_GENERIC/AUTO — probes the
 * codec's hardware configs and initializes the matching hwaccel device,
 * recording ist->hwaccel_pix_fmt on success.
 * NOTE(review): signature (original 2582), the pix-fmt descriptor lookup
 * (2589) and the hwaccel-init call (2610) are missing from this scrape.
 */
2583 {
2584  InputStream *ist = s->opaque;
2585  const enum AVPixelFormat *p;
2586  int ret;
2587 
2588  for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2590  const AVCodecHWConfig *config = NULL;
2591  int i;
2592 
/* First non-hwaccel format in the list is the software fallback. */
2593  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2594  break;
2595 
2596  if (ist->hwaccel_id == HWACCEL_GENERIC ||
2597  ist->hwaccel_id == HWACCEL_AUTO) {
2598  for (i = 0;; i++) {
2599  config = avcodec_get_hw_config(s->codec, i);
2600  if (!config)
2601  break;
2602  if (!(config->methods &
2604  continue;
2605  if (config->pix_fmt == *p)
2606  break;
2607  }
2608  }
2609  if (config && config->device_type == ist->hwaccel_device_type) {
2611  if (ret < 0) {
/* Explicitly requested hwaccel that fails to init is fatal for this stream. */
2612  if (ist->hwaccel_id == HWACCEL_GENERIC) {
2614  "%s hwaccel requested for input stream #%d:%d, "
2615  "but cannot be initialized.\n",
2616  av_hwdevice_get_type_name(config->device_type),
2617  ist->file_index, ist->st->index);
2618  return AV_PIX_FMT_NONE;
2619  }
2620  continue;
2621  }
2622 
2623  ist->hwaccel_pix_fmt = *p;
2624  break;
2625  }
2626  }
2627 
2628  return *p;
2629 }
2630 
/*
 * Prepare input stream #ist_index for decoding: resolve the decoder, install
 * the get_format callback, apply per-codec option tweaks, set pkt_timebase,
 * configure threading defaults, set up the hw device, and open the decoder.
 * On failure writes a human-readable message into `error` and returns a
 * negative AVERROR.  Always resets next_pts/next_dts at the end.
 * NOTE(review): doxygen scrape — a few lines are missing (e.g. originals
 * 2647, 2655, 2666, 2669, 2687), including the hw_device_setup call that
 * assigns `ret` before the check at 2670.
 */
2631 static int init_input_stream(int ist_index, char *error, int error_len)
2632 {
2633  int ret;
2634  InputStream *ist = input_streams[ist_index];
2635 
2636  if (ist->decoding_needed) {
2637  const AVCodec *codec = ist->dec;
2638  if (!codec) {
2639  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2640  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2641  return AVERROR(EINVAL);
2642  }
2643 
2644  ist->dec_ctx->opaque = ist;
2645  ist->dec_ctx->get_format = get_format;
2646 #if LIBAVCODEC_VERSION_MAJOR < 60
2648  ist->dec_ctx->thread_safe_callbacks = 1;
2649  })
2650 #endif
2651 
2652  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2653  (ist->decoding_needed & DECODING_FOR_OST)) {
2654  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2656  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2657  }
2658 
2659  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2660  * audio, and video decoders such as cuvid or mediacodec */
2661  ist->dec_ctx->pkt_timebase = ist->st->time_base;
2662 
2663  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2664  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2665  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2667  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2668 
2670  if (ret < 0) {
2671  snprintf(error, error_len, "Device setup failed for "
2672  "decoder on input stream #%d:%d : %s",
2673  ist->file_index, ist->st->index, av_err2str(ret));
2674  return ret;
2675  }
2676 
2677  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2678  if (ret == AVERROR_EXPERIMENTAL)
2679  abort_codec_experimental(codec, 0);
2680 
2681  snprintf(error, error_len,
2682  "Error while opening decoder for input stream "
2683  "#%d:%d : %s",
2684  ist->file_index, ist->st->index, av_err2str(ret));
2685  return ret;
2686  }
2688  }
2689 
2690  ist->next_pts = AV_NOPTS_VALUE;
2691  ist->next_dts = AV_NOPTS_VALUE;
2692 
2693  return 0;
2694 }
2695 
/*
 * Body of get_input_stream(): return the InputStream feeding this output
 * stream, or NULL when the output has no source (ost->source_index < 0).
 * NOTE(review): the signature line (original 2696) is missing from this scrape.
 */
2697 {
2698  if (ost->source_index >= 0)
2699  return input_streams[ost->source_index];
2700  return NULL;
2701 }
2702 
/**
 * qsort()-compatible comparator for int64_t values.
 * Returns a negative value, zero, or a positive value when *a is less than,
 * equal to, or greater than *b, respectively.  Implemented with explicit
 * comparisons (no subtraction) so it cannot overflow.
 */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    if (lhs < rhs)
        return -1;
    return lhs > rhs ? 1 : 0;
}
2707 
/*
 * Body of the bitstream-filter initializer for an output stream (presumably
 * init_output_bsfs — signature line, original 2708, is missing from this
 * scrape).  Copies the stream's codec parameters into the BSF input, runs
 * av_bsf_init(), then propagates the BSF's output parameters and time base
 * back onto the stream.  No-op (returns 0) when no BSF is attached.
 */
2709 {
2710  AVBSFContext *ctx = ost->bsf_ctx;
2711  int ret;
2712 
2713  if (!ctx)
2714  return 0;
2715 
2716  ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
2717  if (ret < 0)
2718  return ret;
2719 
2720  ctx->time_base_in = ost->st->time_base;
2721 
2722  ret = av_bsf_init(ctx);
2723  if (ret < 0) {
2724  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2725  ctx->filter->name);
2726  return ret;
2727  }
2728 
/* The BSF may rewrite codec parameters and time base (e.g. mp4->annexb). */
2729  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
2730  if (ret < 0)
2731  return ret;
2732  ost->st->time_base = ctx->time_base_out;
2733 
2734  return 0;
2735 }
2736 
/*
 * Body of the stream-copy output initializer (presumably
 * init_output_stream_streamcopy — signature, original 2737, is missing from
 * this scrape, as are the ist/ifile lookups at 2740-2741).  Copies codec
 * parameters from the input stream through a scratch AVCodecContext so that
 * -c copy option overrides apply, resolves the codec tag for the output
 * container, copies frame rates, time base, duration, side data and
 * display-rotation override, then applies per-codec-type fixups.
 */
2738 {
2739  OutputFile *of = output_files[ost->file_index];
2742  AVCodecParameters *par = ost->st->codecpar;
2743  AVCodecContext *codec_ctx;
2744  AVRational sar;
2745  int i, ret;
2746  uint32_t codec_tag = par->codec_tag;
2747 
2748  av_assert0(ist && !ost->filter);
2749 
/* Round-trip through a temporary context so encoder_opts can patch parameters. */
2750  codec_ctx = avcodec_alloc_context3(NULL);
2751  if (!codec_ctx)
2752  return AVERROR(ENOMEM);
2753 
2754  ret = avcodec_parameters_to_context(codec_ctx, ist->st->codecpar);
2755  if (ret >= 0)
2756  ret = av_opt_set_dict(codec_ctx, &ost->encoder_opts);
2757  if (ret < 0) {
2759  "Error setting up codec context options.\n");
2760  avcodec_free_context(&codec_ctx);
2761  return ret;
2762  }
2763 
2764  ret = avcodec_parameters_from_context(par, codec_ctx);
2765  avcodec_free_context(&codec_ctx);
2766  if (ret < 0) {
2768  "Error getting reference codec parameters.\n");
2769  return ret;
2770  }
2771 
/* Keep the input tag only when the output container maps it to the same codec. */
2772  if (!codec_tag) {
2773  unsigned int codec_tag_tmp;
2774  if (!of->format->codec_tag ||
2775  av_codec_get_id (of->format->codec_tag, par->codec_tag) == par->codec_id ||
2776  !av_codec_get_tag2(of->format->codec_tag, par->codec_id, &codec_tag_tmp))
2777  codec_tag = par->codec_tag;
2778  }
2779 
2780  par->codec_tag = codec_tag;
2781 
2782  if (!ost->frame_rate.num)
2783  ost->frame_rate = ist->framerate;
2784 
2785  if (ost->frame_rate.num)
2786  ost->st->avg_frame_rate = ost->frame_rate;
2787  else
2788  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2789 
2791  if (ret < 0)
2792  return ret;
2793 
2794  // copy timebase while removing common factors
2795  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0) {
2796  if (ost->frame_rate.num)
2797  ost->st->time_base = av_inv_q(ost->frame_rate);
2798  else
2800  }
2801 
2802  // copy estimated duration as a hint to the muxer
2803  if (ost->st->duration <= 0 && ist->st->duration > 0)
2804  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
2805 
2806  if (!ost->copy_prior_start) {
2807  ost->ts_copy_start = (of->start_time == AV_NOPTS_VALUE) ?
2808  0 : of->start_time;
2809  if (copy_ts && ifile->start_time != AV_NOPTS_VALUE) {
2810  ost->ts_copy_start = FFMAX(ost->ts_copy_start,
2811  ifile->start_time + ifile->ts_offset);
2812  }
2813  }
2814 
2815  if (ist->st->nb_side_data) {
2816  for (i = 0; i < ist->st->nb_side_data; i++) {
2817  const AVPacketSideData *sd_src = &ist->st->side_data[i];
2818  uint8_t *dst_data;
2819 
2820  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
2821  if (!dst_data)
2822  return AVERROR(ENOMEM);
2823  memcpy(dst_data, sd_src->data, sd_src->size);
2824  }
2825  }
2826 
2827  if (ost->rotate_overridden) {
2829  sizeof(int32_t) * 9);
2830  if (sd)
2831  av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
2832  }
2833 
2834  switch (par->codec_type) {
2835  case AVMEDIA_TYPE_AUDIO:
/* block_align values known to be bogus for MP3/AC3 in some containers. */
2836  if ((par->block_align == 1 || par->block_align == 1152 || par->block_align == 576) &&
2837  par->codec_id == AV_CODEC_ID_MP3)
2838  par->block_align = 0;
2839  if (par->codec_id == AV_CODEC_ID_AC3)
2840  par->block_align = 0;
2841  break;
2842  case AVMEDIA_TYPE_VIDEO:
2843  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2844  sar =
2845  av_mul_q(ost->frame_aspect_ratio,
2846  (AVRational){ par->height, par->width });
2847  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2848  "with stream copy may produce invalid files\n");
2849  }
2850  else if (ist->st->sample_aspect_ratio.num)
2851  sar = ist->st->sample_aspect_ratio;
2852  else
2853  sar = par->sample_aspect_ratio;
2854  ost->st->sample_aspect_ratio = par->sample_aspect_ratio = sar;
2855  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2856  ost->st->r_frame_rate = ist->st->r_frame_rate;
2857  break;
2858  }
2859 
2860  ost->mux_timebase = ist->st->time_base;
2861 
2862  return 0;
2863 }
2864 
/*
 * Body of set_encoder_id(): store the "encoder" metadata tag on the output
 * stream ("Lavc <name>", or the full LIBAVCODEC_IDENT when not in bitexact
 * mode).  Does nothing when the tag is already set by the user.
 * NOTE(review): the signature (original 2865) and the av_dict_set flags
 * argument (2884) are missing from this scrape; the allocated string appears
 * to be handed to av_dict_set — confirm the flags include DONT_STRDUP_VAL
 * upstream before assuming no leak.
 */
2866 {
2867  uint8_t *encoder_string;
2868  int encoder_string_len;
2869 
2870  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
2871  return;
2872 
2873  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2874  encoder_string = av_mallocz(encoder_string_len);
2875  if (!encoder_string)
2876  exit_program(1);
2877 
2878  if (!of->bitexact && !ost->bitexact)
2879  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2880  else
2881  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2882  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
2883  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2885 }
2886 
/*
 * Tail of parse_forced_key_frames() (signature first line, original 2887, is
 * missing from this scrape).  Parses the comma-separated -force_key_frames
 * spec `kf`: each entry is either a timestamp, or "chapters[+offset]" which
 * expands to one keyframe per chapter of the output file.  Fills
 * ost->forced_kf_pts (sorted, in avctx->time_base) and forced_kf_count.
 * Note: mutates `kf` in place (commas are overwritten with NULs).
 */
2888  AVCodecContext *avctx)
2889 {
2890  char *p;
2891  int n = 1, i, size, index = 0;
2892  int64_t t, *pts;
2893 
/* Count entries: one more than the number of commas. */
2894  for (p = kf; *p; p++)
2895  if (*p == ',')
2896  n++;
2897  size = n;
2898  pts = av_malloc_array(size, sizeof(*pts));
2899  if (!pts) {
2900  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2901  exit_program(1);
2902  }
2903 
2904  p = kf;
2905  for (i = 0; i < n; i++) {
2906  char *next = strchr(p, ',');
2907 
2908  if (next)
2909  *next++ = 0;
2910 
2911  if (!memcmp(p, "chapters", 8)) {
2912  OutputFile *of = output_files[ost->file_index];
2913  AVChapter * const *ch;
2914  unsigned int nb_ch;
2915  int j;
2916 
2917  ch = of_get_chapters(of, &nb_ch);
2918 
/* Grow the array: this one entry expands to nb_ch keyframes. */
2919  if (nb_ch > INT_MAX - size ||
2920  !(pts = av_realloc_f(pts, size += nb_ch - 1,
2921  sizeof(*pts)))) {
2923  "Could not allocate forced key frames array.\n");
2924  exit_program(1);
2925  }
2926  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2927  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2928 
2929  for (j = 0; j < nb_ch; j++) {
2930  const AVChapter *c = ch[j];
2931  av_assert1(index < size);
2932  pts[index++] = av_rescale_q(c->start, c->time_base,
2933  avctx->time_base) + t;
2934  }
2935 
2936  } else {
2937 
2938  t = parse_time_or_die("force_key_frames", p, 1);
2939  av_assert1(index < size);
2940  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2941 
2942  }
2943 
2944  p = next;
2945  }
2946 
2947  av_assert0(index == size);
2948  qsort(pts, size, sizeof(*pts), compare_int64);
2949  ost->forced_kf_count = size;
2950  ost->forced_kf_pts = pts;
2951 }
2952 
/*
 * Choose the encoder time base for `ost`: an explicit positive -enc_time_base
 * wins; a negative value means "copy from the input stream" (falling back to
 * the default with a warning when no input stream exists); otherwise use
 * `default_time_base`.
 * NOTE(review): the `ist` lookup (original 2955, presumably via
 * get_input_stream) and the av_log head (2969) are missing from this scrape.
 */
2953 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
2954 {
2956  AVCodecContext *enc_ctx = ost->enc_ctx;
2957 
2958  if (ost->enc_timebase.num > 0) {
2959  enc_ctx->time_base = ost->enc_timebase;
2960  return;
2961  }
2962 
2963  if (ost->enc_timebase.num < 0) {
2964  if (ist) {
2965  enc_ctx->time_base = ist->st->time_base;
2966  return;
2967  }
2968 
2970  "Input stream data for output stream #%d:%d not available, "
2971  "using default time base\n", ost->file_index, ost->index);
2972  }
2973 
2974  enc_ctx->time_base = default_time_base;
2975 }
2976 
/*
 * Body of the encoding output-stream initializer (presumably
 * init_output_stream_encode — signature at original 2977 and the ist/frame
 * parameter lines at 2979/2981 are missing from this scrape).  Pulls the
 * negotiated format, rate, geometry and channel layout from the buffersink
 * at the end of the filter graph into the encoder context, resolves the
 * output frame rate (CLI > filter > 25fps fallback, clamped to -fpsmax and
 * the encoder's supported list), sets field order and color properties from
 * the first frame, and parses -force_key_frames.
 */
2978 {
2980  AVCodecContext *enc_ctx = ost->enc_ctx;
2982  OutputFile *of = output_files[ost->file_index];
2983  int ret;
2984 
2985  set_encoder_id(output_files[ost->file_index], ost);
2986 
2987  if (ist) {
2988  dec_ctx = ist->dec_ctx;
2989  }
2990 
2991  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
2992  if (!ost->frame_rate.num)
2993  ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
2994  if (ist && !ost->frame_rate.num && !ost->max_frame_rate.num) {
2995  ost->frame_rate = (AVRational){25, 1};
2997  "No information "
2998  "about the input framerate is available. Falling "
2999  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3000  "if you want a different framerate.\n",
3001  ost->file_index, ost->index);
3002  }
3003 
3004  if (ost->max_frame_rate.num &&
3005  (av_q2d(ost->frame_rate) > av_q2d(ost->max_frame_rate) ||
3006  !ost->frame_rate.den))
3007  ost->frame_rate = ost->max_frame_rate;
3008 
/* Snap to the nearest rate the encoder supports unless -force_fps is set. */
3009  if (ost->enc->supported_framerates && !ost->force_fps) {
3010  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3011  ost->frame_rate = ost->enc->supported_framerates[idx];
3012  }
3013  // reduce frame rate for mpeg4 to be within the spec limits
3014  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3015  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3016  ost->frame_rate.num, ost->frame_rate.den, 65535);
3017  }
3018  }
3019 
3020  switch (enc_ctx->codec_type) {
3021  case AVMEDIA_TYPE_AUDIO:
3022  enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3023  enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3024  ret = av_buffersink_get_ch_layout(ost->filter->filter, &enc_ctx->ch_layout);
3025  if (ret < 0)
3026  return ret;
3027 
3028  if (ost->bits_per_raw_sample)
3029  enc_ctx->bits_per_raw_sample = ost->bits_per_raw_sample;
3030  else if (dec_ctx && ost->filter->graph->is_meta)
3032  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3033 
3035  break;
3036 
3037  case AVMEDIA_TYPE_VIDEO:
3038  init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3039 
3040  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3041  enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3042  if ( av_q2d(enc_ctx->time_base) < 0.001 && ost->vsync_method != VSYNC_PASSTHROUGH
3043  && (ost->vsync_method == VSYNC_CFR || ost->vsync_method == VSYNC_VSCFR ||
3044  (ost->vsync_method == VSYNC_AUTO && !(of->format->flags & AVFMT_VARIABLE_FPS)))){
3045  av_log(NULL, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3046  "Please consider specifying a lower framerate, a different muxer or "
3047  "setting vsync/fps_mode to vfr\n");
3048  }
3049 
3050  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3051  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3052  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3053  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3054  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3055  av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3056 
3057  enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3058 
3059  if (ost->bits_per_raw_sample)
3060  enc_ctx->bits_per_raw_sample = ost->bits_per_raw_sample;
3061  else if (dec_ctx && ost->filter->graph->is_meta)
3063  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3064 
/* Carry color metadata of the first filtered frame into the encoder. */
3065  if (frame) {
3066  enc_ctx->color_range = frame->color_range;
3067  enc_ctx->color_primaries = frame->color_primaries;
3068  enc_ctx->color_trc = frame->color_trc;
3069  enc_ctx->colorspace = frame->colorspace;
3070  enc_ctx->chroma_sample_location = frame->chroma_location;
3071  }
3072 
3073  enc_ctx->framerate = ost->frame_rate;
3074 
3075  ost->st->avg_frame_rate = ost->frame_rate;
3076 
3077  // Field order: autodetection
3078  if (frame) {
3080  ost->top_field_first >= 0)
3081  frame->top_field_first = !!ost->top_field_first;
3082 
3083  if (frame->interlaced_frame) {
3084  if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG)
3085  enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
3086  else
3087  enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
3088  } else
3089  enc_ctx->field_order = AV_FIELD_PROGRESSIVE;
3090  }
3091 
3092  // Field order: override
3093  if (ost->top_field_first == 0) {
3094  enc_ctx->field_order = AV_FIELD_BB;
3095  } else if (ost->top_field_first == 1) {
3096  enc_ctx->field_order = AV_FIELD_TT;
3097  }
3098 
3099  if (ost->forced_keyframes) {
3100  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3101  ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3103  if (ret < 0) {
3105  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3106  return ret;
3107  }
3108  ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3109  ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3110  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3111  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3112 
3113  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3114  // parse it only for static kf timings
3115  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3116  parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3117  }
3118  }
3119  break;
3120  case AVMEDIA_TYPE_SUBTITLE:
3121  enc_ctx->time_base = AV_TIME_BASE_Q;
3122  if (!enc_ctx->width) {
3123  enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3124  enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3125  }
3126  break;
3127  case AVMEDIA_TYPE_DATA:
3128  break;
3129  default:
3130  abort();
3131  break;
3132  }
3133 
3134  if (ost->bitexact)
3135  enc_ctx->flags |= AV_CODEC_FLAG_BITEXACT;
3136 
3137  if (ost->sq_idx_encode >= 0)
3138  sq_set_tb(of->sq_encode, ost->sq_idx_encode, enc_ctx->time_base);
3139 
3140  ost->mux_timebase = enc_ctx->time_base;
3141 
3142  return 0;
3143 }
3144 
/*
 * Body of the output-stream initializer (presumably init_output_stream —
 * the signature's first line, original 3145, is missing from this scrape).
 * For encoding streams: configures the encoder context (via the call at the
 * missing line 3156), copies the decoder's subtitle header, sets up hw
 * devices, validates text-vs-bitmap subtitle conversion, opens the encoder,
 * mirrors encoder/global side data onto the stream, and sets time base and
 * duration hints.  For stream copy it delegates to the streamcopy path
 * (missing line 3269), then initializes bitstream filters (missing 3277)
 * and finalizes the muxer via of_check_init().
 */
3146  char *error, int error_len)
3147 {
3148  OutputFile *of = output_files[ost->file_index];
3149  int ret = 0;
3150 
3151  if (ost->enc_ctx) {
3152  const AVCodec *codec = ost->enc;
3153  AVCodecContext *dec = NULL;
3154  InputStream *ist;
3155 
3157  if (ret < 0)
3158  return ret;
3159 
3160  if ((ist = get_input_stream(ost)))
3161  dec = ist->dec_ctx;
3162  if (dec && dec->subtitle_header) {
3163  /* ASS code assumes this buffer is null terminated so add extra byte. */
3164  ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3165  if (!ost->enc_ctx->subtitle_header)
3166  return AVERROR(ENOMEM);
3167  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3168  ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3169  }
3170  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3171  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3172 
3174  if (ret < 0) {
3175  snprintf(error, error_len, "Device setup failed for "
3176  "encoder on output stream #%d:%d : %s",
3177  ost->file_index, ost->index, av_err2str(ret));
3178  return ret;
3179  }
3180 
/* Subtitle transcoding is only supported within the same class (text or bitmap). */
3181  if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3182  int input_props = 0, output_props = 0;
3183  AVCodecDescriptor const *input_descriptor =
3184  avcodec_descriptor_get(dec->codec_id);
3185  AVCodecDescriptor const *output_descriptor =
3186  avcodec_descriptor_get(ost->enc_ctx->codec_id);
3187  if (input_descriptor)
3188  input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3189  if (output_descriptor)
3190  output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3191  if (input_props && output_props && input_props != output_props) {
3192  snprintf(error, error_len,
3193  "Subtitle encoding currently only possible from text to text "
3194  "or bitmap to bitmap");
3195  return AVERROR_INVALIDDATA;
3196  }
3197  }
3198 
3199  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3200  if (ret == AVERROR_EXPERIMENTAL)
3201  abort_codec_experimental(codec, 1);
3202  snprintf(error, error_len,
3203  "Error while opening encoder for output stream #%d:%d - "
3204  "maybe incorrect parameters such as bit_rate, rate, width or height",
3205  ost->file_index, ost->index);
3206  return ret;
3207  }
3208  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3209  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3210  av_buffersink_set_frame_size(ost->filter->filter,
3211  ost->enc_ctx->frame_size);
3212  assert_avoptions(ost->encoder_opts);
3213  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3214  ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3215  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3216  " It takes bits/s as argument, not kbits/s\n");
3217 
3219  if (ret < 0) {
3221  "Error initializing the output stream codec context.\n");
3222  exit_program(1);
3223  }
3224 
3225  if (ost->enc_ctx->nb_coded_side_data) {
3226  int i;
3227 
3228  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3229  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3230  uint8_t *dst_data;
3231 
3232  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3233  if (!dst_data)
3234  return AVERROR(ENOMEM);
3235  memcpy(dst_data, sd_src->data, sd_src->size);
3236  }
3237  }
3238 
3239  /*
3240  * Add global input side data. For now this is naive, and copies it
3241  * from the input stream's global side data. All side data should
3242  * really be funneled over AVFrame and libavfilter, then added back to
3243  * packet side data, and then potentially using the first packet for
3244  * global side data.
3245  */
3246  if (ist) {
3247  int i;
3248  for (i = 0; i < ist->st->nb_side_data; i++) {
3249  AVPacketSideData *sd = &ist->st->side_data[i];
3250  if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
3251  uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3252  if (!dst)
3253  return AVERROR(ENOMEM);
3254  memcpy(dst, sd->data, sd->size);
3255  if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3256  av_display_rotation_set((uint32_t *)dst, 0);
3257  }
3258  }
3259  }
3260 
3261  // copy timebase while removing common factors
3262  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3263  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3264 
3265  // copy estimated duration as a hint to the muxer
3266  if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3267  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3268  } else if (ost->source_index >= 0) {
3270  if (ret < 0)
3271  return ret;
3272  }
3273 
3274  /* initialize bitstream filters for the output stream
3275  * needs to be done here, because the codec id for streamcopy is not
3276  * known until now */
3278  if (ret < 0)
3279  return ret;
3280 
3281  if (ost->sq_idx_mux >= 0)
3282  sq_set_tb(of->sq_mux, ost->sq_idx_mux, ost->mux_timebase);
3283 
3284  ost->initialized = 1;
3285 
3286  ret = of_check_init(output_files[ost->file_index]);
3287  if (ret < 0)
3288  return ret;
3289 
3290  return ret;
3291 }
3292 
3293 static int transcode_init(void)
3294 {
3295  int ret = 0, i, j, k;
3296  OutputStream *ost;
3297  InputStream *ist;
3298  char error[1024] = {0};
3299 
3300  for (i = 0; i < nb_filtergraphs; i++) {
3301  FilterGraph *fg = filtergraphs[i];
3302  for (j = 0; j < fg->nb_outputs; j++) {
3303  OutputFilter *ofilter = fg->outputs[j];
3304  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3305  continue;
3306  if (fg->nb_inputs != 1)
3307  continue;
3308  for (k = nb_input_streams-1; k >= 0 ; k--)
3309  if (fg->inputs[0]->ist == input_streams[k])
3310  break;
3311  ofilter->ost->source_index = k;
3312  }
3313  }
3314 
3315  /* init framerate emulation */
3316  for (i = 0; i < nb_input_files; i++) {
3318  if (ifile->readrate || ifile->rate_emu)
3319  for (j = 0; j < ifile->nb_streams; j++)
3320  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3321  }
3322 
3323  // Correct starttime based on the enabled streams
3324  for (i = 0; i < nb_input_files; i++) {
3326  AVFormatContext *is = ifile->ctx;
3327  int64_t new_start_time = INT64_MAX;
3328 
3329  if (is->start_time == AV_NOPTS_VALUE ||
3330  !(is->iformat->flags & AVFMT_TS_DISCONT))
3331  continue;
3332 
3333  for (int j = 0; j < is->nb_streams; j++) {
3334  AVStream *st = is->streams[j];
3336  continue;
3337  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3338  }
3339  if (new_start_time > is->start_time) {
3340  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3341  ifile->ts_offset = -new_start_time;
3342  }
3343  }
3344 
3345  /* init input streams */
3346  for (i = 0; i < nb_input_streams; i++)
3347  if ((ret = init_input_stream(i, error, sizeof(error))) < 0)
3348  goto dump_format;
3349 
3350  /*
3351  * initialize stream copy and subtitle/data streams.
3352  * Encoded AVFrame based streams will get initialized as follows:
3353  * - when the first AVFrame is received in do_video_out
3354  * - just before the first AVFrame is received in either transcode_step
3355  * or reap_filters due to us requiring the filter chain buffer sink
3356  * to be configured with the correct audio frame size, which is only
3357  * known after the encoder is initialized.
3358  */
3359  for (i = 0; i < nb_output_streams; i++) {
3360  if (output_streams[i]->enc_ctx &&
3363  continue;
3364 
3366  if (ret < 0)
3367  goto dump_format;
3368  }
3369 
3370  /* discard unused programs */
3371  for (i = 0; i < nb_input_files; i++) {
3373  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3374  AVProgram *p = ifile->ctx->programs[j];
3375  int discard = AVDISCARD_ALL;
3376 
3377  for (k = 0; k < p->nb_stream_indexes; k++)
3378  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3380  break;
3381  }
3382  p->discard = discard;
3383  }
3384  }
3385 
3386  dump_format:
3387  /* dump the stream mapping */
3388  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3389  for (i = 0; i < nb_input_streams; i++) {
3390  ist = input_streams[i];
3391 
3392  for (j = 0; j < ist->nb_filters; j++) {
3393  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3394  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3395  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3396  ist->filters[j]->name);
3397  if (nb_filtergraphs > 1)
3398  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3399  av_log(NULL, AV_LOG_INFO, "\n");
3400  }
3401  }
3402  }
3403 
3404  for (i = 0; i < nb_output_streams; i++) {
3405  ost = output_streams[i];
3406 
3407  if (ost->attachment_filename) {
3408  /* an attached file */
3409  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3410  ost->attachment_filename, ost->file_index, ost->index);
3411  continue;
3412  }
3413 
3414  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3415  /* output from a complex graph */
3416  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3417  if (nb_filtergraphs > 1)
3418  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3419 
3420  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3421  ost->index, ost->enc ? ost->enc->name : "?");
3422  continue;
3423  }
3424 
3425  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3426  input_streams[ost->source_index]->file_index,
3427  input_streams[ost->source_index]->st->index,
3428  ost->file_index,
3429  ost->index);
3430  if (ost->enc_ctx) {
3431  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3432  const AVCodec *out_codec = ost->enc;
3433  const char *decoder_name = "?";
3434  const char *in_codec_name = "?";
3435  const char *encoder_name = "?";
3436  const char *out_codec_name = "?";
3437  const AVCodecDescriptor *desc;
3438 
3439  if (in_codec) {
3440  decoder_name = in_codec->name;
3441  desc = avcodec_descriptor_get(in_codec->id);
3442  if (desc)
3443  in_codec_name = desc->name;
3444  if (!strcmp(decoder_name, in_codec_name))
3445  decoder_name = "native";
3446  }
3447 
3448  if (out_codec) {
3449  encoder_name = out_codec->name;
3450  desc = avcodec_descriptor_get(out_codec->id);
3451  if (desc)
3452  out_codec_name = desc->name;
3453  if (!strcmp(encoder_name, out_codec_name))
3454  encoder_name = "native";
3455  }
3456 
3457  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3458  in_codec_name, decoder_name,
3459  out_codec_name, encoder_name);
3460  } else
3461  av_log(NULL, AV_LOG_INFO, " (copy)");
3462  av_log(NULL, AV_LOG_INFO, "\n");
3463  }
3464 
3465  if (ret) {
3466  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3467  return ret;
3468  }
3469 
3471 
3472  return 0;
3473 }
3474 
3475 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3476 static int need_output(void)
3477 {
3478  int i;
3479 
3480  for (i = 0; i < nb_output_streams; i++) {
 /* NOTE(review): a line was lost in extraction here — `ost` is used below
  * but never declared in the visible text; presumably it was
  * `OutputStream *ost = output_streams[i];` — confirm against upstream. */
3482 
3483  if (ost->finished)
3484  continue;
3485 
 /* at least one output stream is still active, so keep transcoding */
3486  return 1;
3487  }
3488 
 /* every output stream has finished — nothing more to produce */
3489  return 0;
3490 }
3491 
3492 /**
3493  * Select the output stream to process.
3494  *
3495  * @return selected output stream, or NULL if none available
3496  */
3498 {
3499  int i;
3500  int64_t opts_min = INT64_MAX;
3501  OutputStream *ost_min = NULL;
3502 
3503  for (i = 0; i < nb_output_streams; i++) {
3505  int64_t opts;
3506 
3507  if (ost->filter && ost->last_filter_pts != AV_NOPTS_VALUE) {
3508  opts = ost->last_filter_pts;
3509  } else {
3510  opts = ost->last_mux_dts == AV_NOPTS_VALUE ?
3511  INT64_MIN : ost->last_mux_dts;
3512  if (ost->last_mux_dts == AV_NOPTS_VALUE)
3514  "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3515  ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
3516  }
3517 
3518  if (!ost->initialized && !ost->inputs_done)
3519  return ost->unavailable ? NULL : ost;
3520 
3521  if (!ost->finished && opts < opts_min) {
3522  opts_min = opts;
3523  ost_min = ost->unavailable ? NULL : ost;
3524  }
3525  }
3526  return ost_min;
3527 }
3528 
/**
 * Turn terminal echo on stdin on or off.
 * Compiles to a no-op on platforms without termios support.
 */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios tty;

    if (tcgetattr(0, &tty) != 0)
        return;

    if (on)
        tty.c_lflag |= ECHO;
    else
        tty.c_lflag &= ~ECHO;
    tcsetattr(0, TCSANOW, &tty);
#endif
}
3540 
3541 static int check_keyboard_interaction(int64_t cur_time)
3542 {
3543  int i, ret, key;
3544  static int64_t last_time;
3545  if (received_nb_signals)
3546  return AVERROR_EXIT;
3547  /* read_key() returns 0 on EOF */
3548  if (cur_time - last_time >= 100000) {
3549  key = read_key();
3550  last_time = cur_time;
3551  }else
3552  key = -1;
3553  if (key == 'q') {
3554  av_log(NULL, AV_LOG_INFO, "\n\n[q] command received. Exiting.\n\n");
3555  return AVERROR_EXIT;
3556  }
3557  if (key == '+') av_log_set_level(av_log_get_level()+10);
3558  if (key == '-') av_log_set_level(av_log_get_level()-10);
3559  if (key == 's') qp_hist ^= 1;
3560  if (key == 'c' || key == 'C'){
3561  char buf[4096], target[64], command[256], arg[256] = {0};
3562  double time;
3563  int k, n = 0;
3564  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3565  i = 0;
3566  set_tty_echo(1);
3567  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3568  if (k > 0)
3569  buf[i++] = k;
3570  buf[i] = 0;
3571  set_tty_echo(0);
3572  fprintf(stderr, "\n");
3573  if (k > 0 &&
3574  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3575  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3576  target, time, command, arg);
3577  for (i = 0; i < nb_filtergraphs; i++) {
3578  FilterGraph *fg = filtergraphs[i];
3579  if (fg->graph) {
3580  if (time < 0) {
3581  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3582  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3583  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3584  } else if (key == 'c') {
3585  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3587  } else {
3588  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3589  if (ret < 0)
3590  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3591  }
3592  }
3593  }
3594  } else {
3596  "Parse error, at least 3 arguments were expected, "
3597  "only %d given in string '%s'\n", n, buf);
3598  }
3599  }
3600  if (key == 'd' || key == 'D'){
3601  int debug=0;
3602  if(key == 'D') {
3603  debug = input_streams[0]->dec_ctx->debug << 1;
3604  if(!debug) debug = 1;
3605  while (debug & FF_DEBUG_DCT_COEFF) //unsupported, would just crash
3606  debug += debug;
3607  }else{
3608  char buf[32];
3609  int k = 0;
3610  i = 0;
3611  set_tty_echo(1);
3612  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3613  if (k > 0)
3614  buf[i++] = k;
3615  buf[i] = 0;
3616  set_tty_echo(0);
3617  fprintf(stderr, "\n");
3618  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3619  fprintf(stderr,"error parsing debug value\n");
3620  }
3621  for(i=0;i<nb_input_streams;i++) {
3622  input_streams[i]->dec_ctx->debug = debug;
3623  }
3624  for(i=0;i<nb_output_streams;i++) {
3626  if (ost->enc_ctx)
3627  ost->enc_ctx->debug = debug;
3628  }
3629  if(debug) av_log_set_level(AV_LOG_DEBUG);
3630  fprintf(stderr,"debug=%d\n", debug);
3631  }
3632  if (key == '?'){
3633  fprintf(stderr, "key function\n"
3634  "? show this help\n"
3635  "+ increase verbosity\n"
3636  "- decrease verbosity\n"
3637  "c Send command to first matching filter supporting it\n"
3638  "C Send/Queue command to all matching filters\n"
3639  "D cycle through available debug modes\n"
3640  "h dump packets/hex press to cycle through the 3 states\n"
3641  "q quit\n"
3642  "s Show QP histogram\n"
3643  );
3644  }
3645  return 0;
3646 }
3647 
3648 static int got_eagain(void)
3649 {
3650  int i;
3651  for (i = 0; i < nb_output_streams; i++)
3652  if (output_streams[i]->unavailable)
3653  return 1;
3654  return 0;
3655 }
3656 
3657 static void reset_eagain(void)
3658 {
3659  int i;
3660  for (i = 0; i < nb_input_files; i++)
3661  input_files[i]->eagain = 0;
3662  for (i = 0; i < nb_output_streams; i++)
3663  output_streams[i]->unavailable = 0;
3664 }
3665 
3667 {
3668  for (int i = 0; i < ifile->nb_streams; i++) {
3669  InputStream *ist = input_streams[ifile->ist_index + i];
3670  int ret;
3671 
3672  if (!ist->processing_needed)
3673  continue;
3674 
3675  do {
3676  ret = process_input_packet(ist, NULL, 1);
3677  } while (ret > 0);
3678 
3679  if (ist->decoding_needed) {
3680  /* report last frame duration to the demuxer thread */
3681  if (ist->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
3682  LastFrameDuration dur;
3683 
3684  dur.stream_idx = i;
3685  dur.duration = av_rescale_q(ist->nb_samples,
3686  (AVRational){ 1, ist->dec_ctx->sample_rate},
3687  ist->st->time_base);
3688 
3689  av_thread_message_queue_send(ifile->audio_duration_queue, &dur, 0);
3690  }
3691 
3693  }
3694  }
3695 }
3696 
3698  AVPacket *pkt)
3699 {
3700  const int fmt_is_discont = ifile->ctx->iformat->flags & AVFMT_TS_DISCONT;
3701  int disable_discontinuity_correction = copy_ts;
3702  int64_t pkt_dts = av_rescale_q_rnd(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q,
3704 
3705  if (copy_ts && ist->next_dts != AV_NOPTS_VALUE &&
3706  fmt_is_discont && ist->st->pts_wrap_bits < 60) {
3707  int64_t wrap_dts = av_rescale_q_rnd(pkt->dts + (1LL<<ist->st->pts_wrap_bits),
3708  ist->st->time_base, AV_TIME_BASE_Q,
3710  if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
3711  disable_discontinuity_correction = 0;
3712  }
3713 
3714  if (ist->next_dts != AV_NOPTS_VALUE && !disable_discontinuity_correction) {
3715  int64_t delta = pkt_dts - ist->next_dts;
3716  if (fmt_is_discont) {
3717  if (FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE ||
3718  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3719  ifile->ts_offset_discont -= delta;
3721  "timestamp discontinuity for stream #%d:%d "
3722  "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
3723  ist->file_index, ist->st->index, ist->st->id,
3725  delta, ifile->ts_offset_discont);
3727  if (pkt->pts != AV_NOPTS_VALUE)
3729  }
3730  } else {
3731  if (FFABS(delta) > 1LL * dts_error_threshold * AV_TIME_BASE) {
3732  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt->dts, ist->next_dts, pkt->stream_index);
3733  pkt->dts = AV_NOPTS_VALUE;
3734  }
3735  if (pkt->pts != AV_NOPTS_VALUE){
3736  int64_t pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
3737  delta = pkt_pts - ist->next_dts;
3738  if (FFABS(delta) > 1LL * dts_error_threshold * AV_TIME_BASE) {
3739  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt->pts, ist->next_dts, pkt->stream_index);
3740  pkt->pts = AV_NOPTS_VALUE;
3741  }
3742  }
3743  }
3744  } else if (ist->next_dts == AV_NOPTS_VALUE && !copy_ts &&
3745  fmt_is_discont && ifile->last_ts != AV_NOPTS_VALUE) {
3746  int64_t delta = pkt_dts - ifile->last_ts;
3747  if (FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE) {
3748  ifile->ts_offset_discont -= delta;
3750  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3751  delta, ifile->ts_offset_discont);
3753  if (pkt->pts != AV_NOPTS_VALUE)
3755  }
3756  }
3757 
3758  ifile->last_ts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
3759 }
3760 
3762  AVPacket *pkt)
3763 {
3764  int64_t offset = av_rescale_q(ifile->ts_offset_discont, AV_TIME_BASE_Q,
3765  ist->st->time_base);
3766 
3767  // apply previously-detected timestamp-discontinuity offset
3768  // (to all streams, not just audio/video)
3769  if (pkt->dts != AV_NOPTS_VALUE)
3770  pkt->dts += offset;
3771  if (pkt->pts != AV_NOPTS_VALUE)
3772  pkt->pts += offset;
3773 
3774  // detect timestamp discontinuities for audio/video
3775  if ((ist->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ||
3777  pkt->dts != AV_NOPTS_VALUE)
3779 }
3780 
3781 /*
3782  * Return
3783  * - 0 -- one packet was read and processed
3784  * - AVERROR(EAGAIN) -- no packets were available for selected file,
3785  * this function should be called again
3786  * - AVERROR_EOF -- this function should not be called again
3787  */
3788 static int process_input(int file_index)
3789 {
3790  InputFile *ifile = input_files[file_index];
3792  InputStream *ist;
3793  AVPacket *pkt;
3794  int ret, i, j;
3795 
3796  is = ifile->ctx;
3798 
3799  if (ret == AVERROR(EAGAIN)) {
3800  ifile->eagain = 1;
3801  return ret;
3802  }
3803  if (ret == 1) {
3804  /* the input file is looped: flush the decoders */
3806  return AVERROR(EAGAIN);
3807  }
3808  if (ret < 0) {
3809  if (ret != AVERROR_EOF) {
3810  print_error(is->url, ret);
3811  if (exit_on_error)
3812  exit_program(1);
3813  }
3814 
3815  for (i = 0; i < ifile->nb_streams; i++) {
3816  ist = input_streams[ifile->ist_index + i];
3817  if (ist->processing_needed) {
3818  ret = process_input_packet(ist, NULL, 0);
3819  if (ret>0)
3820  return 0;
3821  }
3822 
3823  /* mark all outputs that don't go through lavfi as finished */
3824  for (j = 0; j < nb_output_streams; j++) {
3826 
3827  if (ost->source_index == ifile->ist_index + i &&
3828  (!ost->enc_ctx || ost->enc->type == AVMEDIA_TYPE_SUBTITLE)) {
3829  OutputFile *of = output_files[ost->file_index];
3830  output_packet(of, ost->pkt, ost, 1);
3831  }
3832  }
3833  }
3834 
3835  ifile->eof_reached = 1;
3836  return AVERROR(EAGAIN);
3837  }
3838 
3839  reset_eagain();
3840 
3841  ist = input_streams[ifile->ist_index + pkt->stream_index];
3842 
3843  ist->data_size += pkt->size;
3844  ist->nb_packets++;
3845 
3846  if (ist->discard)
3847  goto discard_packet;
3848 
3849  /* add the stream-global side data to the first packet */
3850  if (ist->nb_packets == 1) {
3851  for (i = 0; i < ist->st->nb_side_data; i++) {
3852  AVPacketSideData *src_sd = &ist->st->side_data[i];
3853  uint8_t *dst_data;
3854 
3855  if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3856  continue;
3857 
3858  if (av_packet_get_side_data(pkt, src_sd->type, NULL))
3859  continue;
3860 
3861  dst_data = av_packet_new_side_data(pkt, src_sd->type, src_sd->size);
3862  if (!dst_data)
3863  exit_program(1);
3864 
3865  memcpy(dst_data, src_sd->data, src_sd->size);
3866  }
3867  }
3868 
3869  // detect and try to correct for timestamp discontinuities
3871 
3872  if (debug_ts) {
3873  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s duration:%s duration_time:%s off:%s off_time:%s\n",
3874  ifile->ist_index + pkt->stream_index,
3881  }
3882 
3883  sub2video_heartbeat(ist, pkt->pts);
3884 
3885  process_input_packet(ist, pkt, 0);
3886 
3887 discard_packet:
3888  av_packet_free(&pkt);
3889 
3890  return 0;
3891 }
3892 
3893 /**
3894  * Perform a step of transcoding for the specified filter graph.
3895  *
3896  * @param[in] graph filter graph to consider
3897  * @param[out] best_ist input stream where a frame would allow to continue
3898  * @return 0 for success, <0 for error
3899  */
3900 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
3901 {
3902  int i, ret;
3903  int nb_requests, nb_requests_max = 0;
3904  InputFilter *ifilter;
3905  InputStream *ist;
3906 
3907  *best_ist = NULL;
 /* NOTE(review): a line was lost in extraction here — `ret` is tested below
  * but never assigned in the visible text; presumably the elided call was
  * `ret = avfilter_graph_request_oldest(graph->graph);` — confirm upstream. */
3909  if (ret >= 0)
3910  return reap_filters(0);
3911 
 /* EOF from the graph: drain remaining frames, then close all outputs */
3912  if (ret == AVERROR_EOF) {
3913  ret = reap_filters(1);
3914  for (i = 0; i < graph->nb_outputs; i++)
3915  close_output_stream(graph->outputs[i]->ost);
3916  return ret;
3917  }
3918  if (ret != AVERROR(EAGAIN))
3919  return ret;
3920 
 /* EAGAIN: pick the input whose buffersrc starved the graph the most */
3921  for (i = 0; i < graph->nb_inputs; i++) {
3922  ifilter = graph->inputs[i];
3923  ist = ifilter->ist;
3924  if (input_files[ist->file_index]->eagain ||
 /* NOTE(review): the second half of this condition was elided by extraction;
  * presumably it also skips inputs whose file reached EOF — confirm upstream. */
3926  continue;
3927  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
3928  if (nb_requests > nb_requests_max) {
3929  nb_requests_max = nb_requests;
3930  *best_ist = ist;
3931  }
3932  }
3933 
 /* no input can make progress: mark all this graph's outputs unavailable */
3934  if (!*best_ist)
3935  for (i = 0; i < graph->nb_outputs; i++)
3936  graph->outputs[i]->ost->unavailable = 1;
3937 
3938  return 0;
3939 }
3940 
3941 /**
3942  * Run a single step of transcoding.
3943  *
3944  * @return 0 for success, <0 for error
3945  */
3946 static int transcode_step(void)
3947 {
3948  OutputStream *ost;
3949  InputStream *ist = NULL;
3950  int ret;
3951 
 /* pick the output stream most in need of data; NULL means none is ready */
3952  ost = choose_output();
3953  if (!ost) {
3954  if (got_eagain()) {
 /* everything temporarily unavailable: clear the markers and back off */
3955  reset_eagain();
3956  av_usleep(10000);
3957  return 0;
3958  }
3959  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
3960  return AVERROR_EOF;
3961  }
3962 
 /* filter graph exists but is not configured yet: configure it once all
  * of its inputs have known formats */
3963  if (ost->filter && !ost->filter->graph->graph) {
3964  if (ifilter_has_all_input_formats(ost->filter->graph)) {
3965  ret = configure_filtergraph(ost->filter->graph);
3966  if (ret < 0) {
3967  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
3968  return ret;
3969  }
3970  }
3971  }
3972 
3973  if (ost->filter && ost->filter->graph->graph) {
3974  /*
3975  * Similar case to the early audio initialization in reap_filters.
3976  * Audio is special in ffmpeg.c currently as we depend on lavfi's
3977  * audio frame buffering/creation to get the output audio frame size
3978  * in samples correct. The audio frame size for the filter chain is
3979  * configured during the output stream initialization.
3980  *
3981  * Apparently avfilter_graph_request_oldest (called in
3982  * transcode_from_filter just down the line) peeks. Peeking already
3983  * puts one frame "ready to be given out", which means that any
3984  * update in filter buffer sink configuration afterwards will not
3985  * help us. And yes, even if it would be utilized,
3986  * av_buffersink_get_samples is affected, as it internally utilizes
3987  * the same early exit for peeked frames.
3988  *
3989  * In other words, if avfilter_graph_request_oldest would not make
3990  * further filter chain configuration or usage of
3991  * av_buffersink_get_samples useless (by just causing the return
3992  * of the peeked AVFrame as-is), we could get rid of this additional
3993  * early encoder initialization.
3994  */
3995  if (av_buffersink_get_type(ost->filter->filter) == AVMEDIA_TYPE_AUDIO)
 /* NOTE(review): the body of this `if` was elided by extraction; per the
  * comment above it presumably performs the early encoder initialization
  * for audio streams — confirm against upstream. */
3997 
3998  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
3999  return ret;
4000  if (!ist)
4001  return 0;
4002  } else if (ost->filter) {
 /* unconfigured graph: feed any input that has not produced output yet */
4003  int i;
4004  for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4005  InputFilter *ifilter = ost->filter->graph->inputs[i];
4006  if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4007  ist = ifilter->ist;
4008  break;
4009  }
4010  }
4011  if (!ist) {
4012  ost->inputs_done = 1;
4013  return 0;
4014  }
4015  } else {
 /* no filter chain (streamcopy path): read from the mapped source stream */
4016  av_assert0(ost->source_index >= 0);
4017  ist = input_streams[ost->source_index];
4018  }
4019 
4020  ret = process_input(ist->file_index);
4021  if (ret == AVERROR(EAGAIN)) {
4022  if (input_files[ist->file_index]->eagain)
4023  ost->unavailable = 1;
4024  return 0;
4025  }
4026 
 /* EOF from one input is not an error for the whole transcode loop */
4027  if (ret < 0)
4028  return ret == AVERROR_EOF ? 0 : ret;
4029 
4030  return reap_filters(0);
4031 }
4032 
4033 /*
4034  * The following code is the main loop of the file converter
4035  */
4036 static int transcode(void)
4037 {
4038  int ret, i;
4039  OutputStream *ost;
4040  InputStream *ist;
4041  int64_t timer_start;
4042  int64_t total_packets_written = 0;
4043 
4044  ret = transcode_init();
4045  if (ret < 0)
4046  goto fail;
4047 
4048  if (stdin_interaction) {
4049  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4050  }
4051 
4052  timer_start = av_gettime_relative();
4053 
4054  if ((ret = init_input_threads()) < 0)
4055  goto fail;
4056 
4057  while (!received_sigterm) {
4058  int64_t cur_time= av_gettime_relative();
4059 
4060  /* if 'q' pressed, exits */
4061  if (stdin_interaction)
4062  if (check_keyboard_interaction(cur_time) < 0)
4063  break;
4064 
4065  /* check if there's any stream where output is still needed */
4066  if (!need_output()) {
4067  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4068  break;
4069  }
4070 
4071  ret = transcode_step();
4072  if (ret < 0 && ret != AVERROR_EOF) {
4073  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
4074  break;
4075  }
4076 
4077  /* dump report by using the output first video and audio streams */
4078  print_report(0, timer_start, cur_time);
4079  }
4081 
4082  /* at the end of stream, we must flush the decoder buffers */
4083  for (i = 0; i < nb_input_streams; i++) {
4084  ist = input_streams[i];
4085  if (!input_files[ist->file_index]->eof_reached) {
4086  process_input_packet(ist, NULL, 0);
4087  }
4088  }
4089  flush_encoders();
4090 
4091  term_exit();
4092 
4093  /* write the trailer if needed */
4094  for (i = 0; i < nb_output_files; i++) {
4096  if (ret < 0 && exit_on_error)
4097  exit_program(1);
4098  }
4099 
4100  /* dump report by using the first video and audio streams */
4101  print_report(1, timer_start, av_gettime_relative());
4102 
4103  /* close each encoder */
4104  for (i = 0; i < nb_output_streams; i++) {
4105  uint64_t packets_written;
4106  ost = output_streams[i];
4107  packets_written = atomic_load(&ost->packets_written);
4108  total_packets_written += packets_written;
4109  if (!packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM)) {
4110  av_log(NULL, AV_LOG_FATAL, "Empty output on stream %d.\n", i);
4111  exit_program(1);
4112  }
4113  }
4114 
4115  if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4116  av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4117  exit_program(1);
4118  }
4119 
4120  /* close each decoder */
4121  for (i = 0; i < nb_input_streams; i++) {
4122  ist = input_streams[i];
4123  if (ist->decoding_needed) {
4124  avcodec_close(ist->dec_ctx);
4125  }
4126  }
4127 
4129 
4130  /* finished ! */
4131  ret = 0;
4132 
4133  fail:
4135 
4136  if (output_streams) {
4137  for (i = 0; i < nb_output_streams; i++) {
4138  ost = output_streams[i];
4139  if (ost) {
4140  if (ost->logfile) {
4141  if (fclose(ost->logfile))
4143  "Error closing logfile, loss of information possible: %s\n",
4144  av_err2str(AVERROR(errno)));
4145  ost->logfile = NULL;
4146  }
4147  av_freep(&ost->forced_kf_pts);
4148  av_freep(&ost->apad);
4150  av_dict_free(&ost->encoder_opts);
4151  av_dict_free(&ost->sws_dict);
4152  av_dict_free(&ost->swr_opts);
4153  }
4154  }
4155  }
4156  return ret;
4157 }
4158 
4160 {
4161  BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
4162 #if HAVE_GETRUSAGE
4163  struct rusage rusage;
4164 
4165  getrusage(RUSAGE_SELF, &rusage);
4166  time_stamps.user_usec =
4167  (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4168  time_stamps.sys_usec =
4169  (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
4170 #elif HAVE_GETPROCESSTIMES
4171  HANDLE proc;
4172  FILETIME c, e, k, u;
4173  proc = GetCurrentProcess();
4174  GetProcessTimes(proc, &c, &e, &k, &u);
4175  time_stamps.user_usec =
4176  ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4177  time_stamps.sys_usec =
4178  ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
4179 #else
4180  time_stamps.user_usec = time_stamps.sys_usec = 0;
4181 #endif
4182  return time_stamps;
4183 }
4184 
/**
 * Query this process's peak memory usage, in bytes.
 * Returns 0 on platforms where no supported query API is available.
 */
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage usage;

    getrusage(RUSAGE_SELF, &usage);
    /* ru_maxrss is reported in kibibytes on Linux, hence the scaling */
    return (int64_t)usage.ru_maxrss * 1024;
#elif HAVE_GETPROCESSMEMORYINFO
    PROCESS_MEMORY_COUNTERS memcounters;
    HANDLE proc = GetCurrentProcess();

    memcounters.cb = sizeof(memcounters);
    GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
    return memcounters.PeakPagefileUsage;
#else
    return 0;
#endif
}
4202 
4203 int main(int argc, char **argv)
4204 {
4205  int ret;
 /* NOTE(review): a line was lost in extraction here — `ti` is read in the
  * benchmark block below but never declared in the visible text; presumably
  * a BenchmarkTimeStamps declaration was elided — confirm upstream. */
4207 
4208  init_dynload();
4209 
 /* NOTE(review): one or more setup calls were elided by extraction here. */
4211 
4212  setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4213 
 /* NOTE(review): an elided line precedes this; loglevel is parsed early so
  * it applies to all subsequent logging */
4215  parse_loglevel(argc, argv, options);
4216 
4217 #if CONFIG_AVDEVICE
 /* NOTE(review): the device-registration call inside this guard was elided
  * by extraction. */
4219 #endif
 /* NOTE(review): an elided line here; likely network initialization. */
4221 
4222  show_banner(argc, argv, options);
4223 
4224  /* parse options and open all input/output files */
4225  ret = ffmpeg_parse_options(argc, argv);
4226  if (ret < 0)
4227  exit_program(1);
4228 
4229  if (nb_output_files <= 0 && nb_input_files == 0) {
4230  show_usage();
4231  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4232  exit_program(1);
4233  }
4234 
4235  /* file converter / grab */
4236  if (nb_output_files <= 0) {
4237  av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4238  exit_program(1);
4239  }
4240 
 /* NOTE(review): an elided line here presumably captures the benchmark
  * start timestamps into `ti` — confirm upstream. */
4242  if (transcode() < 0)
4243  exit_program(1);
4244  if (do_benchmark) {
4245  int64_t utime, stime, rtime;
 /* NOTE(review): an elided line here presumably refreshes `current_time`
  * before computing the deltas below. */
4247  utime = current_time.user_usec - ti.user_usec;
4248  stime = current_time.sys_usec - ti.sys_usec;
4249  rtime = current_time.real_usec - ti.real_usec;
 /* NOTE(review): the av_log() call opening this message was elided. */
4251  "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
4252  utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
4253  }
4254  av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
 /* NOTE(review): the arguments to the av_log above and the condition that
  * guards the exit below were elided by extraction. */
4257  exit_program(69);
4258 
 /* NOTE(review): an elided line here; presumably the final cleanup call
  * before returning. */
4260  return main_return_code;
4261 }
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:31
sub2video_copy_rect
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg.c:184
AVSubtitle
Definition: avcodec.h:2314
avcodec_close
av_cold int avcodec_close(AVCodecContext *avctx)
Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext its...
Definition: avcodec.c:440
avcodec_encode_subtitle
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVSubtitle *sub)
Definition: encode.c:146
InputFilter::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg.h:265
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:422
AVCodec
AVCodec.
Definition: codec.h:204
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:204
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
av_codec_get_id
enum AVCodecID av_codec_get_id(const struct AVCodecTag *const *tags, unsigned int tag)
Get the AVCodecID for the given codec tag tag.
AVFILTER_CMD_FLAG_ONE
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:749
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
av_buffersink_get_ch_layout
int av_buffersink_get_ch_layout(const AVFilterContext *ctx, AVChannelLayout *out)
Definition: buffersink.c:241
need_output
static int need_output(void)
Definition: ffmpeg.c:3476
audio_sync_method
int audio_sync_method
Definition: ffmpeg_opt.c:160
check_output_constraints
static int check_output_constraints(InputStream *ist, OutputStream *ost)
Definition: ffmpeg.c:1845
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
av_buffersink_get_sample_aspect_ratio
AVRational av_buffersink_get_sample_aspect_ratio(const AVFilterContext *ctx)
psnr
static double psnr(double d)
Definition: ffmpeg.c:849
AVERROR_EXPERIMENTAL
#define AVERROR_EXPERIMENTAL
Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it.
Definition: error.h:74
AV_CODEC_ID_AC3
@ AV_CODEC_ID_AC3
Definition: codec_id.h:432
ifilter_parameters_from_codecpar
static int ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
Definition: ffmpeg.c:1761
extra_bits
#define extra_bits(eb)
Definition: intrax8.c:124
av_clip
#define av_clip
Definition: common.h:95
FKF_PREV_FORCED_T
@ FKF_PREV_FORCED_T
Definition: ffmpeg.h:459
atomic_store
#define atomic_store(object, desired)
Definition: stdatomic.h:85
reset_eagain
static void reset_eagain(void)
Definition: ffmpeg.c:3657
InputStream::hwaccel_device
char * hwaccel_device
Definition: ffmpeg.h:389
avcodec_receive_packet
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:452
VSYNC_VFR
@ VSYNC_VFR
Definition: ffmpeg.h:61
r
const char * r
Definition: vf_curves.c:116
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
nb_input_files
int nb_input_files
Definition: ffmpeg.c:147
opt.h
configure_filtergraph
int configure_filtergraph(FilterGraph *fg)
Definition: ffmpeg_filter.c:974
ffmpeg_exited
static volatile int ffmpeg_exited
Definition: ffmpeg.c:338