FFmpeg
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavcodec/version.h"
68 #include "libavformat/os_support.h"
69 
70 # include "libavfilter/avfilter.h"
71 # include "libavfilter/buffersrc.h"
72 # include "libavfilter/buffersink.h"
73 
74 #if HAVE_SYS_RESOURCE_H
75 #include <sys/time.h>
76 #include <sys/types.h>
77 #include <sys/resource.h>
78 #elif HAVE_GETPROCESSTIMES
79 #include <windows.h>
80 #endif
81 #if HAVE_GETPROCESSMEMORYINFO
82 #include <windows.h>
83 #include <psapi.h>
84 #endif
85 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <windows.h>
87 #endif
88 
89 
90 #if HAVE_SYS_SELECT_H
91 #include <sys/select.h>
92 #endif
93 
94 #if HAVE_TERMIOS_H
95 #include <fcntl.h>
96 #include <sys/ioctl.h>
97 #include <sys/time.h>
98 #include <termios.h>
99 #elif HAVE_KBHIT
100 #include <conio.h>
101 #endif
102 
103 #include <time.h>
104 
105 #include "ffmpeg.h"
106 #include "cmdutils.h"
107 #include "sync_queue.h"
108 
109 #include "libavutil/avassert.h"
110 
111 const char program_name[] = "ffmpeg";
112 const int program_birth_year = 2000;
113 
114 static FILE *vstats_file;
115 
116 // optionally attached as opaque_ref to decoded AVFrames
117 typedef struct FrameData {
 /* frame index — presumably decode order; TODO confirm upstream */
118  uint64_t idx;
 /* original pts of the decoded frame */
119  int64_t pts;
 /* NOTE(review): an `AVRational tb;` member is read later as fd->tb in
  * enc_stats_write(); its declaration (doxygen line 120) was lost in
  * extraction — confirm against upstream ffmpeg.c. */
121 } FrameData;
122 
 /* Snapshot of wall-clock / user / system CPU time in microseconds,
  * used by the -benchmark reporting.  NOTE(review): the closing
  * `} BenchmarkTimeStamps;` (doxygen line 127) was lost in extraction. */
123 typedef struct BenchmarkTimeStamps {
124  int64_t real_usec;
125  int64_t user_usec;
126  int64_t sys_usec;
128 
131 static int64_t getmaxrss(void);
133 
134 static int64_t nb_frames_dup = 0;
135 static uint64_t dup_warning = 1000;
136 static int64_t nb_frames_drop = 0;
137 static int64_t decode_error_stat[2];
138 unsigned nb_output_dumped = 0;
139 
142 
145 
148 
151 
152 #if HAVE_TERMIOS_H
153 
154 /* init terminal so that we can grab keys */
155 static struct termios oldtty;
156 static int restore_tty;
157 #endif
158 
159 /* sub2video hack:
160  Convert subtitles to video with alpha to insert them in filter graphs.
161  This is a temporary solution until libavfilter gets real subtitles support.
162  */
163 
 /* Prepare a blank (fully zeroed) sub2video canvas frame for `ist`.
  * Returns 0 on success or a negative AVERROR code.
  * NOTE(review): the function signature (doxygen line 164) and two body
  * lines (169, 172) were lost in extraction — confirm upstream. */
165 {
166  int ret;
167  AVFrame *frame = ist->sub2video.frame;
168 
 /* Prefer decoder-reported dimensions; fall back to the sub2video
  * configured width/height when the decoder has none. */
170  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
171  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
173  if ((ret = av_frame_get_buffer(frame, 0)) < 0)
174  return ret;
 /* Zero the first plane so the canvas starts blank. */
175  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
176  return 0;
177 }
178 
179 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
180  AVSubtitleRect *r)
181 {
182  uint32_t *pal, *dst2;
183  uint8_t *src, *src2;
184  int x, y;
185 
186  if (r->type != SUBTITLE_BITMAP) {
187  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
188  return;
189  }
190  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
191  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
192  r->x, r->y, r->w, r->h, w, h
193  );
194  return;
195  }
196 
197  dst += r->y * dst_linesize + r->x * 4;
198  src = r->data[0];
199  pal = (uint32_t *)r->data[1];
200  for (y = 0; y < r->h; y++) {
201  dst2 = (uint32_t *)dst;
202  src2 = src;
203  for (x = 0; x < r->w; x++)
204  *(dst2++) = pal[*(src2++)];
205  dst += dst_linesize;
206  src += r->linesize[0];
207  }
208 }
209 
 /* Push the current sub2video canvas, stamped with `pts`, into every
  * filtergraph input fed by `ist`, and remember it as the last pushed PTS. */
210 static void sub2video_push_ref(InputStream *ist, int64_t pts)
211 {
212  AVFrame *frame = ist->sub2video.frame;
213  int i;
214  int ret;
215 
 /* A blank canvas must already exist (see sub2video_get_blank_frame). */
216  av_assert1(frame->data[0]);
217  ist->sub2video.last_pts = frame->pts = pts;
 /* NOTE(review): the buffersrc submission call that sets `ret`
  * (doxygen lines 219-221) was lost in extraction — confirm upstream. */
218  for (i = 0; i < ist->nb_filters; i++) {
222  if (ret != AVERROR_EOF && ret < 0)
223  av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
224  av_err2str(ret));
225  }
226 }
227 
 /* Render subtitle `sub` (or a blank canvas when sub == NULL) onto the
  * sub2video frame and push it into the filtergraph.  When sub is NULL,
  * `heartbeat_pts` supplies the start time during initialization. */
228 void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
229 {
230  AVFrame *frame = ist->sub2video.frame;
231  int8_t *dst;
232  int dst_linesize;
233  int num_rects, i;
234  int64_t pts, end_pts;
235 
236  if (!frame)
237  return;
238  if (sub) {
 /* display times are in ms; convert start/end to the stream timebase */
239  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
240  AV_TIME_BASE_Q, ist->st->time_base);
241  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
242  AV_TIME_BASE_Q, ist->st->time_base);
243  num_rects = sub->num_rects;
244  } else {
245  /* If we are initializing the system, utilize current heartbeat
246  PTS as the start time, and show until the following subpicture
247  is received. Otherwise, utilize the previous subpicture's end time
248  as the fall-back value. */
249  pts = ist->sub2video.initialize ?
250  heartbeat_pts : ist->sub2video.end_pts;
251  end_pts = INT64_MAX;
252  num_rects = 0;
253  }
 /* NOTE(review): the av_log() call opening (doxygen line 255) was lost
  * in extraction; line 256 is its message argument. */
254  if (sub2video_get_blank_frame(ist) < 0) {
256  "Impossible to get a blank canvas.\n");
257  return;
258  }
259  dst = frame->data [0];
260  dst_linesize = frame->linesize[0];
261  for (i = 0; i < num_rects; i++)
262  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
263  sub2video_push_ref(ist, pts);
264  ist->sub2video.end_pts = end_pts;
 /* first update done; heartbeats now use end_pts as fallback */
265  ist->sub2video.initialize = 0;
266 }
267 
268 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
269 {
270  InputFile *infile = input_files[ist->file_index];
271  int i, j, nb_reqs;
272  int64_t pts2;
273 
274  /* When a frame is read from a file, examine all sub2video streams in
275  the same file and send the sub2video frame again. Otherwise, decoded
276  video frames could be accumulating in the filter graph while a filter
277  (possibly overlay) is desperately waiting for a subtitle frame. */
278  for (i = 0; i < infile->nb_streams; i++) {
279  InputStream *ist2 = infile->streams[i];
280  if (!ist2->sub2video.frame)
281  continue;
282  /* subtitles seem to be usually muxed ahead of other streams;
283  if not, subtracting a larger time here is necessary */
284  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
285  /* do not send the heartbeat frame if the subtitle is already ahead */
286  if (pts2 <= ist2->sub2video.last_pts)
287  continue;
288  if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
289  /* if we have hit the end of the current displayed subpicture,
290  or if we need to initialize the system, update the
291  overlayed subpicture and its start/end times */
292  sub2video_update(ist2, pts2 + 1, NULL);
293  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
294  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
295  if (nb_reqs)
296  sub2video_push_ref(ist2, pts2);
297  }
298 }
299 
 /* Flush sub2video at end of stream: extend the pending subpicture to
  * INT64_MAX, then signal EOF to each filtergraph input. */
300 static void sub2video_flush(InputStream *ist)
301 {
302  int i;
303  int ret;
304 
305  if (ist->sub2video.end_pts < INT64_MAX)
306  sub2video_update(ist, INT64_MAX, NULL);
 /* NOTE(review): the EOF-signalling buffersrc call that sets `ret`
  * (doxygen line 308) was lost in extraction — confirm upstream. */
307  for (i = 0; i < ist->nb_filters; i++) {
309  if (ret != AVERROR_EOF && ret < 0)
310  av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
311  }
312 }
313 
314 /* end of sub2video hack */
315 
/* Async-signal-safe part of terminal teardown: restore the saved tty
 * attributes if term_init() modified them. */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if (!restore_tty)
        return;
    tcsetattr(0, TCSANOW, &oldtty);
#endif
}
323 
 /* Public terminal teardown entry point.
  * NOTE(review): the inner call (doxygen line 327, likely
  * term_exit_sigsafe()) was lost in extraction — confirm upstream. */
324 void term_exit(void)
325 {
 /* Zero-length QUIET log — presumably flushes pending log output;
  * TODO confirm against av_log documentation. */
326  av_log(NULL, AV_LOG_QUIET, "%s", "");
328 }
329 
330 static volatile int received_sigterm = 0;
331 static volatile int received_nb_signals = 0;
333 static volatile int ffmpeg_exited = 0;
336 
 /* Signal handler shared by SIGINT/SIGTERM/SIGQUIT/SIGXCPU.
  * Records the signal; after more than 3 signals, hard-exits.
  * NOTE(review): the name/parameter line (doxygen 338) and two body lines
  * (342-343, presumably the signal counter increment and sigsafe terminal
  * restore) were lost in extraction — confirm upstream. */
337 static void
339 {
340  int ret;
341  received_sigterm = sig;
344  if(received_nb_signals > 3) {
 /* write() is async-signal-safe, unlike fprintf. */
345  ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
346  strlen("Received > 3 system signals, hard exiting\n"));
347  if (ret < 0) { /* Do nothing */ };
348  exit(123);
349  }
350 }
351 
#if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: translate console events into the
 * POSIX-style signals the rest of ffmpeg expects. */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
{
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    switch (fdwCtrlType) {
    case CTRL_C_EVENT:
    case CTRL_BREAK_EVENT:
        /* Ctrl-C / Ctrl-Break behave like SIGINT. */
        sigterm_handler(SIGINT);
        return TRUE;

    case CTRL_CLOSE_EVENT:
    case CTRL_LOGOFF_EVENT:
    case CTRL_SHUTDOWN_EVENT:
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method
         * the process is hard terminated, so stall as long as we need to
         * to try and let the main thread(s) clean up and gracefully
         * terminate (we have at most 5 seconds, but should be done far
         * before that). */
        while (!ffmpeg_exited)
            Sleep(0);
        return TRUE;

    default:
        av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
        return FALSE;
    }
}
#endif
383 
384 #ifdef __linux__
 /* On Linux, register via sigaction() so SA_RESTART and the blocked-signal
  * mask apply; `action` must be declared by the caller (see term_init). */
385 #define SIGNAL(sig, func) \
386  do { \
387  action.sa_handler = func; \
388  sigaction(sig, &action, NULL); \
389  } while (0)
390 #else
 /* Elsewhere, fall back to plain signal(). */
391 #define SIGNAL(sig, func) \
392  signal(sig, func)
393 #endif
394 
 /* Initialize terminal and signal handling: put the controlling tty in
  * raw-ish mode for interactive key reading (when stdin_interaction is
  * set) and install sigterm_handler for the termination signals.
  * Paired with term_exit()/term_exit_sigsafe() which restore the tty. */
395 void term_init(void)
396 {
397 #if defined __linux__
398  struct sigaction action = {0};
399  action.sa_handler = sigterm_handler;
400 
401  /* block other interrupts while processing this one */
402  sigfillset(&action.sa_mask);
403 
404  /* restart interruptible functions (i.e. don't fail with EINTR) */
405  action.sa_flags = SA_RESTART;
406 #endif
407 
408 #if HAVE_TERMIOS_H
409  if (stdin_interaction) {
410  struct termios tty;
411  if (tcgetattr (0, &tty) == 0) {
 /* save current settings so term_exit_sigsafe() can restore them */
412  oldtty = tty;
413  restore_tty = 1;
414 
 /* disable input translation/flow control, echo and canonical mode:
  * deliver single keypresses immediately (VMIN=1, VTIME=0) */
415  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
416  |INLCR|IGNCR|ICRNL|IXON);
417  tty.c_oflag |= OPOST;
418  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
419  tty.c_cflag &= ~(CSIZE|PARENB);
420  tty.c_cflag |= CS8;
421  tty.c_cc[VMIN] = 1;
422  tty.c_cc[VTIME] = 0;
423 
424  tcsetattr (0, TCSANOW, &tty);
425  }
426  SIGNAL(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
427  }
428 #endif
429 
430  SIGNAL(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
431  SIGNAL(SIGTERM, sigterm_handler); /* Termination (ANSI). */
432 #ifdef SIGXCPU
433  SIGNAL(SIGXCPU, sigterm_handler);
434 #endif
435 #ifdef SIGPIPE
436  signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
437 #endif
438 #if HAVE_SETCONSOLECTRLHANDLER
439  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
440 #endif
441 }
442 
443 /* read a key without blocking */
/* Poll for a single keypress without blocking.
 * Returns the byte read, the read() result (0 or -1) when the descriptor
 * was ready but yielded no byte (termios build), or -1 when no input is
 * pending or stdin is unusable. */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    int n = 1;
    struct timeval tv;
    fd_set rfds;

    FD_ZERO(&rfds);
    FD_SET(0, &rfds);
    /* zero timeout: pure poll, never block */
    tv.tv_sec = 0;
    tv.tv_usec = 0;
    n = select(1, &rfds, NULL, NULL, &tv);
    if (n > 0) {
        n = read(0, &ch, 1);
        if (n == 1)
            return ch;

        return n;
    }
#elif HAVE_KBHIT
# if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;
    if (!input_handle) {
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
            return -1;
        }
        // Read it
        if (nchars != 0) {
            /* Fix: check read()'s result — previously it was ignored, so a
             * failed or empty read returned an uninitialized `ch` (UB). */
            if (read(0, &ch, 1) == 1)
                return ch;
            return -1;
        } else {
            return -1;
        }
    }
# endif
    if (kbhit())
        return getch();
#endif
    return -1;
}
494 
 /* AVIOInterruptCallback: non-zero return tells lavf to abort blocking I/O.
  * NOTE(review): the return expression (doxygen line 497) was lost in
  * extraction — confirm against upstream ffmpeg.c. */
495 static int decode_interrupt_cb(void *ctx)
496 {
498 }
499 
501 
 /* Global teardown registered as the program's cleanup hook: free all
  * filtergraphs, close input/output files, close the vstats file, and
  * report how the run ended.  NOTE(review): numerous lines (doxygen 513,
  * 521, 528-529, 541, 547, 549, 553, 556, 560, 564-575...) were lost in
  * extraction, mostly the actual free/close calls — confirm upstream. */
502 static void ffmpeg_cleanup(int ret)
503 {
504  int i, j;
505 
506  if (do_benchmark) {
507  int maxrss = getmaxrss() / 1024;
508  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
509  }
510 
 /* free every filtergraph, including queued frames/subtitles */
511  for (i = 0; i < nb_filtergraphs; i++) {
512  FilterGraph *fg = filtergraphs[i];
514  for (j = 0; j < fg->nb_inputs; j++) {
515  InputFilter *ifilter = fg->inputs[j];
516  struct InputStream *ist = ifilter->ist;
517 
518  if (ifilter->frame_queue) {
519  AVFrame *frame;
520  while (av_fifo_read(ifilter->frame_queue, &frame, 1) >= 0)
522  av_fifo_freep2(&ifilter->frame_queue);
523  }
524  av_freep(&ifilter->displaymatrix);
525  if (ist->sub2video.sub_queue) {
526  AVSubtitle sub;
527  while (av_fifo_read(ist->sub2video.sub_queue, &sub, 1) >= 0)
530  }
531  av_buffer_unref(&ifilter->hw_frames_ctx);
532  av_freep(&ifilter->name);
533  av_freep(&fg->inputs[j]);
534  }
535  av_freep(&fg->inputs);
536  for (j = 0; j < fg->nb_outputs; j++) {
537  OutputFilter *ofilter = fg->outputs[j];
538 
539  avfilter_inout_free(&ofilter->out_tmp);
540  av_freep(&ofilter->name);
542  av_freep(&fg->outputs[j]);
543  }
544  av_freep(&fg->outputs);
545  av_freep(&fg->graph_desc);
546 
548  }
550 
551  /* close files */
552  for (i = 0; i < nb_output_files; i++)
554 
555  for (i = 0; i < nb_input_files; i++)
557 
558  if (vstats_file) {
 /* fclose flushes; a failure here can mean lost stats */
559  if (fclose(vstats_file))
561  "Error closing vstats file, loss of information possible: %s\n",
562  av_err2str(AVERROR(errno)));
563  }
566 
568 
571 
572  uninit_opts();
573 
575 
576  if (received_sigterm) {
577  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
578  (int) received_sigterm);
579  } else if (ret && atomic_load(&transcode_init_done)) {
580  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
581  }
582  term_exit();
 /* lets CtrlHandler's wait loop (Windows) finish */
583  ffmpeg_exited = 1;
584 }
585 
586 /* iterate over all output streams in all output files;
587  * pass NULL to start iteration */
 /* NOTE(review): the signature line (doxygen 588) was lost in extraction;
  * presumably `OutputStream *ost_iter(OutputStream *prev)` — confirm.
  * Returns the stream after `prev`, or NULL when exhausted. */
589 {
590  int of_idx = prev ? prev->file_index : 0;
591  int ost_idx = prev ? prev->index + 1 : 0;
592 
593  for (; of_idx < nb_output_files; of_idx++) {
594  OutputFile *of = output_files[of_idx];
595  if (ost_idx < of->nb_streams)
596  return of->streams[ost_idx];
597 
 /* wrap to the first stream of the next file */
598  ost_idx = 0;
599  }
600 
601  return NULL;
602 }
603 
 /* Iterate over all input streams in all input files; pass NULL to start.
  * NOTE(review): the signature line (doxygen 604) was lost in extraction;
  * presumably `InputStream *ist_iter(InputStream *prev)` — confirm.
  * Returns the stream after `prev`, or NULL when exhausted. */
605 {
606  int if_idx = prev ? prev->file_index : 0;
607  int ist_idx = prev ? prev->st->index + 1 : 0;
608 
609  for (; if_idx < nb_input_files; if_idx++) {
610  InputFile *f = input_files[if_idx];
611  if (ist_idx < f->nb_streams)
612  return f->streams[ist_idx];
613 
 /* wrap to the first stream of the next file */
614  ist_idx = 0;
615  }
616 
617  return NULL;
618 }
619 
 /* Remove from one dictionary every key present in another (consumed
  * options).  NOTE(review): the signature (doxygen 620, presumably taking
  * AVDictionary **a and AVDictionary *b) and the loop body (625, the
  * av_dict_set removal) were lost in extraction — confirm upstream. */
621 {
622  const AVDictionaryEntry *t = NULL;
623 
624  while ((t = av_dict_iterate(b, t))) {
626  }
627 }
628 
 /* Abort with a fatal error if any option in `m` was not consumed.
  * NOTE(review): the signature line (doxygen 629, presumably
  * `void assert_avoptions(AVDictionary *m)`) was lost in extraction. */
630 {
631  const AVDictionaryEntry *t;
 /* an empty-prefix IGNORE_SUFFIX lookup matches any remaining key */
632  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
633  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
634  exit_program(1);
635  }
636 }
637 
 /* Terminate the program when an experimental codec is rejected.
  * `c` and `encoder` are unused in the visible body; the caller is
  * expected to have logged the details. */
638 static void abort_codec_experimental(const AVCodec *c, int encoder)
639 {
640  exit_program(1);
641 }
642 
 /* Under -benchmark_all, log the user/sys/real time deltas since the last
  * call, labelled by the printf-style `fmt` arguments; always refresh the
  * `current_time` snapshot.  NOTE(review): the lines declaring/assigning
  * the local snapshot `t` and opening the av_log call (doxygen 646, 654,
  * 656-657) were lost in extraction — confirm upstream. */
643 static void update_benchmark(const char *fmt, ...)
644 {
645  if (do_benchmark_all) {
647  va_list va;
648  char buf[1024];
649 
650  if (fmt) {
651  va_start(va, fmt);
652  vsnprintf(buf, sizeof(buf), fmt, va);
653  va_end(va);
655  "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
658  t.real_usec - current_time.real_usec, buf);
659  }
660  current_time = t;
661  }
662 }
663 
 /* Mark an output stream's encoder as finished and, when it participates
  * in a sync queue, signal EOF to that queue.  NOTE(review): the signature
  * line (doxygen 664, presumably taking OutputStream *ost) was lost in
  * extraction — confirm upstream. */
665 {
666  OutputFile *of = output_files[ost->file_index];
667  ost->finished |= ENCODER_FINISHED;
668 
669  if (ost->sq_idx_encode >= 0)
670  sq_send(of->sq_encode, ost->sq_idx_encode, SQFRAME(NULL));
671 }
672 
 /* Return 0 when timestamp ts (in timebase tb) is at/after the output
  * file's -t recording limit, 1 otherwise.  NOTE(review): the signature
  * (doxygen 673) and the line inside the if (679, presumably closing the
  * output stream) were lost in extraction — confirm upstream. */
674 {
675  OutputFile *of = output_files[ost->file_index];
676 
677  if (of->recording_time != INT64_MAX &&
678  av_compare_ts(ts, tb, of->recording_time, AV_TIME_BASE_Q) >= 0) {
680  return 0;
681  }
682  return 1;
683 }
684 
 /* Rescale a filtered frame's pts into the encoder timebase (offset by the
  * output start time) and return a higher-precision float pts used by the
  * framerate-conversion logic.  NOTE(review): the first signature line
  * (doxygen 685, presumably `static double adjust_frame_pts_to_encoder_tb(
  * OutputFile *of, OutputStream *ost,`) and the start-time subtraction line
  * (703) were lost in extraction — confirm upstream. */
686  AVFrame *frame)
687 {
688  double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
689  const int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ?
690  0 : of->start_time;
691 
692  AVCodecContext *const enc = ost->enc_ctx;
693 
694  AVRational tb = enc->time_base;
695  AVRational filter_tb = frame->time_base;
 /* widen the timebase denominator to gain fractional precision */
696  const int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
697 
698  if (frame->pts == AV_NOPTS_VALUE)
699  goto early_exit;
700 
701  tb.den <<= extra_bits;
702  float_pts = av_rescale_q(frame->pts, filter_tb, tb) -
704  float_pts /= 1 << extra_bits;
705  // avoid exact midoints to reduce the chance of rounding differences, this
706  // can be removed in case the fps code is changed to work with integers
707  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
708 
709  frame->pts = av_rescale_q(frame->pts, filter_tb, enc->time_base) -
710  av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
711  frame->time_base = enc->time_base;
712 
713 early_exit:
714 
715  if (debug_ts) {
716  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
717  frame ? av_ts2str(frame->pts) : "NULL",
718  (enc && frame) ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL",
719  float_pts,
720  enc ? enc->time_base.num : -1,
721  enc ? enc->time_base.den : -1);
722  }
723 
724  return float_pts;
725 }
726 
728  char *error, int error_len);
729 
 /* Initialize an output stream once, logging (and optionally aborting via
  * exit_program) on failure; returns 0 if already initialized.
  * NOTE(review): the first signature line (doxygen 730, presumably
  * `static int init_output_stream_wrapper(OutputStream *ost,
  * AVFrame *frame,`) was lost in extraction — confirm upstream. */
731  unsigned int fatal)
732 {
733  int ret = AVERROR_BUG;
734  char error[1024] = {0};
735 
736  if (ost->initialized)
737  return 0;
738 
739  ret = init_output_stream(ost, frame, error, sizeof(error));
740  if (ret < 0) {
741  av_log(ost, AV_LOG_ERROR, "Error initializing output stream: %s\n",
742  error);
743 
744  if (fatal)
745  exit_program(1);
746  }
747 
748  return ret;
749 }
750 
751 static double psnr(double d)
752 {
753  return -10.0 * log10(d);
754 }
755 
 /* Extract per-frame quality/PSNR data from the encoded packet's side data
  * and, when requested, append a line to the -vstats file.
  * NOTE(review): the side-data fetch opening (doxygen 758, presumably
  * `const uint8_t *sd = av_packet_get_side_data(pkt,
  * AV_PKT_DATA_QUALITY_STATS,`) was lost in extraction — confirm. */
756 static void update_video_stats(OutputStream *ost, const AVPacket *pkt, int write_vstats)
757 {
759  NULL);
760  AVCodecContext *enc = ost->enc_ctx;
761  int64_t frame_number;
762  double ti1, bitrate, avg_bitrate;
763 
 /* side-data layout: u32 quality, byte 4 picture type, byte 5 error
  * count, then 64-bit error values from offset 8 */
764  ost->quality = sd ? AV_RL32(sd) : -1;
765  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
766 
767  for (int i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
768  if (sd && i < sd[5])
769  ost->error[i] = AV_RL64(sd + 8 + 8*i);
770  else
771  ost->error[i] = -1;
772  }
773 
774  if (!write_vstats)
775  return;
776 
777  /* this is executed just the first time update_video_stats is called */
778  if (!vstats_file) {
779  vstats_file = fopen(vstats_filename, "w");
780  if (!vstats_file) {
781  perror("fopen");
782  exit_program(1);
783  }
784  }
785 
786  frame_number = ost->packets_encoded;
787  if (vstats_version <= 1) {
788  fprintf(vstats_file, "frame= %5"PRId64" q= %2.1f ", frame_number,
789  ost->quality / (float)FF_QP2LAMBDA);
790  } else {
791  fprintf(vstats_file, "out= %2d st= %2d frame= %5"PRId64" q= %2.1f ", ost->file_index, ost->index, frame_number,
792  ost->quality / (float)FF_QP2LAMBDA);
793  }
794 
795  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
796  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
797 
798  fprintf(vstats_file,"f_size= %6d ", pkt->size);
799  /* compute pts value */
800  ti1 = pkt->dts * av_q2d(pkt->time_base);
801  if (ti1 < 0.01)
802  ti1 = 0.01;
803 
804  bitrate = (pkt->size * 8) / av_q2d(enc->time_base) / 1000.0;
805  avg_bitrate = (double)(ost->data_size_enc * 8) / ti1 / 1000.0;
806  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
807  (double)ost->data_size_enc / 1024, ti1, bitrate, avg_bitrate);
808  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
809 }
810 
 /* Write one line of encoder statistics (pre- or post-encode) to the
  * EncStats output, expanding each configured component.  Exactly one of
  * `frame` (pre-encode) / `pkt` (post-encode) is non-NULL.
  * NOTE(review): the first signature line (doxygen 811, presumably
  * `static void enc_stats_write(OutputStream *ost, EncStats *es,`) was
  * lost in extraction — confirm upstream. */
812  const AVFrame *frame, const AVPacket *pkt,
813  uint64_t frame_num)
814 {
815  AVIOContext *io = es->io;
816  AVRational tb = frame ? frame->time_base : pkt->time_base;
817  int64_t pts = frame ? frame->pts : pkt->pts;
818 
 /* input-side timebase/pts, filled from attached FrameData if present */
819  AVRational tbi = (AVRational){ 0, 1};
820  int64_t ptsi = INT64_MAX;
821 
822  const FrameData *fd;
823 
824  if ((frame && frame->opaque_ref) || (pkt && pkt->opaque_ref)) {
825  fd = (const FrameData*)(frame ? frame->opaque_ref->data : pkt->opaque_ref->data);
826  tbi = fd->tb;
827  ptsi = fd->pts;
828  }
829 
830  for (size_t i = 0; i < es->nb_components; i++) {
831  const EncStatsComponent *c = &es->components[i];
832 
 /* components valid in both frame and packet context */
833  switch (c->type) {
834  case ENC_STATS_LITERAL: avio_write (io, c->str, c->str_len); continue;
835  case ENC_STATS_FILE_IDX: avio_printf(io, "%d", ost->file_index); continue;
836  case ENC_STATS_STREAM_IDX: avio_printf(io, "%d", ost->index); continue;
837  case ENC_STATS_TIMEBASE: avio_printf(io, "%d/%d", tb.num, tb.den); continue;
838  case ENC_STATS_TIMEBASE_IN: avio_printf(io, "%d/%d", tbi.num, tbi.den); continue;
839  case ENC_STATS_PTS: avio_printf(io, "%"PRId64, pts); continue;
840  case ENC_STATS_PTS_IN: avio_printf(io, "%"PRId64, ptsi); continue;
841  case ENC_STATS_PTS_TIME: avio_printf(io, "%g", pts * av_q2d(tb)); continue;
842  case ENC_STATS_PTS_TIME_IN: avio_printf(io, "%g", ptsi == INT64_MAX ?
843  INFINITY : ptsi * av_q2d(tbi)); continue;
844  case ENC_STATS_FRAME_NUM: avio_printf(io, "%"PRIu64, frame_num); continue;
845  case ENC_STATS_FRAME_NUM_IN: avio_printf(io, "%"PRIu64, fd ? fd->idx : -1); continue;
846  }
847 
848  if (frame) {
 /* frame-only (pre-encode) components */
849  switch (c->type) {
850  case ENC_STATS_SAMPLE_NUM: avio_printf(io, "%"PRIu64, ost->samples_encoded); continue;
851  case ENC_STATS_NB_SAMPLES: avio_printf(io, "%d", frame->nb_samples); continue;
852  default: av_assert0(0);
853  }
854  } else {
 /* packet-only (post-encode) components */
855  switch (c->type) {
856  case ENC_STATS_DTS: avio_printf(io, "%"PRId64, pkt->dts); continue;
857  case ENC_STATS_DTS_TIME: avio_printf(io, "%g", pkt->dts * av_q2d(tb)); continue;
858  case ENC_STATS_PKT_SIZE: avio_printf(io, "%d", pkt->size); continue;
859  case ENC_STATS_BITRATE: {
860  double duration = FFMAX(pkt->duration, 1) * av_q2d(tb);
861  avio_printf(io, "%g", 8.0 * pkt->size / duration);
862  continue;
863  }
864  case ENC_STATS_AVG_BITRATE: {
865  double duration = pkt->dts * av_q2d(tb);
866  avio_printf(io, "%g", duration > 0 ? 8.0 * ost->data_size_enc / duration : -1.);
867  continue;
868  }
869  default: av_assert0(0);
870  }
871  }
872  }
873  avio_w8(io, '\n');
874  avio_flush(io);
875 }
876 
 /* Send one frame (or NULL to flush) to the encoder and drain all
  * resulting packets to the muxer.  Returns 0, or a negative error
  * (AVERROR_EOF once the encoder is fully flushed).
  * NOTE(review): the signature (doxygen 877, presumably
  * `static int encode_frame(OutputFile *of, OutputStream *ost,
  * AVFrame *frame)`) and several interior lines (902, 912, 934, 944-946,
  * 957-959, 962-963 — the pre-send benchmark update, the
  * avcodec_receive_packet call, update_video_stats, debug-ts arguments
  * and the subtitle-heartbeat call) were lost in extraction — confirm
  * against upstream ffmpeg.c. */
878 {
879  AVCodecContext *enc = ost->enc_ctx;
880  AVPacket *pkt = ost->pkt;
881  const char *type_desc = av_get_media_type_string(enc->codec_type);
882  const char *action = frame ? "encode" : "flush";
883  int ret;
884 
885  if (frame) {
886  if (ost->enc_stats_pre.io)
887  enc_stats_write(ost, &ost->enc_stats_pre, frame, NULL,
888  ost->frames_encoded);
889 
890  ost->frames_encoded++;
891  ost->samples_encoded += frame->nb_samples;
892 
893  if (debug_ts) {
894  av_log(ost, AV_LOG_INFO, "encoder <- type:%s "
895  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
896  type_desc,
897  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
898  enc->time_base.num, enc->time_base.den);
899  }
900  }
901 
903 
 /* NULL frame enters draining mode; EOF on a flush send is expected */
904  ret = avcodec_send_frame(enc, frame);
905  if (ret < 0 && !(ret == AVERROR_EOF && !frame)) {
906  av_log(ost, AV_LOG_ERROR, "Error submitting %s frame to the encoder\n",
907  type_desc);
908  return ret;
909  }
910 
 /* drain loop: pull packets until EAGAIN (need more input) or EOF */
911  while (1) {
913  update_benchmark("%s_%s %d.%d", action, type_desc,
914  ost->file_index, ost->index);
915 
916  pkt->time_base = enc->time_base;
917 
918  /* if two pass, output log on success and EOF */
919  if ((ret >= 0 || ret == AVERROR_EOF) && ost->logfile && enc->stats_out)
920  fprintf(ost->logfile, "%s", enc->stats_out);
921 
922  if (ret == AVERROR(EAGAIN)) {
923  av_assert0(frame); // should never happen during flushing
924  return 0;
925  } else if (ret == AVERROR_EOF) {
 /* forward the EOF downstream to the muxer */
926  of_output_packet(of, pkt, ost, 1);
927  return ret;
928  } else if (ret < 0) {
929  av_log(ost, AV_LOG_ERROR, "%s encoding failed\n", type_desc);
930  return ret;
931  }
932 
933  if (enc->codec_type == AVMEDIA_TYPE_VIDEO)
935  if (ost->enc_stats_post.io)
936  enc_stats_write(ost, &ost->enc_stats_post, NULL, pkt,
937  ost->packets_encoded);
938 
939  if (debug_ts) {
940  av_log(ost, AV_LOG_INFO, "encoder -> type:%s "
941  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s "
942  "duration:%s duration_time:%s\n",
943  type_desc,
947  }
948 
949  av_packet_rescale_ts(pkt, pkt->time_base, ost->mux_timebase);
950  pkt->time_base = ost->mux_timebase;
951 
952  if (debug_ts) {
953  av_log(ost, AV_LOG_INFO, "encoder -> type:%s "
954  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s "
955  "duration:%s duration_time:%s\n",
956  type_desc,
960  }
961 
964  "Subtitle heartbeat logic failed in %s! (%s)\n",
965  __func__, av_err2str(ret));
966  exit_program(1);
967  }
968 
969  ost->data_size_enc += pkt->size;
970 
971  ost->packets_encoded++;
972 
973  of_output_packet(of, pkt, ost, 0);
974  }
975 
 /* unreachable: the drain loop always returns */
976  av_assert0(0);
977 }
978 
 /* Submit a frame for encoding, routing it through the inter-stream sync
  * queue when this stream participates in one, then drain the queue into
  * encode_frame().  NOTE(review): the first signature line (doxygen 979,
  * presumably `static int submit_encode_frame(OutputFile *of,
  * OutputStream *ost,`) and two interior lines (998, 1019) were lost in
  * extraction — confirm upstream. */
980  AVFrame *frame)
981 {
982  int ret;
983 
 /* no sync queue: encode directly */
984  if (ost->sq_idx_encode < 0)
985  return encode_frame(of, ost, frame);
986 
987  if (frame) {
 /* the queue takes ownership of a reference, keep caller's frame */
988  ret = av_frame_ref(ost->sq_frame, frame);
989  if (ret < 0)
990  return ret;
991  frame = ost->sq_frame;
992  }
993 
994  ret = sq_send(of->sq_encode, ost->sq_idx_encode,
995  SQFRAME(frame));
996  if (ret < 0) {
997  if (frame)
999  if (ret != AVERROR_EOF)
1000  return ret;
1001  }
1002 
1003  while (1) {
1004  AVFrame *enc_frame = ost->sq_frame;
1005 
1006  ret = sq_receive(of->sq_encode, ost->sq_idx_encode,
1007  SQFRAME(enc_frame));
1008  if (ret == AVERROR_EOF) {
 /* NULL frame triggers encoder flush below */
1009  enc_frame = NULL;
1010  } else if (ret < 0) {
1011  return (ret == AVERROR(EAGAIN)) ? 0 : ret;
1012  }
1013 
1014  ret = encode_frame(of, ost, enc_frame);
1015  if (enc_frame)
1016  av_frame_unref(enc_frame);
1017  if (ret < 0) {
1018  if (ret == AVERROR_EOF)
1020  return ret;
1021  }
1022  }
1023 }
1024 
 /* Encode one audio frame: rescale its pts into the encoder timebase
  * (honoring the output start time), enforce -t, track next_pts by sample
  * count, and submit for encoding.  NOTE(review): the first signature line
  * (doxygen 1025, presumably `static void do_audio_out(OutputFile *of,
  * OutputStream *ost,`) and the start-time subtraction line (1037) were
  * lost in extraction — confirm upstream. */
1026  AVFrame *frame)
1027 {
1028  AVCodecContext *enc = ost->enc_ctx;
1029  int ret;
1030 
1031  if (frame->pts == AV_NOPTS_VALUE)
1032  frame->pts = ost->next_pts;
1033  else {
1034  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1035  frame->pts =
1036  av_rescale_q(frame->pts, frame->time_base, enc->time_base) -
1038  }
1039  frame->time_base = enc->time_base;
1040 
1041  if (!check_recording_time(ost, frame->pts, frame->time_base))
1042  return;
1043 
 /* audio pts advance by the number of samples encoded */
1044  ost->next_pts = frame->pts + frame->nb_samples;
1045 
1046  ret = submit_encode_frame(of, ost, frame);
1047  if (ret < 0 && ret != AVERROR_EOF)
1048  exit_program(1);
1049 }
1050 
 /* Encode one subtitle and mux the result.  DVB subtitles are encoded
  * twice: one packet to draw the subpicture and one to clear it.
  * NOTE(review): two interior lines (doxygen 1085, presumably the
  * check_recording_time() guard, and 1112, presumably the pkt->pts
  * assignment) were lost in extraction — confirm upstream. */
1051 static void do_subtitle_out(OutputFile *of,
1052  OutputStream *ost,
1053  AVSubtitle *sub)
1054 {
1055  int subtitle_out_max_size = 1024 * 1024;
1056  int subtitle_out_size, nb, i, ret;
1057  AVCodecContext *enc;
1058  AVPacket *pkt = ost->pkt;
1059  int64_t pts;
1060 
1061  if (sub->pts == AV_NOPTS_VALUE) {
1062  av_log(ost, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
1063  if (exit_on_error)
1064  exit_program(1);
1065  return;
1066  }
1067 
1068  enc = ost->enc_ctx;
1069 
1070  /* Note: DVB subtitle need one packet to draw them and one other
1071  packet to clear them */
1072  /* XXX: signal it in the codec context ? */
1073  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1074  nb = 2;
1075  else
1076  nb = 1;
1077 
1078  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1079  pts = sub->pts;
1080  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
1081  pts -= output_files[ost->file_index]->start_time;
1082  for (i = 0; i < nb; i++) {
1083  unsigned save_num_rects = sub->num_rects;
1084 
1086  return;
1087 
1088  ret = av_new_packet(pkt, subtitle_out_max_size);
1089  if (ret < 0)
1090  report_and_exit(AVERROR(ENOMEM));
1091 
1092  sub->pts = pts;
1093  // start_display_time is required to be 0
1094  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1095  sub->end_display_time -= sub->start_display_time;
1096  sub->start_display_time = 0;
 /* second DVB pass encodes an empty subtitle to clear the screen */
1097  if (i == 1)
1098  sub->num_rects = 0;
1099 
1100  ost->frames_encoded++;
1101 
1102  subtitle_out_size = avcodec_encode_subtitle(enc, pkt->data, pkt->size, sub);
1103  if (i == 1)
1104  sub->num_rects = save_num_rects;
1105  if (subtitle_out_size < 0) {
1106  av_log(ost, AV_LOG_FATAL, "Subtitle encoding failed\n");
1107  exit_program(1);
1108  }
1109 
1110  av_shrink_packet(pkt, subtitle_out_size);
1111  pkt->time_base = ost->mux_timebase;
1113  pkt->duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, pkt->time_base);
1114  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1115  /* XXX: the pts correction is handled here. Maybe handling
1116  it in the codec would be better */
1117  if (i == 0)
1118  pkt->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, pkt->time_base);
1119  else
1120  pkt->pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, pkt->time_base);
1121  }
1122  pkt->dts = pkt->pts;
1123 
1124  of_output_packet(of, pkt, ost, 0);
1125  }
1126 }
1127 
1128 /* Convert frame timestamps to the encoder timebase and decide how many times
1129  * should this (and possibly previous) frame be repeated in order to conform to
1130  * desired target framerate (if any).
1131  */
 /* NOTE(review): the first signature line (doxygen 1132, presumably
  * `static void video_sync_process(OutputFile *of, OutputStream *ost,`)
  * was lost in extraction — confirm upstream. */
1133  AVFrame *next_picture, double duration,
1134  int64_t *nb_frames, int64_t *nb_frames_prev)
1135 {
1136  double delta0, delta;
1137 
1138  double sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture);
1139  /* delta0 is the "drift" between the input frame (next_picture) and
1140  * where it would fall in the output. */
1141  delta0 = sync_ipts - ost->next_pts;
1142  delta = delta0 + duration;
1143 
1144  // tracks the number of times the PREVIOUS frame should be duplicated,
1145  // mostly for variable framerate (VFR)
1146  *nb_frames_prev = 0;
1147  /* by default, we output a single frame */
1148  *nb_frames = 1;
1149 
 /* frame arrived slightly early: absorb the drift into its duration */
1150  if (delta0 < 0 &&
1151  delta > 0 &&
1152  ost->vsync_method != VSYNC_PASSTHROUGH &&
1153  ost->vsync_method != VSYNC_DROP) {
1154  if (delta0 < -0.6) {
1155  av_log(ost, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1156  } else
1157  av_log(ost, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1158  sync_ipts = ost->next_pts;
1159  duration += delta0;
1160  delta0 = 0;
1161  }
1162 
1163  switch (ost->vsync_method) {
1164  case VSYNC_VSCFR:
1165  if (ost->vsync_frame_number == 0 && delta0 >= 0.5) {
1166  av_log(ost, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1167  delta = duration;
1168  delta0 = 0;
1169  ost->next_pts = llrint(sync_ipts);
1170  }
 /* fallthrough: VSCFR behaves like CFR after the initial-gap handling */
1171  case VSYNC_CFR:
1172  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1173  if (frame_drop_threshold && delta < frame_drop_threshold && ost->vsync_frame_number) {
1174  *nb_frames = 0;
1175  } else if (delta < -1.1)
1176  *nb_frames = 0;
1177  else if (delta > 1.1) {
1178  *nb_frames = llrintf(delta);
1179  if (delta0 > 1.1)
1180  *nb_frames_prev = llrintf(delta0 - 0.6);
1181  }
1182  next_picture->duration = 1;
1183  break;
1184  case VSYNC_VFR:
1185  if (delta <= -0.6)
1186  *nb_frames = 0;
1187  else if (delta > 0.6)
1188  ost->next_pts = llrint(sync_ipts);
1189  next_picture->duration = duration;
1190  break;
1191  case VSYNC_DROP:
1192  case VSYNC_PASSTHROUGH:
1193  next_picture->duration = duration;
1194  ost->next_pts = llrint(sync_ipts);
1195  break;
1196  default:
1197  av_assert0(0);
1198  }
1199 }
1200 
1201 static enum AVPictureType forced_kf_apply(void *logctx, KeyframeForceCtx *kf,
1202  AVRational tb, const AVFrame *in_picture,
1203  int dup_idx)
1204 {
1205  double pts_time;
1206 
1207  if (kf->ref_pts == AV_NOPTS_VALUE)
1208  kf->ref_pts = in_picture->pts;
1209 
1210  pts_time = (in_picture->pts - kf->ref_pts) * av_q2d(tb);
1211  if (kf->index < kf->nb_pts &&
1212  av_compare_ts(in_picture->pts, tb, kf->pts[kf->index], AV_TIME_BASE_Q) >= 0) {
1213  kf->index++;
1214  goto force_keyframe;
1215  } else if (kf->pexpr) {
1216  double res;
1217  kf->expr_const_values[FKF_T] = pts_time;
1218  res = av_expr_eval(kf->pexpr,
1219  kf->expr_const_values, NULL);
1220  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1221  kf->expr_const_values[FKF_N],
1224  kf->expr_const_values[FKF_T],
1226  res);
1227 
1228  kf->expr_const_values[FKF_N] += 1;
1229 
1230  if (res) {
1233  kf->expr_const_values[FKF_N_FORCED] += 1;
1234  goto force_keyframe;
1235  }
1236  } else if (kf->type == KF_FORCE_SOURCE &&
1237  in_picture->key_frame == 1 && !dup_idx) {
1238  goto force_keyframe;
1239  } else if (kf->type == KF_FORCE_SOURCE_NO_DROP && !dup_idx) {
1240  kf->dropped_keyframe = 0;
1241  if ((in_picture->key_frame == 1) || kf->dropped_keyframe)
1242  goto force_keyframe;
1243  }
1244 
1245  return AV_PICTURE_TYPE_NONE;
1246 
1247 force_keyframe:
1248  av_log(logctx, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1249  return AV_PICTURE_TYPE_I;
1250 }
1251 
1252 /* May modify/reset next_picture */
1253 static void do_video_out(OutputFile *of,
1254  OutputStream *ost,
1255  AVFrame *next_picture)
1256 {
1257  int ret;
1258  AVCodecContext *enc = ost->enc_ctx;
1259  AVRational frame_rate;
1260  int64_t nb_frames, nb_frames_prev, i;
1261  double duration = 0;
1262  InputStream *ist = ost->ist;
1263  AVFilterContext *filter = ost->filter->filter;
1264 
1265  init_output_stream_wrapper(ost, next_picture, 1);
1266 
1267  frame_rate = av_buffersink_get_frame_rate(filter);
1268  if (frame_rate.num > 0 && frame_rate.den > 0)
1269  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1270 
1271  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1272  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1273 
1274  if (!ost->filters_script &&
1275  !ost->filters &&
1276  (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1277  next_picture &&
1278  ist &&
1279  lrintf(next_picture->duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1280  duration = lrintf(next_picture->duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1281  }
1282 
1283  if (!next_picture) {
1284  //end, flushing
1285  nb_frames_prev = nb_frames = mid_pred(ost->last_nb0_frames[0],
1286  ost->last_nb0_frames[1],
1287  ost->last_nb0_frames[2]);
1288  } else {
1289  video_sync_process(of, ost, next_picture, duration,
1290  &nb_frames, &nb_frames_prev);
1291  }
1292 
1293  memmove(ost->last_nb0_frames + 1,
1294  ost->last_nb0_frames,
1295  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1296  ost->last_nb0_frames[0] = nb_frames_prev;
1297 
1298  if (nb_frames_prev == 0 && ost->last_dropped) {
1299  nb_frames_drop++;
1301  "*** dropping frame %"PRId64" at ts %"PRId64"\n",
1302  ost->vsync_frame_number, ost->last_frame->pts);
1303  }
1304  if (nb_frames > (nb_frames_prev && ost->last_dropped) + (nb_frames > nb_frames_prev)) {
1305  if (nb_frames > dts_error_threshold * 30) {
1306  av_log(ost, AV_LOG_ERROR, "%"PRId64" frame duplication too large, skipping\n", nb_frames - 1);
1307  nb_frames_drop++;
1308  return;
1309  }
1310  nb_frames_dup += nb_frames - (nb_frames_prev && ost->last_dropped) - (nb_frames > nb_frames_prev);
1311  av_log(ost, AV_LOG_VERBOSE, "*** %"PRId64" dup!\n", nb_frames - 1);
1312  if (nb_frames_dup > dup_warning) {
1313  av_log(ost, AV_LOG_WARNING, "More than %"PRIu64" frames duplicated\n", dup_warning);
1314  dup_warning *= 10;
1315  }
1316  }
1317  ost->last_dropped = nb_frames == nb_frames_prev && next_picture;
1318  ost->kf.dropped_keyframe = ost->last_dropped && next_picture && next_picture->key_frame;
1319 
1320  /* duplicates frame if needed */
1321  for (i = 0; i < nb_frames; i++) {
1322  AVFrame *in_picture;
1323 
1324  if (i < nb_frames_prev && ost->last_frame->buf[0]) {
1325  in_picture = ost->last_frame;
1326  } else
1327  in_picture = next_picture;
1328 
1329  if (!in_picture)
1330  return;
1331 
1332  in_picture->pts = ost->next_pts;
1333 
1334  if (!check_recording_time(ost, in_picture->pts, ost->enc_ctx->time_base))
1335  return;
1336 
1337  in_picture->quality = enc->global_quality;
1338  in_picture->pict_type = forced_kf_apply(ost, &ost->kf, enc->time_base, in_picture, i);
1339 
1340  ret = submit_encode_frame(of, ost, in_picture);
1341  if (ret == AVERROR_EOF)
1342  break;
1343  else if (ret < 0)
1344  exit_program(1);
1345 
1346  ost->next_pts++;
1347  ost->vsync_frame_number++;
1348  }
1349 
1350  av_frame_unref(ost->last_frame);
1351  if (next_picture)
1352  av_frame_move_ref(ost->last_frame, next_picture);
1353 }
1354 
1355 /**
1356  * Get and encode new output from any of the filtergraphs, without causing
1357  * activity.
1358  *
1359  * @return 0 for success, <0 for severe errors
1360  */
1361 static int reap_filters(int flush)
1362 {
1363  AVFrame *filtered_frame = NULL;
1364 
1365  /* Reap all buffers present in the buffer sinks */
1366  for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
1367  OutputFile *of = output_files[ost->file_index];
1369  AVCodecContext *enc = ost->enc_ctx;
1370  int ret = 0;
1371 
1372  if (!ost->filter || !ost->filter->graph->graph)
1373  continue;
1374  filter = ost->filter->filter;
1375 
1376  /*
1377  * Unlike video, with audio the audio frame size matters.
1378  * Currently we are fully reliant on the lavfi filter chain to
1379  * do the buffering deed for us, and thus the frame size parameter
1380  * needs to be set accordingly. Where does one get the required
1381  * frame size? From the initialized AVCodecContext of an audio
1382  * encoder. Thus, if we have gotten to an audio stream, initialize
1383  * the encoder earlier than receiving the first AVFrame.
1384  */
1387 
1388  filtered_frame = ost->filtered_frame;
1389 
1390  while (1) {
1391  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1393  if (ret < 0) {
1394  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1396  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1397  } else if (flush && ret == AVERROR_EOF) {
1399  do_video_out(of, ost, NULL);
1400  }
1401  break;
1402  }
1403  if (ost->finished) {
1404  av_frame_unref(filtered_frame);
1405  continue;
1406  }
1407 
1408  if (filtered_frame->pts != AV_NOPTS_VALUE) {
1410  ost->last_filter_pts = av_rescale_q(filtered_frame->pts, tb,
1411  AV_TIME_BASE_Q);
1412  filtered_frame->time_base = tb;
1413 
1414  if (debug_ts)
1415  av_log(NULL, AV_LOG_INFO, "filter_raw -> pts:%s pts_time:%s time_base:%d/%d\n",
1416  av_ts2str(filtered_frame->pts),
1417  av_ts2timestr(filtered_frame->pts, &tb),
1418  tb.num, tb.den);
1419  }
1420 
1421  switch (av_buffersink_get_type(filter)) {
1422  case AVMEDIA_TYPE_VIDEO:
1423  if (!ost->frame_aspect_ratio.num)
1424  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1425 
1426  do_video_out(of, ost, filtered_frame);
1427  break;
1428  case AVMEDIA_TYPE_AUDIO:
1429  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1430  enc->ch_layout.nb_channels != filtered_frame->ch_layout.nb_channels) {
1432  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1433  break;
1434  }
1435  do_audio_out(of, ost, filtered_frame);
1436  break;
1437  default:
1438  // TODO support subtitle filters
1439  av_assert0(0);
1440  }
1441 
1442  av_frame_unref(filtered_frame);
1443  }
1444  }
1445 
1446  return 0;
1447 }
1448 
1449 static void print_final_stats(int64_t total_size)
1450 {
1451  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1452  uint64_t subtitle_size = 0;
1453  uint64_t data_size = 0;
1454  float percent = -1.0;
1455  int i, j;
1456  int pass1_used = 1;
1457 
1458  for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
1460  const uint64_t s = ost->data_size_mux;
1461 
1462  switch (par->codec_type) {
1463  case AVMEDIA_TYPE_VIDEO: video_size += s; break;
1464  case AVMEDIA_TYPE_AUDIO: audio_size += s; break;
1465  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += s; break;
1466  default: other_size += s; break;
1467  }
1468  extra_size += par->extradata_size;
1469  data_size += s;
1470  if (ost->enc_ctx &&
1471  (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1473  pass1_used = 0;
1474  }
1475 
1476  if (data_size && total_size>0 && total_size >= data_size)
1477  percent = 100.0 * (total_size - data_size) / data_size;
1478 
1479  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1480  video_size / 1024.0,
1481  audio_size / 1024.0,
1482  subtitle_size / 1024.0,
1483  other_size / 1024.0,
1484  extra_size / 1024.0);
1485  if (percent >= 0.0)
1486  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1487  else
1488  av_log(NULL, AV_LOG_INFO, "unknown");
1489  av_log(NULL, AV_LOG_INFO, "\n");
1490 
1491  /* print verbose per-stream stats */
1492  for (i = 0; i < nb_input_files; i++) {
1493  InputFile *f = input_files[i];
1494  uint64_t total_packets = 0, total_size = 0;
1495 
1496  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1497  i, f->ctx->url);
1498 
1499  for (j = 0; j < f->nb_streams; j++) {
1500  InputStream *ist = f->streams[j];
1501  enum AVMediaType type = ist->par->codec_type;
1502 
1503  total_size += ist->data_size;
1504  total_packets += ist->nb_packets;
1505 
1506  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1508  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1509  ist->nb_packets, ist->data_size);
1510 
1511  if (ist->decoding_needed) {
1512  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1513  ist->frames_decoded);
1514  if (type == AVMEDIA_TYPE_AUDIO)
1515  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1516  av_log(NULL, AV_LOG_VERBOSE, "; ");
1517  }
1518 
1519  av_log(NULL, AV_LOG_VERBOSE, "\n");
1520  }
1521 
1522  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1523  total_packets, total_size);
1524  }
1525 
1526  for (i = 0; i < nb_output_files; i++) {
1527  OutputFile *of = output_files[i];
1528  uint64_t total_packets = 0, total_size = 0;
1529 
1530  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1531  i, of->url);
1532 
1533  for (j = 0; j < of->nb_streams; j++) {
1534  OutputStream *ost = of->streams[j];
1535  enum AVMediaType type = ost->st->codecpar->codec_type;
1536 
1537  total_size += ost->data_size_mux;
1538  total_packets += atomic_load(&ost->packets_written);
1539 
1540  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1542  if (ost->enc_ctx) {
1543  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1544  ost->frames_encoded);
1545  if (type == AVMEDIA_TYPE_AUDIO)
1546  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1547  av_log(NULL, AV_LOG_VERBOSE, "; ");
1548  }
1549 
1550  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1551  atomic_load(&ost->packets_written), ost->data_size_mux);
1552 
1553  av_log(NULL, AV_LOG_VERBOSE, "\n");
1554  }
1555 
1556  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1557  total_packets, total_size);
1558  }
1559  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1560  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1561  if (pass1_used) {
1562  av_log(NULL, AV_LOG_WARNING, "\n");
1563  } else {
1564  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1565  }
1566  }
1567 }
1568 
1569 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1570 {
1571  AVBPrint buf, buf_script;
1572  int64_t total_size = of_filesize(output_files[0]);
1573  int vid;
1574  double bitrate;
1575  double speed;
1576  int64_t pts = INT64_MIN + 1;
1577  static int64_t last_time = -1;
1578  static int first_report = 1;
1579  static int qp_histogram[52];
1580  int hours, mins, secs, us;
1581  const char *hours_sign;
1582  int ret;
1583  float t;
1584 
1585  if (!print_stats && !is_last_report && !progress_avio)
1586  return;
1587 
1588  if (!is_last_report) {
1589  if (last_time == -1) {
1590  last_time = cur_time;
1591  }
1592  if (((cur_time - last_time) < stats_period && !first_report) ||
1593  (first_report && nb_output_dumped < nb_output_files))
1594  return;
1595  last_time = cur_time;
1596  }
1597 
1598  t = (cur_time-timer_start) / 1000000.0;
1599 
1600  vid = 0;
1602  av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1603  for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
1604  const AVCodecContext * const enc = ost->enc_ctx;
1605  const float q = enc ? ost->quality / (float) FF_QP2LAMBDA : -1;
1606 
1607  if (vid && ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
1608  av_bprintf(&buf, "q=%2.1f ", q);
1609  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1610  ost->file_index, ost->index, q);
1611  }
1612  if (!vid && ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
1613  float fps;
1614  uint64_t frame_number = atomic_load(&ost->packets_written);
1615 
1616  fps = t > 1 ? frame_number / t : 0;
1617  av_bprintf(&buf, "frame=%5"PRId64" fps=%3.*f q=%3.1f ",
1618  frame_number, fps < 9.95, fps, q);
1619  av_bprintf(&buf_script, "frame=%"PRId64"\n", frame_number);
1620  av_bprintf(&buf_script, "fps=%.2f\n", fps);
1621  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1622  ost->file_index, ost->index, q);
1623  if (is_last_report)
1624  av_bprintf(&buf, "L");
1625  if (qp_hist) {
1626  int j;
1627  int qp = lrintf(q);
1628  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1629  qp_histogram[qp]++;
1630  for (j = 0; j < 32; j++)
1631  av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1632  }
1633 
1634  if (enc && (enc->flags & AV_CODEC_FLAG_PSNR) &&
1635  (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1636  int j;
1637  double error, error_sum = 0;
1638  double scale, scale_sum = 0;
1639  double p;
1640  char type[3] = { 'Y','U','V' };
1641  av_bprintf(&buf, "PSNR=");
1642  for (j = 0; j < 3; j++) {
1643  if (is_last_report) {
1644  error = enc->error[j];
1645  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1646  } else {
1647  error = ost->error[j];
1648  scale = enc->width * enc->height * 255.0 * 255.0;
1649  }
1650  if (j)
1651  scale /= 4;
1652  error_sum += error;
1653  scale_sum += scale;
1654  p = psnr(error / scale);
1655  av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1656  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1657  ost->file_index, ost->index, type[j] | 32, p);
1658  }
1659  p = psnr(error_sum / scale_sum);
1660  av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1661  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1662  ost->file_index, ost->index, p);
1663  }
1664  vid = 1;
1665  }
1666  /* compute min output value */
1667  if (ost->last_mux_dts != AV_NOPTS_VALUE) {
1668  pts = FFMAX(pts, ost->last_mux_dts);
1669  if (copy_ts) {
1670  if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1)
1674  }
1675  }
1676 
1677  if (is_last_report)
1678  nb_frames_drop += ost->last_dropped;
1679  }
1680 
1681  secs = FFABS(pts) / AV_TIME_BASE;
1682  us = FFABS(pts) % AV_TIME_BASE;
1683  mins = secs / 60;
1684  secs %= 60;
1685  hours = mins / 60;
1686  mins %= 60;
1687  hours_sign = (pts < 0) ? "-" : "";
1688 
1689  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1690  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1691 
1692  if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1693  else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1694  if (pts == AV_NOPTS_VALUE) {
1695  av_bprintf(&buf, "N/A ");
1696  } else {
1697  av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1698  hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1699  }
1700 
1701  if (bitrate < 0) {
1702  av_bprintf(&buf, "bitrate=N/A");
1703  av_bprintf(&buf_script, "bitrate=N/A\n");
1704  }else{
1705  av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1706  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1707  }
1708 
1709  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1710  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1711  if (pts == AV_NOPTS_VALUE) {
1712  av_bprintf(&buf_script, "out_time_us=N/A\n");
1713  av_bprintf(&buf_script, "out_time_ms=N/A\n");
1714  av_bprintf(&buf_script, "out_time=N/A\n");
1715  } else {
1716  av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1717  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1718  av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1719  hours_sign, hours, mins, secs, us);
1720  }
1721 
1723  av_bprintf(&buf, " dup=%"PRId64" drop=%"PRId64, nb_frames_dup, nb_frames_drop);
1724  av_bprintf(&buf_script, "dup_frames=%"PRId64"\n", nb_frames_dup);
1725  av_bprintf(&buf_script, "drop_frames=%"PRId64"\n", nb_frames_drop);
1726 
1727  if (speed < 0) {
1728  av_bprintf(&buf, " speed=N/A");
1729  av_bprintf(&buf_script, "speed=N/A\n");
1730  } else {
1731  av_bprintf(&buf, " speed=%4.3gx", speed);
1732  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1733  }
1734 
1735  if (print_stats || is_last_report) {
1736  const char end = is_last_report ? '\n' : '\r';
1737  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1738  fprintf(stderr, "%s %c", buf.str, end);
1739  } else
1740  av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1741 
1742  fflush(stderr);
1743  }
1744  av_bprint_finalize(&buf, NULL);
1745 
1746  if (progress_avio) {
1747  av_bprintf(&buf_script, "progress=%s\n",
1748  is_last_report ? "end" : "continue");
1749  avio_write(progress_avio, buf_script.str,
1750  FFMIN(buf_script.len, buf_script.size - 1));
1752  av_bprint_finalize(&buf_script, NULL);
1753  if (is_last_report) {
1754  if ((ret = avio_closep(&progress_avio)) < 0)
1756  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1757  }
1758  }
1759 
1760  first_report = 0;
1761 
1762  if (is_last_report)
1763  print_final_stats(total_size);
1764 }
1765 
1767 {
1768  int ret;
1769 
1770  // We never got any input. Set a fake format, which will
1771  // come from libavformat.
1772  ifilter->format = par->format;
1773  ifilter->sample_rate = par->sample_rate;
1774  ifilter->width = par->width;
1775  ifilter->height = par->height;
1778  if (ret < 0)
1779  return ret;
1780 
1781  return 0;
1782 }
1783 
1784 static void flush_encoders(void)
1785 {
1786  int ret;
1787 
1788  for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
1789  OutputFile *of = output_files[ost->file_index];
1790  if (ost->sq_idx_encode >= 0)
1791  sq_send(of->sq_encode, ost->sq_idx_encode, SQFRAME(NULL));
1792  }
1793 
1794  for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
1795  AVCodecContext *enc = ost->enc_ctx;
1796  OutputFile *of = output_files[ost->file_index];
1797 
1798  if (!enc)
1799  continue;
1800 
1801  // Try to enable encoding with no input frames.
1802  // Maybe we should just let encoding fail instead.
1803  if (!ost->initialized) {
1804  FilterGraph *fg = ost->filter->graph;
1805 
1807  "Finishing stream without any data written to it.\n");
1808 
1809  if (ost->filter && !fg->graph) {
1810  int x;
1811  for (x = 0; x < fg->nb_inputs; x++) {
1812  InputFilter *ifilter = fg->inputs[x];
1813  if (ifilter->format < 0 &&
1814  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->par) < 0) {
1815  av_log(ost, AV_LOG_ERROR, "Error copying paramerets from input stream\n");
1816  exit_program(1);
1817  }
1818  }
1819 
1821  continue;
1822 
1823  ret = configure_filtergraph(fg);
1824  if (ret < 0) {
1825  av_log(ost, AV_LOG_ERROR, "Error configuring filter graph\n");
1826  exit_program(1);
1827  }
1828 
1829  of_output_packet(of, ost->pkt, ost, 1);
1830  }
1831 
1833  }
1834 
1836  continue;
1837 
1838  ret = submit_encode_frame(of, ost, NULL);
1839  if (ret != AVERROR_EOF)
1840  exit_program(1);
1841  }
1842 }
1843 
1844 /*
1845  * Check whether a packet from ist should be written into ost at this time
1846  */
1848 {
1849  OutputFile *of = output_files[ost->file_index];
1850 
1851  if (ost->ist != ist)
1852  return 0;
1853 
1854  if (ost->finished & MUXER_FINISHED)
1855  return 0;
1856 
1857  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1858  return 0;
1859 
1860  return 1;
1861 }
1862 
1864 {
1865  OutputFile *of = output_files[ost->file_index];
1866  InputFile *f = input_files [ist->file_index];
1867  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1868  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1869  AVPacket *opkt = ost->pkt;
1870 
1871  av_packet_unref(opkt);
1872  // EOF: flush output bitstream filters.
1873  if (!pkt) {
1874  of_output_packet(of, opkt, ost, 1);
1875  return;
1876  }
1877 
1878  if (!ost->streamcopy_started && !(pkt->flags & AV_PKT_FLAG_KEY) &&
1879  !ost->copy_initial_nonkeyframes)
1880  return;
1881 
1882  if (!ost->streamcopy_started && !ost->copy_prior_start) {
1883  if (pkt->pts == AV_NOPTS_VALUE ?
1884  ist->pts < ost->ts_copy_start :
1885  pkt->pts < av_rescale_q(ost->ts_copy_start, AV_TIME_BASE_Q, ist->st->time_base))
1886  return;
1887  }
1888 
1889  if (of->recording_time != INT64_MAX &&
1890  ist->pts >= of->recording_time + start_time) {
1892  return;
1893  }
1894 
1895  if (f->recording_time != INT64_MAX) {
1896  start_time = 0;
1897  if (copy_ts) {
1898  start_time += f->start_time != AV_NOPTS_VALUE ? f->start_time : 0;
1899  start_time += start_at_zero ? 0 : f->start_time_effective;
1900  }
1901  if (ist->pts >= f->recording_time + start_time) {
1903  return;
1904  }
1905  }
1906 
1907  if (av_packet_ref(opkt, pkt) < 0)
1908  exit_program(1);
1909 
1910  opkt->time_base = ost->mux_timebase;
1911 
1912  if (pkt->pts != AV_NOPTS_VALUE)
1913  opkt->pts = av_rescale_q(pkt->pts, ist->st->time_base, opkt->time_base) - ost_tb_start_time;
1914 
1915  if (pkt->dts == AV_NOPTS_VALUE) {
1916  opkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, opkt->time_base);
1917  } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
1919  if(!duration)
1920  duration = ist->par->frame_size;
1921  opkt->dts = av_rescale_delta(ist->st->time_base, pkt->dts,
1922  (AVRational){1, ist->par->sample_rate}, duration,
1924  /* dts will be set immediately afterwards to what pts is now */
1925  opkt->pts = opkt->dts - ost_tb_start_time;
1926  } else
1927  opkt->dts = av_rescale_q(pkt->dts, ist->st->time_base, opkt->time_base);
1928  opkt->dts -= ost_tb_start_time;
1929 
1930  opkt->duration = av_rescale_q(pkt->duration, ist->st->time_base, opkt->time_base);
1931 
1932  {
1934  if (ret < 0) {
1936  "Subtitle heartbeat logic failed in %s! (%s)\n",
1937  __func__, av_err2str(ret));
1938  exit_program(1);
1939  }
1940  }
1941 
1942  of_output_packet(of, opkt, ost, 0);
1943 
1944  ost->streamcopy_started = 1;
1945 }
1946 
1947 static void check_decode_result(InputStream *ist, int *got_output, int ret)
1948 {
1949  if (*got_output || ret<0)
1950  decode_error_stat[ret<0] ++;
1951 
1952  if (ret < 0 && exit_on_error)
1953  exit_program(1);
1954 
1955  if (*got_output && ist) {
1958  "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
1959  if (exit_on_error)
1960  exit_program(1);
1961  }
1962  }
1963 }
1964 
1965 // Filters can be configured only if the formats of all inputs are known.
1967 {
1968  int i;
1969  for (i = 0; i < fg->nb_inputs; i++) {
1970  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
1971  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
1972  return 0;
1973  }
1974  return 1;
1975 }
1976 
1977 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame, int keep_reference)
1978 {
1979  FilterGraph *fg = ifilter->graph;
1980  AVFrameSideData *sd;
1981  int need_reinit, ret;
1982  int buffersrc_flags = AV_BUFFERSRC_FLAG_PUSH;
1983 
1984  if (keep_reference)
1985  buffersrc_flags |= AV_BUFFERSRC_FLAG_KEEP_REF;
1986 
1987  /* determine if the parameters for this input changed */
1988  need_reinit = ifilter->format != frame->format;
1989 
1990  switch (ifilter->ist->par->codec_type) {
1991  case AVMEDIA_TYPE_AUDIO:
1992  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
1993  av_channel_layout_compare(&ifilter->ch_layout, &frame->ch_layout);
1994  break;
1995  case AVMEDIA_TYPE_VIDEO:
1996  need_reinit |= ifilter->width != frame->width ||
1997  ifilter->height != frame->height;
1998  break;
1999  }
2000 
2001  if (!ifilter->ist->reinit_filters && fg->graph)
2002  need_reinit = 0;
2003 
2004  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2005  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2006  need_reinit = 1;
2007 
2009  if (!ifilter->displaymatrix || memcmp(sd->data, ifilter->displaymatrix, sizeof(int32_t) * 9))
2010  need_reinit = 1;
2011  } else if (ifilter->displaymatrix)
2012  need_reinit = 1;
2013 
2014  if (need_reinit) {
2016  if (ret < 0)
2017  return ret;
2018  }
2019 
2020  /* (re)init the graph if possible, otherwise buffer the frame and return */
2021  if (need_reinit || !fg->graph) {
2022  if (!ifilter_has_all_input_formats(fg)) {
2024  if (!tmp)
2025  return AVERROR(ENOMEM);
2026 
2027  ret = av_fifo_write(ifilter->frame_queue, &tmp, 1);
2028  if (ret < 0)
2029  av_frame_free(&tmp);
2030 
2031  return ret;
2032  }
2033 
2034  ret = reap_filters(1);
2035  if (ret < 0 && ret != AVERROR_EOF) {
2036  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2037  return ret;
2038  }
2039 
2040  ret = configure_filtergraph(fg);
2041  if (ret < 0) {
2042  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2043  return ret;
2044  }
2045  }
2046 
2047  ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, buffersrc_flags);
2048  if (ret < 0) {
2049  if (ret != AVERROR_EOF)
2050  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2051  return ret;
2052  }
2053 
2054  return 0;
2055 }
2056 
2057 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2058 {
2059  int ret;
2060 
2061  ifilter->eof = 1;
2062 
2063  if (ifilter->filter) {
2065  if (ret < 0)
2066  return ret;
2067  } else {
2068  // the filtergraph was never configured
2069  if (ifilter->format < 0) {
2070  ret = ifilter_parameters_from_codecpar(ifilter, ifilter->ist->par);
2071  if (ret < 0)
2072  return ret;
2073  }
2074  if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2075  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2076  return AVERROR_INVALIDDATA;
2077  }
2078  }
2079 
2080  return 0;
2081 }
2082 
2083 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2084 // There is the following difference: if you got a frame, you must call
2085 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2086 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2087 static int decode(InputStream *ist, AVCodecContext *avctx,
2088  AVFrame *frame, int *got_frame, AVPacket *pkt)
2089 {
2090  int ret;
2091 
2092  *got_frame = 0;
2093 
2094  if (pkt) {
2095  ret = avcodec_send_packet(avctx, pkt);
2096  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2097  // decoded frames with avcodec_receive_frame() until done.
2098  if (ret < 0 && ret != AVERROR_EOF)
2099  return ret;
2100  }
2101 
2102  ret = avcodec_receive_frame(avctx, frame);
2103  if (ret < 0 && ret != AVERROR(EAGAIN))
2104  return ret;
2105  if (ret >= 0) {
2106  if (ist->want_frame_data) {
2107  FrameData *fd;
2108 
2109  av_assert0(!frame->opaque_ref);
2110  frame->opaque_ref = av_buffer_allocz(sizeof(*fd));
2111  if (!frame->opaque_ref) {
2113  return AVERROR(ENOMEM);
2114  }
2115  fd = (FrameData*)frame->opaque_ref->data;
2116  fd->pts = frame->pts;
2117  fd->tb = avctx->pkt_timebase;
2118  fd->idx = avctx->frame_num - 1;
2119  }
2120 
2121  *got_frame = 1;
2122  }
2123 
2124  return 0;
2125 }
2126 
2128 {
2129  int i, ret;
2130 
2131  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2132  for (i = 0; i < ist->nb_filters; i++) {
2133  ret = ifilter_send_frame(ist->filters[i], decoded_frame, i < ist->nb_filters - 1);
2134  if (ret == AVERROR_EOF)
2135  ret = 0; /* ignore */
2136  if (ret < 0) {
2138  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2139  break;
2140  }
2141  }
2142  return ret;
2143 }
2144 
2146  int *decode_failed)
2147 {
2149  AVCodecContext *avctx = ist->dec_ctx;
2150  int ret, err = 0;
2151  AVRational decoded_frame_tb;
2152 
2154  ret = decode(ist, avctx, decoded_frame, got_output, pkt);
2155  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2156  if (ret < 0)
2157  *decode_failed = 1;
2158 
2159  if (ret != AVERROR_EOF)
2161 
2162  if (!*got_output || ret < 0)
2163  return ret;
2164 
2166  ist->frames_decoded++;
2167 
2168  /* increment next_dts to use for the case where the input stream does not
2169  have timestamps or there are multiple frames in the packet */
2170  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2172  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2174 
2175  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2176  decoded_frame_tb = ist->st->time_base;
2177  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2178  decoded_frame->pts = pkt->pts;
2179  decoded_frame_tb = ist->st->time_base;
2180  }else {
2181  decoded_frame->pts = ist->dts;
2182  decoded_frame_tb = AV_TIME_BASE_Q;
2183  }
2184  if (pkt && pkt->duration && ist->prev_pkt_pts != AV_NOPTS_VALUE &&
2185  pkt->pts != AV_NOPTS_VALUE && pkt->pts - ist->prev_pkt_pts > pkt->duration)
2187  if (pkt)
2188  ist->prev_pkt_pts = pkt->pts;
2190  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2191  (AVRational){1, decoded_frame->sample_rate},
2194  (AVRational){1, decoded_frame->sample_rate});
2197 
2199  return err < 0 ? err : ret;
2200 }
2201 
/*
 * decode_video(): decode one video packet for 'ist' (pkt == NULL together
 * with eof drains the decoder), derive a best-effort timestamp for the
 * decoded frame and update ist->pts/next_pts accordingly.
 * *duration_pts receives the frame duration (stream timebase);
 * *decode_failed is set to 1 on decoder errors.
 *
 * NOTE(review): extraction gaps — e.g. the decoded_frame declaration, the
 * hwaccel condition line and the calls sending the frame to the filters —
 * are elided here (gaps in the embedded line numbers).
 */
2202 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2203  int *decode_failed)
2204 {
2206  int i, ret = 0, err = 0;
2207  int64_t best_effort_timestamp;
2208  int64_t dts = AV_NOPTS_VALUE;
2209 
2210  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2211  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2212  // skip the packet.
2213  if (!eof && pkt && pkt->size == 0)
2214  return 0;
2215 
2216  if (ist->dts != AV_NOPTS_VALUE)
2217  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2218  if (pkt) {
2219  pkt->dts = dts; // ffmpeg.c probably shouldn't do this
2220  }
2221 
2222  // The old code used to set dts on the drain packet, which does not work
2223  // with the new API anymore.
/* instead, remember the dts values in a FIFO so frames emitted while
 * draining can still get a timestamp (consumed further below) */
2224  if (eof) {
2225  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2226  if (!new)
2227  return AVERROR(ENOMEM);
2228  ist->dts_buffer = new;
2229  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2230  }
2231 
2233  ret = decode(ist, ist->dec_ctx, decoded_frame, got_output, pkt);
2234  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2235  if (ret < 0)
2236  *decode_failed = 1;
2237 
2238  // The following line may be required in some cases where there is no parser
2239  // or the parser does not has_b_frames correctly
2240  if (ist->par->video_delay < ist->dec_ctx->has_b_frames) {
2241  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2242  ist->par->video_delay = ist->dec_ctx->has_b_frames;
2243  } else
2245  "video_delay is larger in decoder than demuxer %d > %d.\n"
2246  "If you want to help, upload a sample "
2247  "of this file to https://streams.videolan.org/upload/ "
2248  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2249  ist->dec_ctx->has_b_frames,
2250  ist->par->video_delay);
2251  }
2252 
2253  if (ret != AVERROR_EOF)
2255 
/* a mid-stream resolution/pixel-format change is only logged at debug
 * level; the frame is still processed */
2256  if (*got_output && ret >= 0) {
2257  if (ist->dec_ctx->width != decoded_frame->width ||
2258  ist->dec_ctx->height != decoded_frame->height ||
2259  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2260  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2264  ist->dec_ctx->width,
2265  ist->dec_ctx->height,
2266  ist->dec_ctx->pix_fmt);
2267  }
2268  }
2269 
2270  if (!*got_output || ret < 0)
2271  return ret;
2272 
2273  if(ist->top_field_first>=0)
2275 
2276  ist->frames_decoded++;
2277 
/* the hwaccel guard condition is on an elided line; when active, frame data
 * is transferred from the hw surface via the retrieve callback */
2279  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2280  if (err < 0)
2281  goto fail;
2282  }
2283 
2284  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2285  *duration_pts = decoded_frame->duration;
2286 
/* forced CFR input (-r before -i): timestamps are simply counted up */
2287  if (ist->framerate.num)
2288  best_effort_timestamp = ist->cfr_next_pts++;
2289 
/* while draining, fall back to the buffered packet dts values (FIFO order) */
2290  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2291  best_effort_timestamp = ist->dts_buffer[0];
2292 
2293  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2294  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2295  ist->nb_dts_buffer--;
2296  }
2297 
2298  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2299  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2300 
2301  if (ts != AV_NOPTS_VALUE)
2302  ist->next_pts = ist->pts = ts;
2303  }
2304 
2305  if (debug_ts) {
2306  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2307  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2308  ist->st->index, av_ts2str(decoded_frame->pts),
2310  best_effort_timestamp,
2311  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2313  ist->st->time_base.num, ist->st->time_base.den);
2314  }
2315 
2316  if (ist->st->sample_aspect_ratio.num)
2318 
2320 
2321 fail:
2323  return err < 0 ? err : ret;
2324 }
2325 
/*
 * process_subtitle(): handle one decoded subtitle for 'ist' — optionally
 * fix the previous subtitle's duration (-fix_sub_duration), feed sub2video
 * filtering when configured, and send the subtitle to every matching
 * subtitle-encoding output stream.
 *
 * NOTE(review): the first line of the signature and a few statements (e.g.
 * the av_log head and the queueing into sub2video.sub_queue) are elided in
 * this extraction.
 */
2327 {
2328  int ret = 0;
2329  int free_sub = 1;
2330 
/* -fix_sub_duration: clamp the previous subtitle's end_display_time to the
 * gap (in ms) between its pts and this one's */
2331  if (ist->fix_sub_duration) {
2332  int end = 1;
2333  if (ist->prev_sub.got_output) {
2334  end = av_rescale(subtitle->pts - ist->prev_sub.subtitle.pts,
2335  1000, AV_TIME_BASE);
2336  if (end < ist->prev_sub.subtitle.end_display_time) {
2338  "Subtitle duration reduced from %"PRId32" to %d%s\n",
2340  end <= 0 ? ", dropping it" : "");
2341  ist->prev_sub.subtitle.end_display_time = end;
2342  }
2343  }
/* swap current and previous subtitle state: the previous one (now with a
 * fixed duration) is the one processed below */
2344  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2345  FFSWAP(int, ret, ist->prev_sub.ret);
2347  if (end <= 0)
2348  goto out;
2349  }
2350 
2351  if (!*got_output)
2352  return ret;
2353 
2354  if (ist->sub2video.frame) {
2355  sub2video_update(ist, INT64_MIN, subtitle);
2356  } else if (ist->nb_filters) {
2357  if (!ist->sub2video.sub_queue)
2359  if (!ist->sub2video.sub_queue)
2360  report_and_exit(AVERROR(ENOMEM));
2361 
2363  if (ret < 0)
2364  exit_program(1);
/* ownership of the subtitle moved into the queue — don't free it below */
2365  free_sub = 0;
2366  }
2367 
2368  if (!subtitle->num_rects)
2369  goto out;
2370 
2371  for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
2372  if (!check_output_constraints(ist, ost) || !ost->enc_ctx
2373  || ost->enc_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE)
2374  continue;
2375 
2376  do_subtitle_out(output_files[ost->file_index], ost, subtitle);
2377  }
2378 
2379 out:
2380  if (free_sub)
2382  return ret;
2383 }
2384 
2386 {
2387  int ret = AVERROR_BUG;
2388  AVSubtitle tmp = {
2389  .format = src->format,
2390  .start_display_time = src->start_display_time,
2391  .end_display_time = src->end_display_time,
2392  .num_rects = 0,
2393  .rects = NULL,
2394  .pts = src->pts
2395  };
2396 
2397  if (!src->num_rects)
2398  goto success;
2399 
2400  if (!(tmp.rects = av_calloc(src->num_rects, sizeof(*tmp.rects))))
2401  return AVERROR(ENOMEM);
2402 
2403  for (int i = 0; i < src->num_rects; i++) {
2404  AVSubtitleRect *src_rect = src->rects[i];
2405  AVSubtitleRect *dst_rect;
2406 
2407  if (!(dst_rect = tmp.rects[i] = av_mallocz(sizeof(*tmp.rects[0])))) {
2408  ret = AVERROR(ENOMEM);
2409  goto cleanup;
2410  }
2411 
2412  tmp.num_rects++;
2413 
2414  dst_rect->type = src_rect->type;
2415  dst_rect->flags = src_rect->flags;
2416 
2417  dst_rect->x = src_rect->x;
2418  dst_rect->y = src_rect->y;
2419  dst_rect->w = src_rect->w;
2420  dst_rect->h = src_rect->h;
2421  dst_rect->nb_colors = src_rect->nb_colors;
2422 
2423  if (src_rect->text)
2424  if (!(dst_rect->text = av_strdup(src_rect->text))) {
2425  ret = AVERROR(ENOMEM);
2426  goto cleanup;
2427  }
2428 
2429  if (src_rect->ass)
2430  if (!(dst_rect->ass = av_strdup(src_rect->ass))) {
2431  ret = AVERROR(ENOMEM);
2432  goto cleanup;
2433  }
2434 
2435  for (int j = 0; j < 4; j++) {
2436  // SUBTITLE_BITMAP images are special in the sense that they
2437  // are like PAL8 images. first pointer to data, second to
2438  // palette. This makes the size calculation match this.
2439  size_t buf_size = src_rect->type == SUBTITLE_BITMAP && j == 1 ?
2440  AVPALETTE_SIZE :
2441  src_rect->h * src_rect->linesize[j];
2442 
2443  if (!src_rect->data[j])
2444  continue;
2445 
2446  if (!(dst_rect->data[j] = av_memdup(src_rect->data[j], buf_size))) {
2447  ret = AVERROR(ENOMEM);
2448  goto cleanup;
2449  }
2450  dst_rect->linesize[j] = src_rect->linesize[j];
2451  }
2452  }
2453 
2454 success:
2455  *dst = tmp;
2456 
2457  return 0;
2458 
2459 cleanup:
2460  avsubtitle_free(&tmp);
2461 
2462  return ret;
2463 }
2464 
2465 static int fix_sub_duration_heartbeat(InputStream *ist, int64_t signal_pts)
2466 {
2467  int ret = AVERROR_BUG;
2468  int got_output = 1;
2469  AVSubtitle *prev_subtitle = &ist->prev_sub.subtitle;
2471 
2472  if (!ist->fix_sub_duration || !prev_subtitle->num_rects ||
2473  signal_pts <= prev_subtitle->pts)
2474  return 0;
2475 
2476  if ((ret = copy_av_subtitle(&subtitle, prev_subtitle)) < 0)
2477  return ret;
2478 
2479  subtitle.pts = signal_pts;
2480 
2481  return process_subtitle(ist, &subtitle, &got_output);
2482 }
2483 
2485 {
2486  OutputFile *of = output_files[ost->file_index];
2487  int64_t signal_pts = av_rescale_q(pkt->pts, pkt->time_base,
2488  AV_TIME_BASE_Q);
2489 
2490  if (!ost->fix_sub_duration_heartbeat || !(pkt->flags & AV_PKT_FLAG_KEY))
2491  // we are only interested in heartbeats on streams configured, and
2492  // only on random access points.
2493  return 0;
2494 
2495  for (int i = 0; i < of->nb_streams; i++) {
2496  OutputStream *iter_ost = of->streams[i];
2497  InputStream *ist = iter_ost->ist;
2498  int ret = AVERROR_BUG;
2499 
2500  if (iter_ost == ost || !ist || !ist->decoding_needed ||
2502  // We wish to skip the stream that causes the heartbeat,
2503  // output streams without an input stream, streams not decoded
2504  // (as fix_sub_duration is only done for decoded subtitles) as
2505  // well as non-subtitle streams.
2506  continue;
2507 
2508  if ((ret = fix_sub_duration_heartbeat(ist, signal_pts)) < 0)
2509  return ret;
2510  }
2511 
2512  return 0;
2513 }
2514 
/*
 * transcode_subtitles(): decode one subtitle packet for 'ist' and hand the
 * result to process_subtitle(). On decode failure or empty output,
 * *decode_failed is set; an empty (drain) packet additionally flushes the
 * sub2video state.
 *
 * NOTE(review): the first signature line, the local AVSubtitle declaration
 * and the head of the decode call are elided in this extraction.
 */
2516  int *got_output, int *decode_failed)
2517 {
2520  &subtitle, got_output, pkt);
2521 
2523 
2524  if (ret < 0 || !*got_output) {
2525  *decode_failed = 1;
2526  if (!pkt->size)
2527  sub2video_flush(ist);
2528  return ret;
2529  }
2530 
2531  ist->frames_decoded++;
2532 
2533  return process_subtitle(ist, &subtitle, got_output);
2534 }
2535 
2537 {
2538  int i, ret;
2539  /* TODO keep pts also in stream time base to avoid converting back */
2540  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2542 
2543  for (i = 0; i < ist->nb_filters; i++) {
2544  ret = ifilter_send_eof(ist->filters[i], pts);
2545  if (ret < 0)
2546  return ret;
2547  }
2548  return 0;
2549 }
2550 
2551 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * process_input_packet(): central per-packet dispatcher for one input
 * stream. Maintains the dts/pts prediction state, runs the type-specific
 * decoder loop when decoding is needed (draining it on EOF), sends filter
 * EOFs once the decoder is exhausted, and otherwise performs the streamcopy
 * timestamp bookkeeping and forwards the packet to matching outputs.
 * Returns 0 once EOF has been fully processed, non-zero while more data is
 * expected.
 *
 * NOTE(review): several lines are elided in this extraction (gaps in the
 * embedded numbering), e.g. parts of the duration computations and the
 * packet-duration streamcopy branch.
 */
2552 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2553 {
2554  const AVCodecParameters *par = ist->par;
2555  int ret = 0;
2556  int repeating = 0;
2557  int eof_reached = 0;
2558 
2559  AVPacket *avpkt = ist->pkt;
2560 
/* initialize the timestamp prediction state on the first packet */
2561  if (!ist->saw_first_ts) {
2562  ist->first_dts =
2563  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2564  ist->pts = 0;
2565  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2566  ist->first_dts =
2567  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2568  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2569  }
2570  ist->saw_first_ts = 1;
2571  }
2572 
2573  if (ist->next_dts == AV_NOPTS_VALUE)
2574  ist->next_dts = ist->dts;
2575  if (ist->next_pts == AV_NOPTS_VALUE)
2576  ist->next_pts = ist->pts;
2577 
/* keep a private reference of the packet; avpkt survives across the
 * decode-loop iterations below */
2578  if (pkt) {
2579  av_packet_unref(avpkt);
2580  ret = av_packet_ref(avpkt, pkt);
2581  if (ret < 0)
2582  return ret;
2583  }
2584 
2585  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2586  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2588  ist->next_pts = ist->pts = ist->dts;
2589  }
2590 
2591  // while we have more to decode or while the decoder did output something on EOF
2592  while (ist->decoding_needed) {
2593  int64_t duration_dts = 0;
2594  int64_t duration_pts = 0;
2595  int got_output = 0;
2596  int decode_failed = 0;
2597 
2598  ist->pts = ist->next_pts;
2599  ist->dts = ist->next_dts;
2600 
2601  switch (par->codec_type) {
2602  case AVMEDIA_TYPE_AUDIO:
2603  ret = decode_audio (ist, repeating ? NULL : avpkt, &got_output,
2604  &decode_failed);
2605  av_packet_unref(avpkt);
2606  break;
2607  case AVMEDIA_TYPE_VIDEO:
2608  ret = decode_video (ist, repeating ? NULL : avpkt, &got_output, &duration_pts, !pkt,
2609  &decode_failed);
/* estimate the dts step: packet duration when available, otherwise derive
 * from the decoder framerate and repeat_pict (ticks) */
2610  if (!repeating || !pkt || got_output) {
2611  if (pkt && pkt->duration) {
2612  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2613  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2614  int ticks = ist->last_pkt_repeat_pict >= 0 ?
2615  ist->last_pkt_repeat_pict + 1 :
2616  ist->dec_ctx->ticks_per_frame;
2617  duration_dts = ((int64_t)AV_TIME_BASE *
2618  ist->dec_ctx->framerate.den * ticks) /
2620  }
2621 
2622  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2623  ist->next_dts += duration_dts;
2624  }else
2625  ist->next_dts = AV_NOPTS_VALUE;
2626  }
2627 
2628  if (got_output) {
2629  if (duration_pts > 0) {
2630  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2631  } else {
2632  ist->next_pts += duration_dts;
2633  }
2634  }
2635  av_packet_unref(avpkt);
2636  break;
2637  case AVMEDIA_TYPE_SUBTITLE:
2638  if (repeating)
2639  break;
2640  ret = transcode_subtitles(ist, avpkt, &got_output, &decode_failed);
2641  if (!pkt && ret >= 0)
2642  ret = AVERROR_EOF;
2643  av_packet_unref(avpkt);
2644  break;
2645  default:
2646  return -1;
2647  }
2648 
2649  if (ret == AVERROR_EOF) {
2650  eof_reached = 1;
2651  break;
2652  }
2653 
2654  if (ret < 0) {
2655  if (decode_failed) {
2656  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2657  ist->file_index, ist->st->index, av_err2str(ret));
2658  } else {
2659  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2660  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2661  }
2662  if (!decode_failed || exit_on_error)
2663  exit_program(1);
2664  break;
2665  }
2666 
2667  if (got_output)
2668  ist->got_output = 1;
2669 
2670  if (!got_output)
2671  break;
2672 
2673  // During draining, we might get multiple output frames in this loop.
2674  // ffmpeg.c does not drain the filter chain on configuration changes,
2675  // which means if we send multiple frames at once to the filters, and
2676  // one of those frames changes configuration, the buffered frames will
2677  // be lost. This can upset certain FATE tests.
2678  // Decode only 1 frame per call on EOF to appease these FATE tests.
2679  // The ideal solution would be to rewrite decoding to use the new
2680  // decoding API in a better way.
2681  if (!pkt)
2682  break;
2683 
2684  repeating = 1;
2685  }
2686 
2687  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2688  /* except when looping we need to flush but not to send an EOF */
2689  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2690  int ret = send_filter_eof(ist);
2691  if (ret < 0) {
2692  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2693  exit_program(1);
2694  }
2695  }
2696 
2697  /* handle stream copy */
2698  if (!ist->decoding_needed && pkt) {
2699  ist->dts = ist->next_dts;
2700  switch (par->codec_type) {
2701  case AVMEDIA_TYPE_AUDIO:
2702  av_assert1(pkt->duration >= 0);
2703  if (par->sample_rate) {
2704  ist->next_dts += ((int64_t)AV_TIME_BASE * par->frame_size) /
2705  par->sample_rate;
2706  } else {
2708  }
2709  break;
2710  case AVMEDIA_TYPE_VIDEO:
/* forced input framerate: round next_dts to the frame grid */
2711  if (ist->framerate.num) {
2712  // TODO: Remove work-around for c99-to-c89 issue 7
2713  AVRational time_base_q = AV_TIME_BASE_Q;
2714  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2715  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2716  } else if (pkt->duration) {
2718  } else if(ist->dec_ctx->framerate.num != 0) {
2719  int ticks = ist->last_pkt_repeat_pict >= 0 ?
2720  ist->last_pkt_repeat_pict + 1 :
2721  ist->dec_ctx->ticks_per_frame;
2722  ist->next_dts += ((int64_t)AV_TIME_BASE *
2723  ist->dec_ctx->framerate.den * ticks) /
2725  }
2726  break;
2727  }
2728  ist->pts = ist->dts;
2729  ist->next_pts = ist->next_dts;
2730  } else if (!ist->decoding_needed)
2731  eof_reached = 1;
2732 
/* forward the packet to all streamcopy outputs fed by this input */
2733  for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
2734  if (!check_output_constraints(ist, ost) || ost->enc_ctx ||
2735  (!pkt && no_eof))
2736  continue;
2737 
2738  do_streamcopy(ist, ost, pkt);
2739  }
2740 
2741  return !eof_reached;
2742 }
2743 
/*
 * get_format(): AVCodecContext.get_format callback. Walks the decoder's
 * offered pixel formats; for software formats it stops immediately, for
 * hwaccel formats it looks up a matching AVCodecHWConfig and tries to
 * initialize the hw device. On hard failure of an explicitly requested
 * generic hwaccel it returns AV_PIX_FMT_NONE to abort decoding.
 *
 * NOTE(review): the signature line, the pixdesc lookup and the
 * hwaccel_decode_init() call line are elided in this extraction.
 */
2745 {
2746  InputStream *ist = s->opaque;
2747  const enum AVPixelFormat *p;
2748  int ret;
2749 
/* pix_fmts is terminated by AV_PIX_FMT_NONE; formats are in order of
 * decreasing decoder preference */
2750  for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2752  const AVCodecHWConfig *config = NULL;
2753  int i;
2754 
2755  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2756  break;
2757 
2758  if (ist->hwaccel_id == HWACCEL_GENERIC ||
2759  ist->hwaccel_id == HWACCEL_AUTO) {
2760  for (i = 0;; i++) {
2761  config = avcodec_get_hw_config(s->codec, i);
2762  if (!config)
2763  break;
2764  if (!(config->methods &
2766  continue;
2767  if (config->pix_fmt == *p)
2768  break;
2769  }
2770  }
2771  if (config && config->device_type == ist->hwaccel_device_type) {
2773  if (ret < 0) {
/* an explicitly requested hwaccel that cannot initialize is fatal;
 * HWACCEL_AUTO falls through to the next offered format */
2774  if (ist->hwaccel_id == HWACCEL_GENERIC) {
2776  "%s hwaccel requested for input stream #%d:%d, "
2777  "but cannot be initialized.\n",
2778  av_hwdevice_get_type_name(config->device_type),
2779  ist->file_index, ist->st->index);
2780  return AV_PIX_FMT_NONE;
2781  }
2782  continue;
2783  }
2784 
2785  ist->hwaccel_pix_fmt = *p;
2786  break;
2787  }
2788  }
2789 
2790  return *p;
2791 }
2792 
/*
 * init_input_stream(): open the decoder for 'ist' when decoding is needed —
 * install the get_format callback, set decoder options (threads, DVB
 * compute_edt, pkt_timebase), set up the hw device, and call
 * avcodec_open2(). On failure a human-readable message is written into
 * 'error' (error_len bytes) and a negative AVERROR is returned.
 *
 * NOTE(review): a few lines are elided in this extraction, e.g. the
 * DECODING_FOR_FILTER warning condition and the hw_device_setup_for_decode()
 * call preceding its error check.
 */
2793 static int init_input_stream(InputStream *ist, char *error, int error_len)
2794 {
2795  int ret;
2796 
2797  if (ist->decoding_needed) {
2798  const AVCodec *codec = ist->dec;
2799  if (!codec) {
2800  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2801  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2802  return AVERROR(EINVAL);
2803  }
2804 
2805  ist->dec_ctx->opaque = ist;
2806  ist->dec_ctx->get_format = get_format;
2807 
/* DVB subtitles used for output need end-display-time computed by the
 * decoder (compute_edt); mixing with filtering is only partially supported */
2808  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2809  (ist->decoding_needed & DECODING_FOR_OST)) {
2810  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2812  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2813  }
2814 
2815  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2816  * audio, and video decoders such as cuvid or mediacodec */
2817  ist->dec_ctx->pkt_timebase = ist->st->time_base;
2818 
2819  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2820  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2821  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2823  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2824 
/* hw device setup call is on an elided line; its result is checked here */
2826  if (ret < 0) {
2827  snprintf(error, error_len, "Device setup failed for "
2828  "decoder on input stream #%d:%d : %s",
2829  ist->file_index, ist->st->index, av_err2str(ret));
2830  return ret;
2831  }
2832 
2833  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2834  if (ret == AVERROR_EXPERIMENTAL)
2835  abort_codec_experimental(codec, 0);
2836 
2837  snprintf(error, error_len,
2838  "Error while opening decoder for input stream "
2839  "#%d:%d : %s",
2840  ist->file_index, ist->st->index, av_err2str(ret));
2841  return ret;
2842  }
2844  }
2845 
2846  ist->next_pts = AV_NOPTS_VALUE;
2847  ist->next_dts = AV_NOPTS_VALUE;
2848 
2849  return 0;
2850 }
2851 
2853 {
2854  OutputFile *of = output_files[ost->file_index];
2855  InputStream *ist = ost->ist;
2858  AVCodecContext *codec_ctx;
2859  AVRational sar;
2860  int i, ret;
2861  uint32_t codec_tag = par->codec_tag;
2862 
2863  av_assert0(ist && !ost->filter);
2864 
2865  codec_ctx = avcodec_alloc_context3(NULL);
2866  if (!codec_ctx)
2867  return AVERROR(ENOMEM);
2868 
2869  ret = avcodec_parameters_to_context(codec_ctx, ist->par);
2870  if (ret >= 0)
2871  ret = av_opt_set_dict(codec_ctx, &ost->encoder_opts);
2872  if (ret < 0) {
2874  "Error setting up codec context options.\n");
2875  avcodec_free_context(&codec_ctx);
2876  return ret;
2877  }
2878 
2879  ret = avcodec_parameters_from_context(par, codec_ctx);
2880  avcodec_free_context(&codec_ctx);
2881  if (ret < 0) {
2883  "Error getting reference codec parameters.\n");
2884  return ret;
2885  }
2886 
2887  if (!codec_tag) {
2888  unsigned int codec_tag_tmp;
2889  if (!of->format->codec_tag ||
2891  !av_codec_get_tag2(of->format->codec_tag, par->codec_id, &codec_tag_tmp))
2892  codec_tag = par->codec_tag;
2893  }
2894 
2895  par->codec_tag = codec_tag;
2896 
2897  if (!ost->frame_rate.num)
2898  ost->frame_rate = ist->framerate;
2899 
2900  if (ost->frame_rate.num)
2901  ost->st->avg_frame_rate = ost->frame_rate;
2902  else
2903  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2904 
2906  if (ret < 0)
2907  return ret;
2908 
2909  // copy timebase while removing common factors
2910  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0) {
2911  if (ost->frame_rate.num)
2912  ost->st->time_base = av_inv_q(ost->frame_rate);
2913  else
2915  }
2916 
2917  // copy estimated duration as a hint to the muxer
2918  if (ost->st->duration <= 0 && ist->st->duration > 0)
2919  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
2920 
2921  if (!ost->copy_prior_start) {
2922  ost->ts_copy_start = (of->start_time == AV_NOPTS_VALUE) ?
2923  0 : of->start_time;
2924  if (copy_ts && ifile->start_time != AV_NOPTS_VALUE) {
2925  ost->ts_copy_start = FFMAX(ost->ts_copy_start,
2926  ifile->start_time + ifile->ts_offset);
2927  }
2928  }
2929 
2930  if (ist->st->nb_side_data) {
2931  for (i = 0; i < ist->st->nb_side_data; i++) {
2932  const AVPacketSideData *sd_src = &ist->st->side_data[i];
2933  uint8_t *dst_data;
2934 
2935  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
2936  if (!dst_data)
2937  return AVERROR(ENOMEM);
2938  memcpy(dst_data, sd_src->data, sd_src->size);
2939  }
2940  }
2941 
2942 #if FFMPEG_ROTATION_METADATA
2943  if (ost->rotate_overridden) {
2945  sizeof(int32_t) * 9);
2946  if (sd)
2947  av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
2948  }
2949 #endif
2950 
2951  switch (par->codec_type) {
2952  case AVMEDIA_TYPE_AUDIO:
2953  if ((par->block_align == 1 || par->block_align == 1152 || par->block_align == 576) &&
2955  par->block_align = 0;
2956  if (par->codec_id == AV_CODEC_ID_AC3)
2957  par->block_align = 0;
2958  break;
2959  case AVMEDIA_TYPE_VIDEO:
2960  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2961  sar =
2962  av_mul_q(ost->frame_aspect_ratio,
2963  (AVRational){ par->height, par->width });
2964  av_log(ost, AV_LOG_WARNING, "Overriding aspect ratio "
2965  "with stream copy may produce invalid files\n");
2966  }
2967  else if (ist->st->sample_aspect_ratio.num)
2968  sar = ist->st->sample_aspect_ratio;
2969  else
2970  sar = par->sample_aspect_ratio;
2972  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2973  ost->st->r_frame_rate = ist->st->r_frame_rate;
2974  break;
2975  }
2976 
2977  ost->mux_timebase = ist->st->time_base;
2978 
2979  return 0;
2980 }
2981 
2983 {
2984  const char *cname = ost->enc_ctx->codec->name;
2985  uint8_t *encoder_string;
2986  int encoder_string_len;
2987 
2988  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
2989  return;
2990 
2991  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(cname) + 2;
2992  encoder_string = av_mallocz(encoder_string_len);
2993  if (!encoder_string)
2994  report_and_exit(AVERROR(ENOMEM));
2995 
2996  if (!of->bitexact && !ost->bitexact)
2997  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2998  else
2999  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3000  av_strlcat(encoder_string, cname, encoder_string_len);
3001  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3003 }
3004 
3005 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3006 {
3007  InputStream *ist = ost->ist;
3008  AVCodecContext *enc_ctx = ost->enc_ctx;
3009 
3010  if (ost->enc_timebase.num > 0) {
3011  enc_ctx->time_base = ost->enc_timebase;
3012  return;
3013  }
3014 
3015  if (ost->enc_timebase.num < 0) {
3016  if (ist) {
3017  enc_ctx->time_base = ist->st->time_base;
3018  return;
3019  }
3020 
3022  "Input stream data not available, using default time base\n");
3023  }
3024 
3025  enc_ctx->time_base = default_time_base;
3026 }
3027 
3029 {
3030  InputStream *ist = ost->ist;
3031  AVCodecContext *enc_ctx = ost->enc_ctx;
3033  OutputFile *of = output_files[ost->file_index];
3034  int ret;
3035 
3036  set_encoder_id(output_files[ost->file_index], ost);
3037 
3038  if (ist) {
3039  dec_ctx = ist->dec_ctx;
3040  }
3041 
3042  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3043  if (!ost->frame_rate.num)
3044  ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3045  if (!ost->frame_rate.num && !ost->max_frame_rate.num) {
3046  ost->frame_rate = (AVRational){25, 1};
3048  "No information "
3049  "about the input framerate is available. Falling "
3050  "back to a default value of 25fps. Use the -r option "
3051  "if you want a different framerate.\n");
3052  }
3053 
3054  if (ost->max_frame_rate.num &&
3055  (av_q2d(ost->frame_rate) > av_q2d(ost->max_frame_rate) ||
3056  !ost->frame_rate.den))
3057  ost->frame_rate = ost->max_frame_rate;
3058 
3059  if (enc_ctx->codec->supported_framerates && !ost->force_fps) {
3060  int idx = av_find_nearest_q_idx(ost->frame_rate, enc_ctx->codec->supported_framerates);
3061  ost->frame_rate = enc_ctx->codec->supported_framerates[idx];
3062  }
3063  // reduce frame rate for mpeg4 to be within the spec limits
3064  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3065  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3066  ost->frame_rate.num, ost->frame_rate.den, 65535);
3067  }
3068  }
3069 
3070  switch (enc_ctx->codec_type) {
3071  case AVMEDIA_TYPE_AUDIO:
3072  enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3073  enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3074  ret = av_buffersink_get_ch_layout(ost->filter->filter, &enc_ctx->ch_layout);
3075  if (ret < 0)
3076  return ret;
3077 
3078  if (ost->bits_per_raw_sample)
3079  enc_ctx->bits_per_raw_sample = ost->bits_per_raw_sample;
3080  else if (dec_ctx && ost->filter->graph->is_meta)
3082  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3083 
3085  break;
3086 
3087  case AVMEDIA_TYPE_VIDEO:
3088  init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3089 
3090  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3091  enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3092  if ( av_q2d(enc_ctx->time_base) < 0.001 && ost->vsync_method != VSYNC_PASSTHROUGH
3093  && (ost->vsync_method == VSYNC_CFR || ost->vsync_method == VSYNC_VSCFR ||
3094  (ost->vsync_method == VSYNC_AUTO && !(of->format->flags & AVFMT_VARIABLE_FPS)))){
3095  av_log(ost, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3096  "Please consider specifying a lower framerate, a different muxer or "
3097  "setting vsync/fps_mode to vfr\n");
3098  }
3099 
3100  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3101  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3102  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3103  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3104  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3105  av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3106 
3107  enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3108 
3109  if (ost->bits_per_raw_sample)
3110  enc_ctx->bits_per_raw_sample = ost->bits_per_raw_sample;
3111  else if (dec_ctx && ost->filter->graph->is_meta)
3113  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3114 
3115  if (frame) {
3116  enc_ctx->color_range = frame->color_range;
3117  enc_ctx->color_primaries = frame->color_primaries;
3118  enc_ctx->color_trc = frame->color_trc;
3119  enc_ctx->colorspace = frame->colorspace;
3120  enc_ctx->chroma_sample_location = frame->chroma_location;
3121  }
3122 
3123  enc_ctx->framerate = ost->frame_rate;
3124 
3125  ost->st->avg_frame_rate = ost->frame_rate;
3126 
3127  // Field order: autodetection
3128  if (frame) {
3130  ost->top_field_first >= 0)
3131  frame->top_field_first = !!ost->top_field_first;
3132 
3133  if (frame->interlaced_frame) {
3134  if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG)
3135  enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
3136  else
3137  enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
3138  } else
3139  enc_ctx->field_order = AV_FIELD_PROGRESSIVE;
3140  }
3141 
3142  // Field order: override
3143  if (ost->top_field_first == 0) {
3144  enc_ctx->field_order = AV_FIELD_BB;
3145  } else if (ost->top_field_first == 1) {
3146  enc_ctx->field_order = AV_FIELD_TT;
3147  }
3148 
3149  break;
3150  case AVMEDIA_TYPE_SUBTITLE:
3151  enc_ctx->time_base = AV_TIME_BASE_Q;
3152  if (!enc_ctx->width) {
3153  enc_ctx->width = ost->ist->par->width;
3154  enc_ctx->height = ost->ist->par->height;
3155  }
3156  if (dec_ctx && dec_ctx->subtitle_header) {
3157  /* ASS code assumes this buffer is null terminated so add extra byte. */
3158  ost->enc_ctx->subtitle_header = av_mallocz(dec_ctx->subtitle_header_size + 1);
3159  if (!ost->enc_ctx->subtitle_header)
3160  return AVERROR(ENOMEM);
3161  memcpy(ost->enc_ctx->subtitle_header, dec_ctx->subtitle_header,
3163  ost->enc_ctx->subtitle_header_size = dec_ctx->subtitle_header_size;
3164  }
3165  if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE &&
3166  enc_ctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
3167  int input_props = 0, output_props = 0;
3168  AVCodecDescriptor const *input_descriptor =
3169  avcodec_descriptor_get(ist->dec->id);
3170  AVCodecDescriptor const *output_descriptor =
3171  avcodec_descriptor_get(ost->enc_ctx->codec_id);
3172  if (input_descriptor)
3173  input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3174  if (output_descriptor)
3175  output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3176  if (input_props && output_props && input_props != output_props) {
3178  "Subtitle encoding currently only possible from text to text "
3179  "or bitmap to bitmap");
3180  return AVERROR_INVALIDDATA;
3181  }
3182  }
3183 
3184  break;
3185  case AVMEDIA_TYPE_DATA:
3186  break;
3187  default:
3188  abort();
3189  break;
3190  }
3191 
3192  if (ost->bitexact)
3193  enc_ctx->flags |= AV_CODEC_FLAG_BITEXACT;
3194 
3195  if (ost->sq_idx_encode >= 0)
3196  sq_set_tb(of->sq_encode, ost->sq_idx_encode, enc_ctx->time_base);
3197 
3198  ost->mux_timebase = enc_ctx->time_base;
3199 
3200  return 0;
3201 }
3202 
/*
 * init_output_stream(): finish initialization of one output stream. For an
 * encoding stream: set up the encoder context (via an elided call around
 * line 3212), open the encoder, propagate coded side data and global input
 * side data to the muxer stream, and fix up timebase/duration hints. For a
 * streamcopy stream: delegate to init_output_stream_streamcopy() (call line
 * elided). Finally registers the stream with the muxer via of_stream_init().
 * On failure a message is written into 'error' (error_len bytes).
 *
 * NOTE(review): the first signature line and several call lines are elided
 * in this extraction (gaps in the embedded numbering).
 */
3204  char *error, int error_len)
3205 {
3206  int ret = 0;
3207 
3208  if (ost->enc_ctx) {
3209  const AVCodec *codec = ost->enc_ctx->codec;
3210  InputStream *ist = ost->ist;
3211 
3213  if (ret < 0)
3214  return ret;
3215 
3216  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3217  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3218 
/* the guard for this copy_opaque flag is on an elided line */
3220  ret = av_dict_set(&ost->encoder_opts, "flags", "+copy_opaque", AV_DICT_MULTIKEY);
3221  if (ret < 0)
3222  return ret;
3223  }
3224 
/* hw device setup for the encoder happens on an elided line */
3226  if (ret < 0) {
3227  snprintf(error, error_len, "Device setup failed for "
3228  "encoder on output stream #%d:%d : %s",
3229  ost->file_index, ost->index, av_err2str(ret));
3230  return ret;
3231  }
3232 
3233  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3234  if (ret == AVERROR_EXPERIMENTAL)
3235  abort_codec_experimental(codec, 1);
3236  snprintf(error, error_len,
3237  "Error while opening encoder for output stream #%d:%d - "
3238  "maybe incorrect parameters such as bit_rate, rate, width or height",
3239  ost->file_index, ost->index);
3240  return ret;
3241  }
3242  if (codec->type == AVMEDIA_TYPE_AUDIO &&
3244  av_buffersink_set_frame_size(ost->filter->filter,
3245  ost->enc_ctx->frame_size);
3246  assert_avoptions(ost->encoder_opts);
3247  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3248  ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3249  av_log(ost, AV_LOG_WARNING, "The bitrate parameter is set too low."
3250  " It takes bits/s as argument, not kbits/s\n");
3251 
/* avcodec_parameters_from_context() call is on an elided line */
3253  if (ret < 0) {
3255  "Error initializing the output stream codec context.\n");
3256  exit_program(1);
3257  }
3258 
3259  if (ost->enc_ctx->nb_coded_side_data) {
3260  int i;
3261 
3262  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3263  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3264  uint8_t *dst_data;
3265 
3266  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3267  if (!dst_data)
3268  return AVERROR(ENOMEM);
3269  memcpy(dst_data, sd_src->data, sd_src->size);
3270  }
3271  }
3272 
3273  /*
3274  * Add global input side data. For now this is naive, and copies it
3275  * from the input stream's global side data. All side data should
3276  * really be funneled over AVFrame and libavfilter, then added back to
3277  * packet side data, and then potentially using the first packet for
3278  * global side data.
3279  */
3280  if (ist) {
3281  int i;
3282  for (i = 0; i < ist->st->nb_side_data; i++) {
3283  AVPacketSideData *sd = &ist->st->side_data[i];
3284  if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
3285  uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3286  if (!dst)
3287  return AVERROR(ENOMEM);
3288  memcpy(dst, sd->data, sd->size);
/* autorotate: rotation is handled by filters, so neutralize the copied
 * display matrix to avoid double rotation */
3289  if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3290  av_display_rotation_set((int32_t *)dst, 0);
3291  }
3292  }
3293  }
3294 
3295  // copy timebase while removing common factors
3296  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3297  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3298 
3299  // copy estimated duration as a hint to the muxer
3300  if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3301  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3302  } else if (ost->ist) {
/* streamcopy path: init_output_stream_streamcopy() call is elided */
3304  if (ret < 0)
3305  return ret;
3306  }
3307 
3308  ret = of_stream_init(output_files[ost->file_index], ost);
3309  if (ret < 0)
3310  return ret;
3311 
3312  return ret;
3313 }
3314 
/*
 * One-time transcoding setup: start framerate emulation clocks, initialize
 * all input streams, initialize streamcopy/subtitle/data output streams
 * (encoder-fed streams initialize lazily on their first frame), discard
 * unused programs, then dump the stream mapping to the log.
 *
 * NOTE(review): recovered listing — several interior lines were dropped by
 * extraction (e.g. the InputFile lookups at the top of the loops and the
 * init_output_stream wrapper call).
 */
3315 static int transcode_init(void)
 3316 {
 3317  int ret = 0;
 3318  char error[1024] = {0};
 3319 
 3320  /* init framerate emulation */
 3321  for (int i = 0; i < nb_input_files; i++) {
/* NOTE(review): `ifile` is read below; its declaration line (3322) was dropped. */
 3323  if (ifile->readrate || ifile->rate_emu)
 3324  for (int j = 0; j < ifile->nb_streams; j++)
 3325  ifile->streams[j]->start = av_gettime_relative();
 3326  }
 3327 
 3328  /* init input streams */
 3329  for (InputStream *ist = ist_iter(NULL); ist; ist = ist_iter(ist))
 3330  if ((ret = init_input_stream(ist, error, sizeof(error))) < 0)
 3331  goto dump_format;
 3332 
 3333  /*
 3334  * initialize stream copy and subtitle/data streams.
 3335  * Encoded AVFrame based streams will get initialized as follows:
 3336  * - when the first AVFrame is received in do_video_out
 3337  * - just before the first AVFrame is received in either transcode_step
 3338  * or reap_filters due to us requiring the filter chain buffer sink
 3339  * to be configured with the correct audio frame size, which is only
 3340  * known after the encoder is initialized.
 3341  */
 3342  for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
 3343  if (ost->enc_ctx &&
 3346  continue;
 3347 
/* NOTE(review): the init call setting `ret` (line 3348) was dropped. */
 3349  if (ret < 0)
 3350  goto dump_format;
 3351  }
 3352 
 3353  /* discard unused programs */
 3354  for (int i = 0; i < nb_input_files; i++) {
 3356  for (int j = 0; j < ifile->ctx->nb_programs; j++) {
 3357  AVProgram *p = ifile->ctx->programs[j];
 3358  int discard = AVDISCARD_ALL;
 3359 
/* Keep a program if at least one of its streams is not being discarded. */
 3360  for (int k = 0; k < p->nb_stream_indexes; k++)
 3361  if (!ifile->streams[p->stream_index[k]]->discard) {
 3363  break;
 3364  }
 3365  p->discard = discard;
 3366  }
 3367  }
 3368 
 3369  dump_format:
 3370  /* dump the stream mapping */
 3371  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
 3372  for (InputStream *ist = ist_iter(NULL); ist; ist = ist_iter(ist)) {
 3373  for (int j = 0; j < ist->nb_filters; j++) {
 3374  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
 3375  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
 3376  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
 3377  ist->filters[j]->name);
 3378  if (nb_filtergraphs > 1)
 3379  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
 3380  av_log(NULL, AV_LOG_INFO, "\n");
 3381  }
 3382  }
 3383  }
 3384 
 3385  for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
 3386  if (ost->attachment_filename) {
 3387  /* an attached file */
 3388  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
 3389  ost->attachment_filename, ost->file_index, ost->index);
 3390  continue;
 3391  }
 3392 
 3393  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
 3394  /* output from a complex graph */
 3395  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
 3396  if (nb_filtergraphs > 1)
 3397  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
 3398 
 3399  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
 3400  ost->index, ost->enc_ctx->codec->name);
 3401  continue;
 3402  }
 3403 
 3404  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
 3405  ost->ist->file_index,
 3406  ost->ist->st->index,
 3407  ost->file_index,
 3408  ost->index);
 3409  if (ost->enc_ctx) {
 3410  const AVCodec *in_codec = ost->ist->dec;
 3411  const AVCodec *out_codec = ost->enc_ctx->codec;
 3412  const char *decoder_name = "?";
 3413  const char *in_codec_name = "?";
 3414  const char *encoder_name = "?";
 3415  const char *out_codec_name = "?";
 3416  const AVCodecDescriptor *desc;
 3417 
/* Report "native" when the decoder name equals the codec's canonical name. */
 3418  if (in_codec) {
 3419  decoder_name = in_codec->name;
 3420  desc = avcodec_descriptor_get(in_codec->id);
 3421  if (desc)
 3422  in_codec_name = desc->name;
 3423  if (!strcmp(decoder_name, in_codec_name))
 3424  decoder_name = "native";
 3425  }
 3426 
 3427  if (out_codec) {
 3428  encoder_name = out_codec->name;
 3429  desc = avcodec_descriptor_get(out_codec->id);
 3430  if (desc)
 3431  out_codec_name = desc->name;
 3432  if (!strcmp(encoder_name, out_codec_name))
 3433  encoder_name = "native";
 3434  }
 3435 
 3436  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
 3437  in_codec_name, decoder_name,
 3438  out_codec_name, encoder_name);
 3439  } else
 3440  av_log(NULL, AV_LOG_INFO, " (copy)");
 3441  av_log(NULL, AV_LOG_INFO, "\n");
 3442  }
 3443 
 3444  if (ret) {
 3445  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
 3446  return ret;
 3447  }
 3448 
 3450 
 3451  return 0;
 3452 }
3453 
3454 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3455 static int need_output(void)
3456 {
3457  for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
3458  if (ost->finished)
3459  continue;
3460 
3461  return 1;
3462  }
3463 
3464  return 0;
3465 }
3466 
3467 /**
3468  * Select the output stream to process.
3469  *
3470  * @return selected output stream, or NULL if none available
3471  */
/*
 * Pick the output stream to drive next: the one with the smallest current
 * output timestamp (last filter pts if available, otherwise last muxed dts),
 * preferring uninitialized streams so they can be set up first. Returns NULL
 * when the best candidate is currently unavailable.
 *
 * NOTE(review): recovered listing — the signature line (choose_output, per
 * the doc comment above) and the av_log() call head at original line 3486
 * were dropped by extraction.
 */
3473 {
 3474  int64_t opts_min = INT64_MAX;
 3475  OutputStream *ost_min = NULL;
 3476 
 3477  for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
 3478  int64_t opts;
 3479 
 3480  if (ost->filter && ost->last_filter_pts != AV_NOPTS_VALUE) {
 3481  opts = ost->last_filter_pts;
 3482  } else {
/* Streams that have not muxed anything yet sort first (INT64_MIN). */
 3483  opts = ost->last_mux_dts == AV_NOPTS_VALUE ?
 3484  INT64_MIN : ost->last_mux_dts;
 3485  if (ost->last_mux_dts == AV_NOPTS_VALUE)
 3487  "cur_dts is invalid [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
 3488  ost->initialized, ost->inputs_done, ost->finished);
 3489  }
 3490 
/* Prefer streams that still need initialization. */
 3491  if (!ost->initialized && !ost->inputs_done)
 3492  return ost->unavailable ? NULL : ost;
 3493 
 3494  if (!ost->finished && opts < opts_min) {
 3495  opts_min = opts;
 3496  ost_min = ost->unavailable ? NULL : ost;
 3497  }
 3498  }
 3499  return ost_min;
 3500 }
3501 
/* Enable (on != 0) or disable terminal echo on stdin; no-op without termios. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios tty;

    if (tcgetattr(0, &tty) != 0)
        return;

    if (on)
        tty.c_lflag |= ECHO;
    else
        tty.c_lflag &= ~ECHO;

    tcsetattr(0, TCSANOW, &tty);
#endif
}
3513 
/*
 * Poll the terminal (at most every 100ms) for interactive commands:
 * 'q' quit, '+'/'-' log level, 's' QP histogram toggle, 'c'/'C' send or
 * queue a filter command, 'd'/'D' set or cycle codec debug flags, '?' help.
 * Returns AVERROR_EXIT to stop transcoding, 0 otherwise.
 *
 * NOTE(review): recovered listing — a few lines were dropped by extraction
 * (e.g. the av_log head of the parse-error message and the OutputStream
 * loop head in the debug branch).
 */
3514 static int check_keyboard_interaction(int64_t cur_time)
 3515 {
 3516  int i, ret, key;
 3517  static int64_t last_time;
 3518  if (received_nb_signals)
 3519  return AVERROR_EXIT;
 3520  /* read_key() returns 0 on EOF */
 3521  if (cur_time - last_time >= 100000) {
 3522  key = read_key();
 3523  last_time = cur_time;
 3524  }else
 3525  key = -1;
 3526  if (key == 'q') {
 3527  av_log(NULL, AV_LOG_INFO, "\n\n[q] command received. Exiting.\n\n");
 3528  return AVERROR_EXIT;
 3529  }
 3530  if (key == '+') av_log_set_level(av_log_get_level()+10);
 3531  if (key == '-') av_log_set_level(av_log_get_level()-10);
 3532  if (key == 's') qp_hist ^= 1;
 3533  if (key == 'c' || key == 'C'){
 3534  char buf[4096], target[64], command[256], arg[256] = {0};
 3535  double time;
 3536  int k, n = 0;
 3537  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
 3538  i = 0;
 3539  set_tty_echo(1);
/* Read a full command line, echoing, until newline/CR or buffer full. */
 3540  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
 3541  if (k > 0)
 3542  buf[i++] = k;
 3543  buf[i] = 0;
 3544  set_tty_echo(0);
 3545  fprintf(stderr, "\n");
 3546  if (k > 0 &&
 3547  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
 3548  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
 3549  target, time, command, arg);
 3550  for (i = 0; i < nb_filtergraphs; i++) {
 3551  FilterGraph *fg = filtergraphs[i];
 3552  if (fg->graph) {
/* time < 0: send immediately; otherwise queue for the given time ('C' only). */
 3553  if (time < 0) {
 3554  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
 3555  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
 3556  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
 3557  } else if (key == 'c') {
 3558  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
 3560  } else {
 3561  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
 3562  if (ret < 0)
 3563  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
 3564  }
 3565  }
 3566  }
 3567  } else {
 3569  "Parse error, at least 3 arguments were expected, "
 3570  "only %d given in string '%s'\n", n, buf);
 3571  }
 3572  }
 3573  if (key == 'd' || key == 'D'){
 3574  int debug=0;
/* 'D' cycles the debug flags by shifting; 'd' reads a numeric value. */
 3575  if(key == 'D') {
 3576  InputStream *ist = ist_iter(NULL);
 3577 
 3578  if (ist)
 3579  debug = ist->dec_ctx->debug << 1;
 3580 
 3581  if(!debug) debug = 1;
 3582  while (debug & FF_DEBUG_DCT_COEFF) //unsupported, would just crash
 3583  debug += debug;
 3584  }else{
 3585  char buf[32];
 3586  int k = 0;
 3587  i = 0;
 3588  set_tty_echo(1);
 3589  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
 3590  if (k > 0)
 3591  buf[i++] = k;
 3592  buf[i] = 0;
 3593  set_tty_echo(0);
 3594  fprintf(stderr, "\n");
 3595  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
 3596  fprintf(stderr,"error parsing debug value\n");
 3597  }
/* Apply the chosen flags to every decoder and encoder context. */
 3598  for (InputStream *ist = ist_iter(NULL); ist; ist = ist_iter(ist))
 3599  ist->dec_ctx->debug = debug;
 3601  if (ost->enc_ctx)
 3602  ost->enc_ctx->debug = debug;
 3603  }
 3604  if(debug) av_log_set_level(AV_LOG_DEBUG);
 3605  fprintf(stderr,"debug=%d\n", debug);
 3606  }
 3607  if (key == '?'){
 3608  fprintf(stderr, "key function\n"
 3609  "? show this help\n"
 3610  "+ increase verbosity\n"
 3611  "- decrease verbosity\n"
 3612  "c Send command to first matching filter supporting it\n"
 3613  "C Send/Queue command to all matching filters\n"
 3614  "D cycle through available debug modes\n"
 3615  "h dump packets/hex press to cycle through the 3 states\n"
 3616  "q quit\n"
 3617  "s Show QP histogram\n"
 3618  );
 3619  }
 3620  return 0;
 3621 }
3622 
3623 static int got_eagain(void)
3624 {
3625  for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost))
3626  if (ost->unavailable)
3627  return 1;
3628  return 0;
3629 }
3630 
3631 static void reset_eagain(void)
3632 {
3633  int i;
3634  for (i = 0; i < nb_input_files; i++)
3635  input_files[i]->eagain = 0;
3636  for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost))
3637  ost->unavailable = 0;
3638 }
3639 
/*
 * Flush the decoders of all streams of an input file that need processing,
 * and for decoded audio streams report the final frame duration back to the
 * demuxer thread via the audio duration queue.
 *
 * NOTE(review): recovered listing — the signature line (original line 3640,
 * presumably taking the InputFile *ifile used below — confirm against the
 * full source) and one statement near the end were dropped by extraction.
 */
3641 {
 3642  for (int i = 0; i < ifile->nb_streams; i++) {
 3643  InputStream *ist = ifile->streams[i];
 3644  int ret;
 3645 
 3646  if (!ist->processing_needed)
 3647  continue;
 3648 
/* Drain the stream: NULL packet with no_eof=1 flushes without finishing. */
 3649  do {
 3650  ret = process_input_packet(ist, NULL, 1);
 3651  } while (ret > 0);
 3652 
 3653  if (ist->decoding_needed) {
 3654  /* report last frame duration to the demuxer thread */
 3655  if (ist->par->codec_type == AVMEDIA_TYPE_AUDIO) {
 3656  LastFrameDuration dur;
 3657 
 3658  dur.stream_idx = i;
 3659  dur.duration = av_rescale_q(ist->nb_samples,
 3660  (AVRational){ 1, ist->dec_ctx->sample_rate},
 3661  ist->st->time_base);
 3662 
 3663  av_thread_message_queue_send(ifile->audio_duration_queue, &dur, 0);
 3664  }
 3665 
 3667  }
 3668  }
 3669 }
3670 
/*
 * Detect timestamp discontinuities on a packet and either accumulate a
 * correcting offset (for formats flagged AVFMT_TS_DISCONT) or drop invalid
 * DTS/PTS values (for non-discontinuous formats). Always records the
 * packet's dts (in AV_TIME_BASE units) as the file's last seen timestamp.
 *
 * NOTE(review): recovered listing — the signature line and several lines
 * (rounding-flag arguments, av_log heads, the pkt->dts/pts offset updates)
 * were dropped by extraction.
 */
3672  AVPacket *pkt)
 3673 {
 3674  const int fmt_is_discont = ifile->ctx->iformat->flags & AVFMT_TS_DISCONT;
 3675  int disable_discontinuity_correction = copy_ts;
 3676  int64_t pkt_dts = av_rescale_q_rnd(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q,
 3678 
/* With -copyts, still correct when the dts looks like a pts_wrap wraparound. */
 3679  if (copy_ts && ist->next_dts != AV_NOPTS_VALUE &&
 3680  fmt_is_discont && ist->st->pts_wrap_bits < 60) {
 3681  int64_t wrap_dts = av_rescale_q_rnd(pkt->dts + (1LL<<ist->st->pts_wrap_bits),
 3682  ist->st->time_base, AV_TIME_BASE_Q,
 3684  if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
 3685  disable_discontinuity_correction = 0;
 3686  }
 3687 
 3688  if (ist->next_dts != AV_NOPTS_VALUE && !disable_discontinuity_correction) {
 3689  int64_t delta = pkt_dts - ist->next_dts;
 3690  if (fmt_is_discont) {
/* Large jump (beyond dts_delta_threshold) or backwards dts: fold it into
 * the per-file discontinuity offset. */
 3691  if (FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE ||
 3692  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
 3693  ifile->ts_offset_discont -= delta;
 3695  "timestamp discontinuity for stream #%d:%d "
 3696  "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
 3697  ist->file_index, ist->st->index, ist->st->id,
 3699  delta, ifile->ts_offset_discont);
 3701  if (pkt->pts != AV_NOPTS_VALUE)
 3703  }
 3704  } else {
/* Non-discont format: out-of-range timestamps are dropped, not corrected. */
 3705  if (FFABS(delta) > 1LL * dts_error_threshold * AV_TIME_BASE) {
 3706  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt->dts, ist->next_dts, pkt->stream_index);
 3707  pkt->dts = AV_NOPTS_VALUE;
 3708  }
 3709  if (pkt->pts != AV_NOPTS_VALUE){
 3710  int64_t pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
 3711  delta = pkt_pts - ist->next_dts;
 3712  if (FFABS(delta) > 1LL * dts_error_threshold * AV_TIME_BASE) {
 3713  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt->pts, ist->next_dts, pkt->stream_index);
 3714  pkt->pts = AV_NOPTS_VALUE;
 3715  }
 3716  }
 3717  }
/* First packet of a stream: compare against the file-wide last timestamp. */
 3718  } else if (ist->next_dts == AV_NOPTS_VALUE && !copy_ts &&
 3719  fmt_is_discont && ifile->last_ts != AV_NOPTS_VALUE) {
 3720  int64_t delta = pkt_dts - ifile->last_ts;
 3721  if (FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE) {
 3722  ifile->ts_offset_discont -= delta;
 3724  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
 3725  delta, ifile->ts_offset_discont);
 3727  if (pkt->pts != AV_NOPTS_VALUE)
 3729  }
 3730  }
 3731 
 3732  ifile->last_ts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
 3733 }
3734 
/*
 * Apply the accumulated timestamp-discontinuity offset to a packet's
 * pts/dts, then run discontinuity detection for audio/video packets that
 * carry a valid dts.
 *
 * NOTE(review): recovered listing — the signature line and the detection
 * call at the end (original line 3752) were dropped by extraction.
 */
3736  AVPacket *pkt)
 3737 {
/* Rescale the file-wide offset from AV_TIME_BASE into the stream timebase. */
 3738  int64_t offset = av_rescale_q(ifile->ts_offset_discont, AV_TIME_BASE_Q,
 3739  ist->st->time_base);
 3740 
 3741  // apply previously-detected timestamp-discontinuity offset
 3742  // (to all streams, not just audio/video)
 3743  if (pkt->dts != AV_NOPTS_VALUE)
 3744  pkt->dts += offset;
 3745  if (pkt->pts != AV_NOPTS_VALUE)
 3746  pkt->pts += offset;
 3747 
 3748  // detect timestamp discontinuities for audio/video
 3749  if ((ist->par->codec_type == AVMEDIA_TYPE_VIDEO ||
 3750  ist->par->codec_type == AVMEDIA_TYPE_AUDIO) &&
 3751  pkt->dts != AV_NOPTS_VALUE)
 3753 }
3754 
3755 /*
3756  * Return
3757  * - 0 -- one packet was read and processed
3758  * - AVERROR(EAGAIN) -- no packets were available for selected file,
3759  * this function should be called again
3760  * - AVERROR_EOF -- this function should not be called again
3761  */
/*
 * Read and process one packet from the given input file (see the contract
 * comment above: 0 = one packet processed, EAGAIN = retry, handles EOF by
 * flushing decoders and finishing non-lavfi outputs).
 *
 * NOTE(review): recovered listing — the packet-read call that sets `ret`
 * and `pkt`, the loop-flush call, and parts of the debug_ts log arguments
 * were dropped by extraction.
 */
3762 static int process_input(int file_index)
 3763 {
 3764  InputFile *ifile = input_files[file_index];
 3766  InputStream *ist;
 3767  AVPacket *pkt;
 3768  int ret, i;
 3769 
 3770  is = ifile->ctx;
 3772 
 3773  if (ret == AVERROR(EAGAIN)) {
 3774  ifile->eagain = 1;
 3775  return ret;
 3776  }
 3777  if (ret == 1) {
 3778  /* the input file is looped: flush the decoders */
 3780  return AVERROR(EAGAIN);
 3781  }
 3782  if (ret < 0) {
 3783  if (ret != AVERROR_EOF) {
 3784  print_error(is->url, ret);
 3785  if (exit_on_error)
 3786  exit_program(1);
 3787  }
 3788 
/* EOF: flush every stream that still needs processing. */
 3789  for (i = 0; i < ifile->nb_streams; i++) {
 3790  ist = ifile->streams[i];
 3791  if (ist->processing_needed) {
 3792  ret = process_input_packet(ist, NULL, 0);
 3793  if (ret>0)
 3794  return 0;
 3795  }
 3796 
 3797  /* mark all outputs that don't go through lavfi as finished */
 3798  for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
 3799  if (ost->ist == ist &&
 3800  (!ost->enc_ctx || ost->enc_ctx->codec_type == AVMEDIA_TYPE_SUBTITLE)) {
 3801  OutputFile *of = output_files[ost->file_index];
 3802  of_output_packet(of, ost->pkt, ost, 1);
 3803  }
 3804  }
 3805  }
 3806 
 3807  ifile->eof_reached = 1;
 3808  return AVERROR(EAGAIN);
 3809  }
 3810 
 3811  reset_eagain();
 3812 
 3813  ist = ifile->streams[pkt->stream_index];
 3814 
/* Per-stream statistics for the final report. */
 3815  ist->data_size += pkt->size;
 3816  ist->nb_packets++;
 3817 
 3818  if (ist->discard)
 3819  goto discard_packet;
 3820 
 3821  /* add the stream-global side data to the first packet */
 3822  if (ist->nb_packets == 1) {
 3823  for (i = 0; i < ist->st->nb_side_data; i++) {
 3824  AVPacketSideData *src_sd = &ist->st->side_data[i];
 3825  uint8_t *dst_data;
 3826 
/* Display matrices are handled via autorotation, not copied here. */
 3827  if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
 3828  continue;
 3829 
 3830  if (av_packet_get_side_data(pkt, src_sd->type, NULL))
 3831  continue;
 3832 
 3833  dst_data = av_packet_new_side_data(pkt, src_sd->type, src_sd->size);
 3834  if (!dst_data)
 3835  report_and_exit(AVERROR(ENOMEM));
 3836 
 3837  memcpy(dst_data, src_sd->data, src_sd->size);
 3838  }
 3839  }
 3840 
 3841  // detect and try to correct for timestamp discontinuities
 3843 
 3844  if (debug_ts) {
 3845  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s duration:%s duration_time:%s off:%s off_time:%s\n",
 3846  ifile->index, pkt->stream_index,
 3853  }
 3854 
 3855  sub2video_heartbeat(ist, pkt->pts);
 3856 
 3857  process_input_packet(ist, pkt, 0);
 3858 
 3859 discard_packet:
 3860  av_packet_free(&pkt);
 3861 
 3862  return 0;
 3863 }
3864 
3865 /**
3866  * Perform a step of transcoding for the specified filter graph.
3867  *
3868  * @param[in] graph filter graph to consider
3869  * @param[out] best_ist input stream where a frame would allow to continue
3870  * @return 0 for success, <0 for error
3871  */
/*
 * Run one transcoding step for a filter graph (see the contract comment
 * above): try to pull output from the graph; on EOF drain and close its
 * output streams; on EAGAIN pick the input stream whose buffersrc has the
 * most failed requests as the best candidate for more input.
 *
 * NOTE(review): recovered listing — the graph-request call that sets `ret`
 * (original line 3880) and part of the skip condition in the input loop
 * were dropped by extraction.
 */
3872 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
 3873 {
 3874  int i, ret;
 3875  int nb_requests, nb_requests_max = 0;
 3876  InputFilter *ifilter;
 3877  InputStream *ist;
 3878 
 3879  *best_ist = NULL;
 3881  if (ret >= 0)
 3882  return reap_filters(0);
 3883 
/* Graph fully drained: flush outputs and close the attached streams. */
 3884  if (ret == AVERROR_EOF) {
 3885  ret = reap_filters(1);
 3886  for (i = 0; i < graph->nb_outputs; i++)
 3887  close_output_stream(graph->outputs[i]->ost);
 3888  return ret;
 3889  }
 3890  if (ret != AVERROR(EAGAIN))
 3891  return ret;
 3892 
/* Choose the input that the graph has asked for most often and got nothing. */
 3893  for (i = 0; i < graph->nb_inputs; i++) {
 3894  ifilter = graph->inputs[i];
 3895  ist = ifilter->ist;
 3896  if (input_files[ist->file_index]->eagain ||
 3898  continue;
 3899  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
 3900  if (nb_requests > nb_requests_max) {
 3901  nb_requests_max = nb_requests;
 3902  *best_ist = ist;
 3903  }
 3904  }
 3905 
/* No feedable input: mark the graph's outputs unavailable for this round. */
 3906  if (!*best_ist)
 3907  for (i = 0; i < graph->nb_outputs; i++)
 3908  graph->outputs[i]->ost->unavailable = 1;
 3909 
 3910  return 0;
 3911 }
3912 
3913 /**
3914  * Run a single step of transcoding.
3915  *
3916  * @return 0 for success, <0 for error
3917  */
/*
 * Run a single step of transcoding: pick the output stream to drive,
 * (re)configure its filter graph if possible, choose the input stream that
 * should be fed next, read one packet from it and reap filtered frames.
 * Returns 0 on success, <0 on error (AVERROR_EOF when nothing is left).
 *
 * NOTE(review): recovered listing — the early audio-encoder init call under
 * the long comment (original line 3968) was dropped by extraction.
 */
3918 static int transcode_step(void)
 3919 {
 3920  OutputStream *ost;
 3921  InputStream *ist = NULL;
 3922  int ret;
 3923 
 3924  ost = choose_output();
 3925  if (!ost) {
/* All outputs temporarily unavailable: back off briefly and retry. */
 3926  if (got_eagain()) {
 3927  reset_eagain();
 3928  av_usleep(10000);
 3929  return 0;
 3930  }
 3931  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
 3932  return AVERROR_EOF;
 3933  }
 3934 
/* Configure the graph once all its inputs know their formats. */
 3935  if (ost->filter && !ost->filter->graph->graph) {
 3936  if (ifilter_has_all_input_formats(ost->filter->graph)) {
 3937  ret = configure_filtergraph(ost->filter->graph);
 3938  if (ret < 0) {
 3939  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
 3940  return ret;
 3941  }
 3942  }
 3943  }
 3944 
 3945  if (ost->filter && ost->filter->graph->graph) {
 3946  /*
 3947  * Similar case to the early audio initialization in reap_filters.
 3948  * Audio is special in ffmpeg.c currently as we depend on lavfi's
 3949  * audio frame buffering/creation to get the output audio frame size
 3950  * in samples correct. The audio frame size for the filter chain is
 3951  * configured during the output stream initialization.
 3952  *
 3953  * Apparently avfilter_graph_request_oldest (called in
 3954  * transcode_from_filter just down the line) peeks. Peeking already
 3955  * puts one frame "ready to be given out", which means that any
 3956  * update in filter buffer sink configuration afterwards will not
 3957  * help us. And yes, even if it would be utilized,
 3958  * av_buffersink_get_samples is affected, as it internally utilizes
 3959  * the same early exit for peeked frames.
 3960  *
 3961  * In other words, if avfilter_graph_request_oldest would not make
 3962  * further filter chain configuration or usage of
 3963  * av_buffersink_get_samples useless (by just causing the return
 3964  * of the peeked AVFrame as-is), we could get rid of this additional
 3965  * early encoder initialization.
 3966  */
 3967  if (av_buffersink_get_type(ost->filter->filter) == AVMEDIA_TYPE_AUDIO)
 3969 
 3970  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
 3971  return ret;
 3972  if (!ist)
 3973  return 0;
 3974  } else if (ost->filter) {
/* Graph not configured yet: feed any input that has produced no output. */
 3975  int i;
 3976  for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
 3977  InputFilter *ifilter = ost->filter->graph->inputs[i];
 3978  if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
 3979  ist = ifilter->ist;
 3980  break;
 3981  }
 3982  }
 3983  if (!ist) {
 3984  ost->inputs_done = 1;
 3985  return 0;
 3986  }
 3987  } else {
/* No filtering (streamcopy): read directly from the bound input stream. */
 3988  ist = ost->ist;
 3989  av_assert0(ist);
 3990  }
 3991 
 3992  ret = process_input(ist->file_index);
 3993  if (ret == AVERROR(EAGAIN)) {
 3994  if (input_files[ist->file_index]->eagain)
 3995  ost->unavailable = 1;
 3996  return 0;
 3997  }
 3998 
 3999  if (ret < 0)
 4000  return ret == AVERROR_EOF ? 0 : ret;
 4001 
 4002  return reap_filters(0);
 4003 }
4004 
4005 /*
4006  * The following code is the main loop of the file converter
4007  */
/*
 * Main conversion loop: initialize, then repeatedly run transcode_step()
 * until a signal, a 'q' keypress, no output is wanted, or a fatal error.
 * Afterwards flush decoders and encoders, write trailers, print the final
 * report and enforce the abort-on-empty-output checks.
 *
 * NOTE(review): recovered listing — the trailer-writing call inside the
 * loop (original line 4061) and one cleanup call near the end were dropped
 * by extraction.
 */
4008 static int transcode(void)
 4009 {
 4010  int ret, i;
 4011  InputStream *ist;
 4012  int64_t timer_start;
 4013  int64_t total_packets_written = 0;
 4014 
 4015  ret = transcode_init();
 4016  if (ret < 0)
 4017  goto fail;
 4018 
 4019  if (stdin_interaction) {
 4020  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
 4021  }
 4022 
 4023  timer_start = av_gettime_relative();
 4024 
 4025  while (!received_sigterm) {
 4026  int64_t cur_time= av_gettime_relative();
 4027 
 4028  /* if 'q' pressed, exits */
 4029  if (stdin_interaction)
 4030  if (check_keyboard_interaction(cur_time) < 0)
 4031  break;
 4032 
 4033  /* check if there's any stream where output is still needed */
 4034  if (!need_output()) {
 4035  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
 4036  break;
 4037  }
 4038 
 4039  ret = transcode_step();
 4040  if (ret < 0 && ret != AVERROR_EOF) {
 4041  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
 4042  break;
 4043  }
 4044 
 4045  /* dump report by using the output first video and audio streams */
 4046  print_report(0, timer_start, cur_time);
 4047  }
 4048 
 4049  /* at the end of stream, we must flush the decoder buffers */
 4050  for (ist = ist_iter(NULL); ist; ist = ist_iter(ist)) {
 4051  if (!input_files[ist->file_index]->eof_reached) {
 4052  process_input_packet(ist, NULL, 0);
 4053  }
 4054  }
 4055  flush_encoders();
 4056 
 4057  term_exit();
 4058 
 4059  /* write the trailer if needed */
 4060  for (i = 0; i < nb_output_files; i++) {
 4062  if (ret < 0 && exit_on_error)
 4063  exit_program(1);
 4064  }
 4065 
 4066  /* dump report by using the first video and audio streams */
 4067  print_report(1, timer_start, av_gettime_relative());
 4068 
 4069  /* close each encoder */
 4070  for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
 4071  uint64_t packets_written;
 4072  packets_written = atomic_load(&ost->packets_written);
 4073  total_packets_written += packets_written;
/* -abort_on empty_output_stream: any stream with zero packets is fatal. */
 4074  if (!packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM)) {
 4075  av_log(ost, AV_LOG_FATAL, "Empty output\n");
 4076  exit_program(1);
 4077  }
 4078  }
 4079 
 4080  if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
 4081  av_log(NULL, AV_LOG_FATAL, "Empty output\n");
 4082  exit_program(1);
 4083  }
 4084 
 4086 
 4087  /* finished ! */
 4088  ret = 0;
 4089 
 4090  fail:
 4091  return ret;
 4092 }
4093 
/*
 * Capture a benchmark snapshot: wall-clock time (av_gettime_relative) plus
 * user/system CPU time in microseconds via getrusage() or, on Windows,
 * GetProcessTimes(); zeros when neither API is available.
 *
 * NOTE(review): recovered listing — the signature line (original 4094,
 * returning BenchmarkTimeStamps per the initializer below) was dropped by
 * extraction.
 */
4095 {
 4096  BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
 4097 #if HAVE_GETRUSAGE
 4098  struct rusage rusage;
 4099 
 4100  getrusage(RUSAGE_SELF, &rusage);
 4101  time_stamps.user_usec =
 4102  (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
 4103  time_stamps.sys_usec =
 4104  (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
 4105 #elif HAVE_GETPROCESSTIMES
 4106  HANDLE proc;
 4107  FILETIME c, e, k, u;
 4108  proc = GetCurrentProcess();
 4109  GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME is in 100ns units; divide by 10 to get microseconds. */
 4110  time_stamps.user_usec =
 4111  ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
 4112  time_stamps.sys_usec =
 4113  ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
 4114 #else
 4115  time_stamps.user_usec = time_stamps.sys_usec = 0;
 4116 #endif
 4117  return time_stamps;
 4118 }
4119 
/*
 * Return the process's peak memory usage in bytes: ru_maxrss via
 * getrusage() where available, PeakPagefileUsage on Windows, else 0.
 */
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage usage;

    getrusage(RUSAGE_SELF, &usage);
    /* ru_maxrss is reported in kilobytes; convert to bytes. */
    return 1024 * (int64_t)usage.ru_maxrss;
#elif HAVE_GETPROCESSMEMORYINFO
    PROCESS_MEMORY_COUNTERS pmc;
    HANDLE self = GetCurrentProcess();

    pmc.cb = sizeof(pmc);
    GetProcessMemoryInfo(self, &pmc, sizeof(pmc));
    return pmc.PeakPagefileUsage;
#else
    return 0;
#endif
}
4137 
/*
 * Program entry point: initialize runtime (dynload, logging, devices),
 * parse options, validate that input/output files were given, run
 * transcode(), optionally print -benchmark figures, then exit with
 * main_return_code.
 *
 * NOTE(review): recovered listing — many lines were dropped by extraction
 * (benchmark variable declarations, register-all calls, the current_time
 * sampling and final cleanup/exit lines), so several statements below
 * reference values whose producing lines are missing.
 */
4138 int main(int argc, char **argv)
 4139 {
 4140  int ret;
 4142 
 4143  init_dynload();
 4144 
 4146 
 4147  setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
 4148 
 4150  parse_loglevel(argc, argv, options);
 4151 
 4152 #if CONFIG_AVDEVICE
 4154 #endif
 4156 
 4157  show_banner(argc, argv, options);
 4158 
 4159  /* parse options and open all input/output files */
 4160  ret = ffmpeg_parse_options(argc, argv);
 4161  if (ret < 0)
 4162  exit_program(1);
 4163 
 4164  if (nb_output_files <= 0 && nb_input_files == 0) {
 4165  show_usage();
 4166  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
 4167  exit_program(1);
 4168  }
 4169 
 4170  /* file converter / grab */
 4171  if (nb_output_files <= 0) {
 4172  av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
 4173  exit_program(1);
 4174  }
 4175 
 4177  if (transcode() < 0)
 4178  exit_program(1);
 4179  if (do_benchmark) {
 4180  int64_t utime, stime, rtime;
/* Differences against the pre-transcode snapshot `ti` (declaration dropped). */
 4182  utime = current_time.user_usec - ti.user_usec;
 4183  stime = current_time.sys_usec - ti.sys_usec;
 4184  rtime = current_time.real_usec - ti.real_usec;
 4186  "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
 4187  utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
 4188  }
 4189  av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
 4192  exit_program(69);
 4193 
 4195  return main_return_code;
 4196 }
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:31
sub2video_copy_rect
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg.c:179
AVSubtitle
Definition: avcodec.h:2330
KeyframeForceCtx::pts
int64_t * pts
Definition: ffmpeg.h:552
avcodec_encode_subtitle
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVSubtitle *sub)
Definition: encode.c:164
InputFilter::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg.h:284
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:422
AVCodec
AVCodec.
Definition: codec.h:184
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:204
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
av_codec_get_id
enum AVCodecID av_codec_get_id(const struct AVCodecTag *const *tags, unsigned int tag)
Get the AVCodecID for the given codec tag tag.
AVFILTER_CMD_FLAG_ONE
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:733
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
av_buffersink_get_ch_layout
int av_buffersink_get_ch_layout(const AVFilterContext *ctx, AVChannelLayout *out)
Definition: buffersink.c:219
need_output
static int need_output(void)
Definition: ffmpeg.c:3455
check_output_constraints
static int check_output_constraints(InputStream *ist, OutputStream *ost)
Definition: ffmpeg.c:1847
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
av_buffersink_get_sample_aspect_ratio
AVRational av_buffersink_get_sample_aspect_ratio(const AVFilterContext *ctx)
psnr
static double psnr(double d)
Definition: ffmpeg.c:751
AV_FIELD_PROGRESSIVE
@ AV_FIELD_PROGRESSIVE
Definition: codec_par.h:40
AVERROR_EXPERIMENTAL
#define AVERROR_EXPERIMENTAL
Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it.
Definition: error.h:74
KeyframeForceCtx::dropped_keyframe
int dropped_keyframe
Definition: ffmpeg.h:559
AV_CODEC_ID_AC3
@ AV_CODEC_ID_AC3
Definition: codec_id.h:441
ifilter_parameters_from_codecpar
static int ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
Definition: ffmpeg.c:1766
extra_bits
#define extra_bits(eb)
Definition: intrax8.c:125
INFINITY
#define INFINITY
Definition: mathematics.h:67
av_clip
#define av_clip
Definition: common.h:95
FKF_PREV_FORCED_T
@ FKF_PREV_FORCED_T
Definition: ffmpeg.h:491
atomic_store
#define atomic_store(object, desired)
Definition: stdatomic.h:85
dec_ctx
static AVCodecContext * dec_ctx
Definition: decode_filter_audio.c:46
reset_eagain
static void reset_eagain(void)
Definition: ffmpeg.c:3631
avcodec_receive_packet
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:521
VSYNC_VFR
@ VSYNC_VFR
Definition: ffmpeg.h:62
r
const char * r
Definition: vf_curves.c:126
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
nb_input_files
int nb_input_files
Definition: ffmpeg.c:144
opt.h
configure_filtergraph
int configure_filtergraph(FilterGraph *fg)
Definition: ffmpeg_filter.c:1112
ffmpeg_exited
static volatile int ffmpeg_exited
Definition: ffmpeg.c:333
AVCodecContext::get_format
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
Callback to negotiate the pixel format.
Definition: avcodec.h:689
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:58
LIBAVCODEC_IDENT
#define LIBAVCODEC_IDENT
Definition: version.h:43
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1002
av_compare_ts
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
Definition: mathematics.c:147
libm.h
FrameData
Definition: ffmpeg.c:117
InputFilter::width
int width
Definition: ffmpeg.h:283
AVProgram::nb_stream_indexes
unsigned int nb_stream_indexes
Definition: avformat.h:1033
InputFilter::displaymatrix
int32_t * displaymatrix
Definition: ffmpeg.h:290
ENC_STATS_PTS
@ ENC_STATS_PTS
Definition: ffmpeg.h:507
out
FILE * out
Definition: movenc.c:54
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:242
ENC_STATS_FRAME_NUM_IN
@ ENC_STATS_FRAME_NUM_IN
Definition: ffmpeg.h:504
init_output_stream
static int init_output_stream(OutputStream *ost, AVFrame *frame, char *error, int error_len)
Definition: ffmpeg.c:3203
AVCodecContext::sample_rate
int sample_rate
samples per second
Definition: avcodec.h:1034
KF_FORCE_SOURCE_NO_DROP
@ KF_FORCE_SOURCE_NO_DROP
Definition: ffmpeg.h:543
FKF_PREV_FORCED_N
@ FKF_PREV_FORCED_N
Definition: ffmpeg.h:490
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:682
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
FilterGraph::graph_desc
const char * graph_desc
Definition: ffmpeg.h:321
AVCodecParameters
This struct describes the properties of an encoded stream.
Definition: codec_par.h:54
InputStream::data_size
uint64_t data_size
Definition: ffmpeg.h:432
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:262
AV_LOG_QUIET
#define AV_LOG_QUIET
Print no output.
Definition: log.h:162
sub
static float sub(float src0, float src1)
Definition: dnn_backend_native_layer_mathbinary.c:31
AVFrame::duration
int64_t duration
Duration of the frame, in the same units as pts.
Definition: frame.h:728
thread.h
AV_RL64
uint64_t_TMPL AV_RL64
Definition: bytestream.h:91
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2888
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AVFMT_VARIABLE_FPS
#define AVFMT_VARIABLE_FPS
Format allows variable fps.
Definition: avformat.h:482
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
FilterGraph::inputs
InputFilter ** inputs
Definition: ffmpeg.h:329
AV_DISPOSITION_ATTACHED_PIC
#define AV_DISPOSITION_ATTACHED_PIC
The stream is stored in the file as an attached picture/"cover art" (e.g.
Definition: avformat.h:769
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:146
ENC_STATS_DTS
@ ENC_STATS_DTS
Definition: ffmpeg.h:511
sub2video_heartbeat
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:268
avcodec_parameters_from_context
int avcodec_parameters_from_context(AVCodecParameters *par, const AVCodecContext *codec)
Fill the parameters struct based on the values from the supplied codec context.
Definition: codec_par.c:99
remove_avoptions
void remove_avoptions(AVDictionary **a, AVDictionary *b)
Definition: ffmpeg.c:620
AVPictureType
AVPictureType
Definition: avutil.h:272
KeyframeForceCtx::nb_pts
int nb_pts
Definition: ffmpeg.h:553
frame_drop_threshold
float frame_drop_threshold
Definition: ffmpeg_opt.c:71
InputStream::dec_ctx
AVCodecContext * dec_ctx
Definition: ffmpeg.h:353
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
transcode_step
static int transcode_step(void)
Run a single step of transcoding.
Definition: ffmpeg.c:3918
BenchmarkTimeStamps::user_usec
int64_t user_usec
Definition: ffmpeg.c:125
AVSubtitleRect
Definition: avcodec.h:2302
AVSubtitle::num_rects
unsigned num_rects
Definition: avcodec.h:2334
ENC_STATS_AVG_BITRATE
@ ENC_STATS_AVG_BITRATE
Definition: ffmpeg.h:517
AV_BUFFERSRC_FLAG_KEEP_REF
@ AV_BUFFERSRC_FLAG_KEEP_REF
Keep a reference to the frame.
Definition: buffersrc.h:53
ist_iter
InputStream * ist_iter(InputStream *prev)
Definition: ffmpeg.c:604
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:99
OutputFile::start_time
int64_t start_time
start time in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:706
SQFRAME
#define SQFRAME(frame)
Definition: sync_queue.h:38
InputFilter::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg.h:287
ffmpeg_parse_options
int ffmpeg_parse_options(int argc, char **argv)
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:330
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
pixdesc.h
cleanup
static av_cold void cleanup(FlashSV2Context *s)
Definition: flashsv2enc.c:130
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:437
AVCodecContext::color_trc
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:995
AVFrame::width
int width
Definition: frame.h:402
of_filesize
int64_t of_filesize(OutputFile *of)
Definition: ffmpeg_mux.c:747
OutputStream::unavailable
int unavailable
Definition: ffmpeg.h:641
AVPacketSideData
Definition: packet.h:315
AVCodec::capabilities
int capabilities
Codec capabilities.
Definition: codec.h:203
w
uint8_t w
Definition: llviddspenc.c:38
FKF_T
@ FKF_T
Definition: ffmpeg.h:492
sync_queue.h
LastFrameDuration::stream_idx
int stream_idx
Definition: ffmpeg.h:446
AVPacket::data
uint8_t * data
Definition: packet.h:374
current_time
static BenchmarkTimeStamps current_time
Definition: ffmpeg.c:140
AVComponentDescriptor::depth
int depth
Number of bits in the component.
Definition: pixdesc.h:57
ENC_STATS_LITERAL
@ ENC_STATS_LITERAL
Definition: ffmpeg.h:500
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:1031
ost_iter
static OutputStream * ost_iter(OutputStream *prev)
Definition: ffmpeg.c:588
AVFrame::top_field_first
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:491
dup_warning
static uint64_t dup_warning
Definition: ffmpeg.c:135
OutputStream::index
int index
Definition: ffmpeg.h:566
ATOMIC_VAR_INIT
#define ATOMIC_VAR_INIT(value)
Definition: stdatomic.h:31
b
#define b
Definition: input.c:41
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:156
fix_sub_duration_heartbeat
static int fix_sub_duration_heartbeat(InputStream *ist, int64_t signal_pts)
Definition: ffmpeg.c:2465
AVStream::avg_frame_rate
AVRational avg_frame_rate
Average framerate.
Definition: avformat.h:927
KeyframeForceCtx::type
int type
Definition: ffmpeg.h:547
nb_output_dumped
unsigned nb_output_dumped
Definition: ffmpeg.c:138
forced_kf_apply
static enum AVPictureType forced_kf_apply(void *logctx, KeyframeForceCtx *kf, AVRational tb, const AVFrame *in_picture, int dup_idx)
Definition: ffmpeg.c:1201
InputStream::nb_filters
int nb_filters
Definition: ffmpeg.h:417
atomic_int
intptr_t atomic_int
Definition: stdatomic.h:55
AV_DICT_IGNORE_SUFFIX
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key,...
Definition: dict.h:75
AVCodecContext::subtitle_header
uint8_t * subtitle_header
Header containing style information for text subtitles.
Definition: avcodec.h:1723
version.h
AVSubtitleRect::linesize
int linesize[4]
Definition: avcodec.h:2314
transcode
static int transcode(void)
Definition: ffmpeg.c:4008
ffmpeg.h
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
BenchmarkTimeStamps::sys_usec
int64_t sys_usec
Definition: ffmpeg.c:126
progress_avio
AVIOContext * progress_avio
Definition: ffmpeg.c:141
show_usage
void show_usage(void)
Definition: ffmpeg_opt.c:1206
do_audio_out
static void do_audio_out(OutputFile *of, OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:1025
FilterGraph::nb_inputs
int nb_inputs
Definition: ffmpeg.h:330
av_display_rotation_set
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure clockwise rotation by the specified angle (in de...
Definition: display.c:51
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:392
AVCodecParameters::codec_tag
uint32_t codec_tag
Additional information about the codec (corresponds to the AVI FOURCC).
Definition: codec_par.h:66
mathematics.h
filter
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
Definition: filter_design.txt:228
AVDictionary
Definition: dict.c:32
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:585
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:305
trigger_fix_sub_duration_heartbeat
static int trigger_fix_sub_duration_heartbeat(OutputStream *ost, const AVPacket *pkt)
Definition: ffmpeg.c:2484
decode_audio
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
Definition: ffmpeg.c:2145
program_birth_year
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffmpeg.c:112
LastFrameDuration
Definition: ffmpeg.h:445
InputStream::decoding_needed
int decoding_needed
Definition: ffmpeg.h:340
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:311
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:330
flush_encoders
static void flush_encoders(void)
Definition: ffmpeg.c:1784
ost
static AVStream * ost
Definition: vaapi_transcode.c:42
tf_sess_config.config
config
Definition: tf_sess_config.py:33
os_support.h
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:429
qp_hist
int qp_hist
Definition: ffmpeg_opt.c:83
OutputStream::ist
InputStream * ist
Definition: ffmpeg.h:571
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: avpacket.c:73
term_exit_sigsafe
static void term_exit_sigsafe(void)
Definition: ffmpeg.c:316
init_output_stream_encode
static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:3028
ECHO
#define ECHO(name, type, min, max)
Definition: af_aecho.c:157
ENC_STATS_TIMEBASE_IN
@ ENC_STATS_TIMEBASE_IN
Definition: ffmpeg.h:506
AVIOInterruptCB
Callback for checking whether to abort blocking functions.
Definition: avio.h:59
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:119
InputFilter::ist
struct InputStream * ist
Definition: ffmpeg.h:273
OutputFile::nb_streams
int nb_streams
Definition: ffmpeg.h:701
av_memdup
void * av_memdup(const void *p, size_t size)
Duplicate a buffer with av_malloc().
Definition: mem.c:302
InputFile::eof_reached
int eof_reached
Definition: ffmpeg.h:454
exit_program
void exit_program(int ret)
Wraps exit with a program-specific cleanup routine.
Definition: cmdutils.c:99
InputStream
Definition: ffmpeg.h:335
filter_nbthreads
char * filter_nbthreads
Definition: ffmpeg_opt.c:86
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:1750
debug_ts
int debug_ts
Definition: ffmpeg_opt.c:79
got_eagain
static int got_eagain(void)
Definition: ffmpeg.c:3623
AVPacketSideData::size
size_t size
Definition: packet.h:317
ifilter_send_eof
static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
Definition: ffmpeg.c:2057
stats_period
int64_t stats_period
Definition: ffmpeg_opt.c:90
print_error
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:799
av_buffersink_set_frame_size
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
Set the frame size for an audio buffer sink.
Definition: buffersink.c:182
dts_delta_threshold
float dts_delta_threshold
Definition: ffmpeg_opt.c:67
fifo.h
vstats_version
int vstats_version
Definition: ffmpeg_opt.c:88
submit_encode_frame
static int submit_encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:979
init_input_stream
static int init_input_stream(InputStream *ist, char *error, int error_len)
Definition: ffmpeg.c:2793
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:435
AVPacket::opaque_ref
AVBufferRef * opaque_ref
AVBufferRef for free use by the API user.
Definition: packet.h:410
AVCodecContext::ch_layout
AVChannelLayout ch_layout
Audio channel layout.
Definition: avcodec.h:2054
report_and_exit
void report_and_exit(int ret)
Reports an error corresponding to the provided AVERROR code and calls exit_program() with the corresp...
Definition: cmdutils.c:93
InputStream::sub2video
struct InputStream::sub2video sub2video
fail
#define fail()
Definition: checkasm.h:134
InputFilter::type
enum AVMediaType type
Definition: ffmpeg.h:276
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
av_shrink_packet
void av_shrink_packet(AVPacket *pkt, int size)
Reduce packet size, correctly zeroing padding.
Definition: avpacket.c:112
AV_PIX_FMT_FLAG_HWACCEL
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:128
FFSIGN
#define FFSIGN(a)
Definition: common.h:65
samplefmt.h
AVSubtitleRect::x
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:2303
InputStream::decoder_opts
AVDictionary * decoder_opts
Definition: ffmpeg.h:391
AVProgram::discard
enum AVDiscard discard
selects which program to discard and which to feed to the caller
Definition: avformat.h:1031
InputStream::filter_in_rescale_delta_last
int64_t filter_in_rescale_delta_last
Definition: ffmpeg.h:378
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
AVFrame::key_frame
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:422
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:506
KeyframeForceCtx::ref_pts
int64_t ref_pts
Definition: ffmpeg.h:549
sub2video_get_blank_frame
static int sub2video_get_blank_frame(InputStream *ist)
Definition: ffmpeg.c:164
InputStream::nb_packets
uint64_t nb_packets
Definition: ffmpeg.h:434
AVFrame::ch_layout
AVChannelLayout ch_layout
Channel layout of the audio data.
Definition: frame.h:723
sq_receive
int sq_receive(SyncQueue *sq, int stream_idx, SyncQueueFrame frame)
Read a frame from the queue.
Definition: sync_queue.c:339
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
scale
static av_always_inline float scale(float x, float s)
Definition: vf_v360.c:1389
pts
static int64_t pts
Definition: transcode_aac.c:653
hw_device_setup_for_decode
int hw_device_setup_for_decode(InputStream *ist)
Definition: ffmpeg_hw.c:317
us
#define us(width, name, range_min, range_max, subs,...)
Definition: cbs_h2645.c:276
AV_PKT_DATA_DISPLAYMATRIX
@ AV_PKT_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: packet.h:109
AV_CODEC_ID_MP3
@ AV_CODEC_ID_MP3
preferred ID for decoding MPEG audio layer 1, 2 or 3
Definition: codec_id.h:439
AVStream::duration
int64_t duration
Decoding: duration of the stream, in stream time base.
Definition: avformat.h:897
av_codec_get_tag2
int av_codec_get_tag2(const struct AVCodecTag *const *tags, enum AVCodecID id, unsigned int *tag)
Get the codec tag for the given codec id.
FrameData::tb
AVRational tb
Definition: ffmpeg.c:120
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
InputStream::sub2video::last_pts
int64_t last_pts
Definition: ffmpeg.h:406
AVRational::num
int num
Numerator.
Definition: rational.h:59
avformat_network_init
int avformat_network_init(void)
Do global initialization of network libraries.
Definition: utils.c:557
InputFile
Definition: ffmpeg.h:450
AVSubtitleRect::ass
char * ass
0 terminated ASS/SSA compatible event line.
Definition: avcodec.h:2325
AV_CODEC_FLAG_INTERLACED_DCT
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:309
init_output_stream_streamcopy
static int init_output_stream_streamcopy(OutputStream *ost)
Definition: ffmpeg.c:2852
avsubtitle_free
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: avcodec.c:409
AV_DICT_DONT_STRDUP_VAL
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that's been allocated with av_malloc() or another memory allocation functio...
Definition: dict.h:79
AV_CODEC_ID_DVB_SUBTITLE
@ AV_CODEC_ID_DVB_SUBTITLE
Definition: codec_id.h:545
InputStream::last_pkt_repeat_pict
int last_pkt_repeat_pict
Definition: ffmpeg.h:376
avcodec_decode_subtitle2
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, const AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:859
ffmpeg_cleanup
static void ffmpeg_cleanup(int ret)
Definition: ffmpeg.c:502
InputStream::first_dts
int64_t first_dts
dts of the first packet read for this stream (in AV_TIME_BASE units)
Definition: ffmpeg.h:365
InputStream::hwaccel_pix_fmt
enum AVPixelFormat hwaccel_pix_fmt
Definition: ffmpeg.h:428
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:75
avassert.h
InputStream::dts
int64_t dts
dts of the last packet read for this stream (in AV_TIME_BASE units)
Definition: ffmpeg.h:366
AVCodecContext::color_primaries
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:988
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM
#define ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM
Definition: ffmpeg.h:497
decode_flush
static void decode_flush(InputFile *ifile)
Definition: ffmpeg.c:3640
av_thread_message_queue_send
int av_thread_message_queue_send(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Send a message on the queue.
Definition: threadmessage.c:158
ENC_STATS_PTS_IN
@ ENC_STATS_PTS_IN
Definition: ffmpeg.h:509
choose_output
static OutputStream * choose_output(void)
Select the output stream to process.
Definition: ffmpeg.c:3472
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
BenchmarkTimeStamps::real_usec
int64_t real_usec
Definition: ffmpeg.c:124
duration
int64_t duration
Definition: movenc.c:64
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
AVCodecParameters::frame_size
int frame_size
Audio only.
Definition: codec_par.h:189
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:60
av_opt_set_dict
int av_opt_set_dict(void *obj, AVDictionary **options)
Set all the options from a given dictionary on an object.
Definition: opt.c:1767
float
float
Definition: af_crystalizer.c:122
HWACCEL_GENERIC
@ HWACCEL_GENERIC
Definition: ffmpeg.h:72
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:721
VSYNC_VSCFR
@ VSYNC_VSCFR
Definition: ffmpeg.h:63
EncStats::components
EncStatsComponent * components
Definition: ffmpeg.h:528
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:153
AVCodec::supported_framerates
const AVRational * supported_framerates
array of supported framerates, or NULL if any, array is terminated by {0,0}
Definition: codec.h:205
llrintf
#define llrintf(x)
Definition: libm.h:399
AVCodecDescriptor
This struct describes the properties of a single codec described by an AVCodecID.
Definition: codec_desc.h:38
InputStream::cfr_next_pts
int64_t cfr_next_pts
Definition: ffmpeg.h:385
intreadwrite.h
s
#define s(width, name)
Definition: cbs_vp9.c:256
av_new_packet
int av_new_packet(AVPacket *pkt, int size)
Allocate the payload of a packet and initialize its fields with default values.
Definition: avpacket.c:97
AVCodecContext::global_quality
int global_quality
Global quality for codecs which cannot change it per frame.
Definition: avcodec.h:492
get_benchmark_time_stamps
static BenchmarkTimeStamps get_benchmark_time_stamps(void)
Definition: ffmpeg.c:4094
FilterGraph::outputs
OutputFilter ** outputs
Definition: ffmpeg.h:331
vstats_filename
char * vstats_filename
Definition: ffmpeg_opt.c:63
ifilter_has_all_input_formats
static int ifilter_has_all_input_formats(FilterGraph *fg)
Definition: ffmpeg.c:1966
copy_ts_first_pts
static int64_t copy_ts_first_pts
Definition: ffmpeg.c:335
close_output_stream
static void close_output_stream(OutputStream *ost)
Definition: ffmpeg.c:664
InputStream::framerate
AVRational framerate
Definition: ffmpeg.h:392
av_realloc_array
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
Definition: mem.c:215
AVCodecParameters::sample_aspect_ratio
AVRational sample_aspect_ratio
Video only.
Definition: codec_par.h:138
ENC_STATS_FILE_IDX
@ ENC_STATS_FILE_IDX
Definition: ffmpeg.h:501
AVDictionaryEntry::key
char * key
Definition: dict.h:90
ENCODER_FINISHED
@ ENCODER_FINISHED
Definition: ffmpeg.h:537
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
AVSubtitleRect::y
int y
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:2304
AVCodecParameters::width
int width
Video only.
Definition: codec_par.h:128
AVCodecContext::ticks_per_frame
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:557
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
InputFilter
Definition: ffmpeg.h:271
AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
This encoder can reorder user opaque values from input AVFrames and return them with corresponding ou...
Definition: codec.h:156
av_buffersink_get_format
int av_buffersink_get_format(const AVFilterContext *ctx)
avcodec_receive_frame
int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder or encoder (when the AV_CODEC_FLAG_RECON_FRAME flag is used...
Definition: avcodec.c:709
ENC_STATS_BITRATE
@ ENC_STATS_BITRATE
Definition: ffmpeg.h:516
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
term_init
void term_init(void)
Definition: ffmpeg.c:395
do_streamcopy
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
Definition: ffmpeg.c:1863
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
OutputFilter::ost
struct OutputStream * ost
Definition: ffmpeg.h:297
sq_set_tb
void sq_set_tb(SyncQueue *sq, unsigned int stream_idx, AVRational tb)
Set the timebase for the stream with index stream_idx.
Definition: sync_queue.c:378
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1487
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
AVPacketSideData::data
uint8_t * data
Definition: packet.h:316
MUXER_FINISHED
@ MUXER_FINISHED
Definition: ffmpeg.h:538
video_sync_process
static void video_sync_process(OutputFile *of, OutputStream *ost, AVFrame *next_picture, double duration, int64_t *nb_frames, int64_t *nb_frames_prev)
Definition: ffmpeg.c:1132
ctx
AVFormatContext * ctx
Definition: movenc.c:48
InputStream::filters
InputFilter ** filters
Definition: ffmpeg.h:416
limits.h
av_expr_eval
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:766
nb_streams
static int nb_streams
Definition: ffprobe.c:309
av_frame_clone
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:465
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
max_error_rate
float max_error_rate
Definition: ffmpeg_opt.c:85
AVSubtitle::pts
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2336
abort_codec_experimental
static void abort_codec_experimental(const AVCodec *c, int encoder)
Definition: ffmpeg.c:638
on
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter and add an entry for foobar following the pattern of the other filters edit libavfilter allfilters and add an entry for foobar following the pattern of the other filters configure make j< whatever > ffmpeg ffmpeg i you should get a foobar png with Lena edge detected That s your new playground is ready Some little details about what s going on
Definition: writing_filters.txt:34
term_exit
void term_exit(void)
Definition: ffmpeg.c:324
AVOutputFormat::codec_tag
const struct AVCodecTag *const * codec_tag
List of supported codec_id-codec_tag pairs, ordered by "better choice first".
Definition: avformat.h:533
av_hwdevice_get_type_name
const char * av_hwdevice_get_type_name(enum AVHWDeviceType type)
Get the string name of an AVHWDeviceType.
Definition: hwcontext.c:93
InputStream::hwaccel_retrieve_data
int(* hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame)
Definition: ffmpeg.h:427
enc_stats_write
void enc_stats_write(OutputStream *ost, EncStats *es, const AVFrame *frame, const AVPacket *pkt, uint64_t frame_num)
Definition: ffmpeg.c:811
AV_CODEC_ID_CODEC2
@ AV_CODEC_ID_CODEC2
Definition: codec_id.h:505
InputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:274
AVCodecContext::error
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1436
key
const char * key
Definition: hwcontext_opencl.c:174
av_usleep
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
AVMEDIA_TYPE_DATA
@ AVMEDIA_TYPE_DATA
Opaque data information usually continuous.
Definition: avutil.h:203
ts_discontinuity_process
static void ts_discontinuity_process(InputFile *ifile, InputStream *ist, AVPacket *pkt)
Definition: ffmpeg.c:3735
atomic_load
#define atomic_load(object)
Definition: stdatomic.h:93
AVSubtitleRect::text
char * text
0 terminated plain UTF-8 text
Definition: avcodec.h:2318
assert_avoptions
void assert_avoptions(AVDictionary *m)
Definition: ffmpeg.c:629
AV_CODEC_ID_H264
@ AV_CODEC_ID_H264
Definition: codec_id.h:79
process_input_packet
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
Definition: ffmpeg.c:2552
av_rescale_delta
int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb)
Rescale a timestamp while preserving known durations.
Definition: mathematics.c:168
command
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:913
process_input
static int process_input(int file_index)
Definition: ffmpeg.c:3762
init_output_stream_wrapper
static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame, unsigned int fatal)
Definition: ffmpeg.c:730
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:436
arg
const char * arg
Definition: jacosubdec.c:67
AVCodecDescriptor::props
int props
Codec properties, a combination of AV_CODEC_PROP_* flags.
Definition: codec_desc.h:54
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:64
KeyframeForceCtx::expr_const_values
double expr_const_values[FKF_NB]
Definition: ffmpeg.h:557
of_enc_stats_close
void of_enc_stats_close(void)
Definition: ffmpeg_mux_init.c:218
if
if(ret)
Definition: filter_design.txt:179
avio_flush
void avio_flush(AVIOContext *s)
Force flushing of buffered data.
Definition: aviobuf.c:244
transcode_from_filter
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
Perform a step of transcoding for the specified filter graph.
Definition: ffmpeg.c:3872
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:76
InputStream::pts
int64_t pts
current pts of the decoded frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:369
AVFormatContext
Format I/O context.
Definition: avformat.h:1104
av_log_get_level
int av_log_get_level(void)
Get the current log level.
Definition: log.c:437
init_dynload
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:77
opts
AVDictionary * opts
Definition: movenc.c:50
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:861
AVSubtitleRect::w
int w
width of pict, undefined when pict is not set
Definition: avcodec.h:2305
print_final_stats
static void print_final_stats(int64_t total_size)
Definition: ffmpeg.c:1449
transcode_subtitles
static int transcode_subtitles(InputStream *ist, const AVPacket *pkt, int *got_output, int *decode_failed)
Definition: ffmpeg.c:2515
nb_frames_drop
static int64_t nb_frames_drop
Definition: ffmpeg.c:136
OutputFilter::name
uint8_t * name
Definition: ffmpeg.h:299
AVStream::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avformat.h:877
NULL
#define NULL
Definition: coverity.c:32
InputStream::sub2video::w
int w
Definition: ffmpeg.h:410
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1009
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
InputStream::top_field_first
int top_field_first
Definition: ffmpeg.h:393
InputStream::st
AVStream * st
Definition: ffmpeg.h:337
AV_DICT_MULTIKEY
#define AV_DICT_MULTIKEY
Allow to store several equal keys in the dictionary.
Definition: dict.h:84
main
int main(int argc, char **argv)
Definition: ffmpeg.c:4138
update_benchmark
static void update_benchmark(const char *fmt,...)
Definition: ffmpeg.c:643
AVCodec::type
enum AVMediaType type
Definition: codec.h:197
send_frame_to_filters
static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
Definition: ffmpeg.c:2127
decode_video
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof, int *decode_failed)
Definition: ffmpeg.c:2202
ENC_STATS_PTS_TIME
@ ENC_STATS_PTS_TIME
Definition: ffmpeg.h:508
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:168
of_close
void of_close(OutputFile **pof)
Definition: ffmpeg_mux.c:720
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
getmaxrss
static int64_t getmaxrss(void)
Definition: ffmpeg.c:4120
InputStream::next_pts
int64_t next_pts
synthetic pts for the next decode frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:368
AVPacketSideData::type
enum AVPacketSideDataType type
Definition: packet.h:318
AVPALETTE_SIZE
#define AVPALETTE_SIZE
Definition: pixfmt.h:32
check_keyboard_interaction
static int check_keyboard_interaction(int64_t cur_time)
Definition: ffmpeg.c:3514
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
AVCodecContext::subtitle_header_size
int subtitle_header_size
Definition: avcodec.h:1724
av_log_set_flags
void av_log_set_flags(int arg)
Definition: log.c:447
AVSubtitleRect::data
uint8_t * data[4]
data+linesize for the bitmap of this subtitle.
Definition: avcodec.h:2313
AV_BUFFERSRC_FLAG_PUSH
@ AV_BUFFERSRC_FLAG_PUSH
Immediately push the frame to the output.
Definition: buffersrc.h:46
AV_CODEC_PROP_BITMAP_SUB
#define AV_CODEC_PROP_BITMAP_SUB
Subtitle codec is bitmap based Decoded AVSubtitle data can be read from the AVSubtitleRect->pict fiel...
Definition: codec_desc.h:97
parseutils.h
InputStream::hwaccel_id
enum HWAccelID hwaccel_id
Definition: ffmpeg.h:422
EncStats
Definition: ffmpeg.h:527
mathops.h
AVProgram::stream_index
unsigned int * stream_index
Definition: avformat.h:1032
main_return_code
int main_return_code
Definition: ffmpeg.c:334
vstats_file
static FILE * vstats_file
Definition: ffmpeg.c:114
ifile_get_packet
int ifile_get_packet(InputFile *f, AVPacket **pkt)
Get next input packet from the demuxer.
Definition: ffmpeg_demux.c:410
AVStream::metadata
AVDictionary * metadata
Definition: avformat.h:918
AV_ROUND_NEAR_INF
@ AV_ROUND_NEAR_INF
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:84
InputFilter::eof
int eof
Definition: ffmpeg.h:292
InputStream::fix_sub_duration
int fix_sub_duration
Definition: ffmpeg.h:398
AV_CODEC_CAP_VARIABLE_FRAME_SIZE
#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE
Audio encoder supports receiving a different number of samples in each call.
Definition: codec.h:125
double
double
Definition: af_crystalizer.c:132
transcode_init
static int transcode_init(void)
Definition: ffmpeg.c:3315
avfilter_graph_request_oldest
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
Definition: avfiltergraph.c:1295
get_format
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
Definition: ffmpeg.c:2744
AV_DICT_DONT_OVERWRITE
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
Definition: dict.h:81
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:115
sub2video_push_ref
static void sub2video_push_ref(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:210
time.h
avio_w8
void avio_w8(AVIOContext *s, int b)
Definition: aviobuf.c:200
received_nb_signals
static volatile int received_nb_signals
Definition: ffmpeg.c:331
do_benchmark_all
int do_benchmark_all
Definition: ffmpeg_opt.c:73
av_packet_ref
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: avpacket.c:430
AVCodecParameters::ch_layout
AVChannelLayout ch_layout
Audio only.
Definition: codec_par.h:213
InputStream::sub2video::sub_queue
AVFifo * sub_queue
queue of AVSubtitle* before filter init
Definition: ffmpeg.h:408
ENC_STATS_PTS_TIME_IN
@ ENC_STATS_PTS_TIME_IN
Definition: ffmpeg.h:510
FilterGraph::nb_outputs
int nb_outputs
Definition: ffmpeg.h:332
swresample.h
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
InputStream::sub2video::initialize
unsigned int initialize
marks if sub2video_update should force an initialization
Definition: ffmpeg.h:411
AVCodecParameters::sample_rate
int sample_rate
Audio only.
Definition: codec_par.h:178
InputStream::par
AVCodecParameters * par
Codec parameters - to be used by the decoding/streamcopy code.
Definition: ffmpeg.h:352
input_files
InputFile ** input_files
Definition: ffmpeg.c:143
OutputFile::streams
OutputStream ** streams
Definition: ffmpeg.h:700
InputStream::frames_decoded
uint64_t frames_decoded
Definition: ffmpeg.h:436
InputStream::next_dts
int64_t next_dts
Definition: ffmpeg.h:364
AVFrame::best_effort_timestamp
int64_t best_effort_timestamp
frame timestamp estimated using various heuristics, in stream time base
Definition: frame.h:612
FilterGraph
Definition: ffmpeg.h:319
AVCodecParameters::extradata_size
int extradata_size
Size of the extradata content in bytes.
Definition: codec_par.h:80
print_stats
int print_stats
Definition: ffmpeg_opt.c:82
InputFilter::filter
AVFilterContext * filter
Definition: ffmpeg.h:272
AVOutputFormat::flags
int flags
can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS,...
Definition: avformat.h:527
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
decode_interrupt_cb
static int decode_interrupt_cb(void *ctx)
Definition: ffmpeg.c:495
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:548
av_buffersrc_close
int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags)
Close the buffer source after EOF.
Definition: buffersrc.c:263
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1364
options
const OptionDef options[]
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1296
f
f
Definition: af_crystalizer.c:122
AVIOContext
Bytestream IO Context.
Definition: avio.h:166
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:427
av_ts2timestr
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:76
InputStream::hwaccel_device_type
enum AVHWDeviceType hwaccel_device_type
Definition: ffmpeg.h:423
AVMediaType
AVMediaType
Definition: avutil.h:199
InputStream::decoded_frame
AVFrame * decoded_frame
Definition: ffmpeg.h:355
AVPacket::size
int size
Definition: packet.h:375
AVDISCARD_DEFAULT
@ AVDISCARD_DEFAULT
discard useless packets like 0 size packets in avi
Definition: defs.h:71
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:344
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
threadmessage.h
InputStream::file_index
int file_index
Definition: ffmpeg.h:336
do_video_out
static void do_video_out(OutputFile *of, OutputStream *ost, AVFrame *next_picture)
Definition: ffmpeg.c:1253
output_files
OutputFile ** output_files
Definition: ffmpeg.c:146
SIGNAL
#define SIGNAL(sig, func)
Definition: ffmpeg.c:391
AVFrame::quality
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:470
update_video_stats
static void update_video_stats(OutputStream *ost, const AVPacket *pkt, int write_vstats)
Definition: ffmpeg.c:756
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:121
AVFrame::sample_rate
int sample_rate
Sample rate of the audio data.
Definition: frame.h:516
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
received_sigterm
static volatile int received_sigterm
Definition: ffmpeg.c:330
start_time
static int64_t start_time
Definition: ffplay.c:331
FilterGraph::graph
AVFilterGraph * graph
Definition: ffmpeg.h:323
AVFormatContext::url
char * url
input or output URL.
Definition: avformat.h:1187
InputStream::pkt
AVPacket * pkt
Definition: ffmpeg.h:356
AVCodecContext::sample_fmt
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1050
send_filter_eof
static int send_filter_eof(InputStream *ist)
Definition: ffmpeg.c:2536
AVCodecContext::pkt_timebase
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
Definition: avcodec.h:1764
sq_send
int sq_send(SyncQueue *sq, unsigned int stream_idx, SyncQueueFrame frame)
Submit a frame for the stream with index stream_idx.
Definition: sync_queue.c:234
InputStream::got_output
int got_output
Definition: ffmpeg.h:400
uninit_opts
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents.
Definition: cmdutils.c:64
ENC_STATS_NB_SAMPLES
@ ENC_STATS_NB_SAMPLES
Definition: ffmpeg.h:514
copy_ts
int copy_ts
Definition: ffmpeg_opt.c:76
av_make_q
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
copy_tb
int copy_tb
Definition: ffmpeg_opt.c:78
avfilter_graph_queue_command
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
Definition: avfiltergraph.c:1215
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
av_get_audio_frame_duration2
int av_get_audio_frame_duration2(AVCodecParameters *par, int frame_bytes)
This function is the same as av_get_audio_frame_duration(), except it works with AVCodecParameters in...
Definition: utils.c:837
AVFrame::time_base
AVRational time_base
Time base for the timestamps in this frame.
Definition: frame.h:452
AVFrameSideData::data
uint8_t * data
Definition: frame.h:238
hwaccel_decode_init
int hwaccel_decode_init(AVCodecContext *avctx)
Definition: ffmpeg_hw.c:542
av_stream_get_codec_timebase
AVRational av_stream_get_codec_timebase(const AVStream *st)
Get the internal codec timebase from a stream.
Definition: avformat.c:765
OutputFile::url
const char * url
Definition: ffmpeg.h:698
DECODING_FOR_OST
#define DECODING_FOR_OST
Definition: ffmpeg.h:341
sub2video_update
void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
Definition: ffmpeg.c:228
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:417
AV_PICTURE_TYPE_NONE
@ AV_PICTURE_TYPE_NONE
Undefined.
Definition: avutil.h:273
FrameData::idx
uint64_t idx
Definition: ffmpeg.c:118
AVStream::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown)
Definition: avformat.h:916
AVSubtitle::end_display_time
uint32_t end_display_time
Definition: avcodec.h:2333
avdevice.h
AVSubtitleRect::type
enum AVSubtitleType type
Definition: avcodec.h:2316
show_banner
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: opt_common.c:237
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:373
avio_write
void avio_write(AVIOContext *s, const unsigned char *buf, int size)
Definition: aviobuf.c:222
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:293
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:432
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
InputStream::samples_decoded
uint64_t samples_decoded
Definition: ffmpeg.h:437
FrameData::pts
int64_t pts
Definition: ffmpeg.c:119
AV_FIELD_TT
@ AV_FIELD_TT
Top coded_first, top displayed first.
Definition: codec_par.h:41
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:380
decode
static int decode(InputStream *ist, AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
Definition: ffmpeg.c:2087
do_benchmark
int do_benchmark
Definition: ffmpeg_opt.c:72
OutputFile::sq_encode
SyncQueue * sq_encode
Definition: ffmpeg.h:703
KF_FORCE_SOURCE
@ KF_FORCE_SOURCE
Definition: ffmpeg.h:542
bitrate
int64_t bitrate
Definition: h264_levels.c:131
av_packet_rescale_ts
void av_packet_rescale_ts(AVPacket *pkt, AVRational src_tb, AVRational dst_tb)
Convert valid timing fields (timestamps / durations) in a packet from one timebase to another.
Definition: avpacket.c:526
av_buffersink_get_type
enum AVMediaType av_buffersink_get_type(const AVFilterContext *ctx)
ENC_STATS_FRAME_NUM
@ ENC_STATS_FRAME_NUM
Definition: ffmpeg.h:503
KeyframeForceCtx
Definition: ffmpeg.h:546
av_channel_layout_compare
int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1)
Check whether two channel layouts are semantically the same, i.e.
Definition: channel_layout.c:932
SUBTITLE_BITMAP
@ SUBTITLE_BITMAP
A bitmap, pict will be set.
Definition: avcodec.h:2285
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
nb_frames_dup
static int64_t nb_frames_dup
Definition: ffmpeg.c:134
AVStream::side_data
AVPacketSideData * side_data
An array of side data that applies to the whole stream (i.e.
Definition: avformat.h:956
buffersink.h
AVCodec::id
enum AVCodecID id
Definition: codec.h:198
do_subtitle_out
static void do_subtitle_out(OutputFile *of, OutputStream *ost, AVSubtitle *sub)
Definition: ffmpeg.c:1051
av_find_nearest_q_idx
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the value in a list of rationals nearest a given reference rational.
Definition: rational.c:142
av_get_picture_type_char
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:40
register_exit
void register_exit(void(*cb)(int ret))
Register a program-specific cleanup routine.
Definition: cmdutils.c:88
avcodec_get_name
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:442
HWACCEL_AUTO
@ HWACCEL_AUTO
Definition: ffmpeg.h:71
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
AVSubtitleRect::flags
int flags
Definition: avcodec.h:2327
avcodec_send_packet
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:598
av_buffersink_get_w
int av_buffersink_get_w(const AVFilterContext *ctx)
avio_closep
int avio_closep(AVIOContext **s)
Close the resource accessed by the AVIOContext *s, free it and set the pointer pointing to it to NULL...
Definition: aviobuf.c:1280
av_log_set_level
void av_log_set_level(int level)
Set the log level.
Definition: log.c:442
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:410
bprint.h
DECODING_FOR_FILTER
#define DECODING_FOR_FILTER
Definition: ffmpeg.h:342
of_output_packet
void of_output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int eof)
Definition: ffmpeg_mux.c:329
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:367
InputStream::ret
int ret
Definition: ffmpeg.h:401
process_subtitle
static int process_subtitle(InputStream *ist, AVSubtitle *subtitle, int *got_output)
Definition: ffmpeg.c:2326
InputStream::want_frame_data
int want_frame_data
Definition: ffmpeg.h:345
AV_FRAME_FLAG_CORRUPT
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:573
check_recording_time
static int check_recording_time(OutputStream *ost, int64_t ts, AVRational tb)
Definition: ffmpeg.c:673
av_get_bytes_per_sample
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:108
AVSubtitleRect::nb_colors
int nb_colors
number of colors in pict, undefined when pict is not set
Definition: avcodec.h:2307
av_packet_get_side_data
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, size_t *size)
Get side information from packet.
Definition: avpacket.c:251
sub2video_flush
static void sub2video_flush(InputStream *ist)
Definition: ffmpeg.c:300
internal.h
AVCodecParameters::height
int height
Definition: codec_par.h:129
avcodec_parameters_to_context
int avcodec_parameters_to_context(AVCodecContext *codec, const AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
Definition: codec_par.c:182
AV_TIME_BASE
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:254
AVCodecParameters::block_align
int block_align
Audio only.
Definition: codec_par.h:185
av_buffersrc_add_frame_flags
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:175
VSYNC_CFR
@ VSYNC_CFR
Definition: ffmpeg.h:61
src2
const pixel * src2
Definition: h264pred_template.c:422
OutputFile::bitexact
int bitexact
Definition: ffmpeg.h:709
display.h
vsnprintf
#define vsnprintf
Definition: snprintf.h:36
exit_on_error
int exit_on_error
Definition: ffmpeg_opt.c:80
sigterm_handler
static void sigterm_handler(int sig)
Definition: ffmpeg.c:338
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
delta
float delta
Definition: vorbis_enc_data.h:430
InputStream::processing_needed
int processing_needed
Definition: ffmpeg.h:343
InputFile::ctx
AVFormatContext * ctx
Definition: ffmpeg.h:453
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:507
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:478
tb
#define tb
Definition: regdef.h:68
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:254
AVProgram
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1028
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:191
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
AV_PKT_DATA_CPB_PROPERTIES
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:146
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1016
ENC_STATS_STREAM_IDX
@ ENC_STATS_STREAM_IDX
Definition: ffmpeg.h:502
filtergraphs
FilterGraph ** filtergraphs
Definition: ffmpeg.c:149
av_rescale
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:129
int_cb
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:500
AVCodecContext::height
int height
Definition: avcodec.h:598
avcodec_send_frame
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:483
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:635
ENC_STATS_SAMPLE_NUM
@ ENC_STATS_SAMPLE_NUM
Definition: ffmpeg.h:513
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:262
nb_output_files
int nb_output_files
Definition: ffmpeg.c:147
parse_loglevel
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:474
av_buffersink_get_h
int av_buffersink_get_h(const AVFilterContext *ctx)
AVCodecContext::frame_num
int64_t frame_num
Frame counter, set by libavcodec.
Definition: avcodec.h:2065
AVStream::disposition
int disposition
Stream disposition - a combination of AV_DISPOSITION_* flags.
Definition: avformat.h:907
mid_pred
#define mid_pred
Definition: mathops.h:98
AV_BUFFERSINK_FLAG_NO_REQUEST
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:95
av_buffer_allocz
AVBufferRef * av_buffer_allocz(size_t size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:93
decode_error_stat
static int64_t decode_error_stat[2]
Definition: ffmpeg.c:137
AVStream::id
int id
Format-specific stream ID.
Definition: avformat.h:850
AVFrame::decode_error_flags
int decode_error_flags
decode error flags of the frame, set to a combination of FF_DECODE_ERROR_xxx flags if the decoder pro...
Definition: frame.h:648
ret
ret
Definition: filter_design.txt:187
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:174
AVSubtitleRect::h
int h
height of pict, undefined when pict is not set
Definition: avcodec.h:2306
abort_on_flags
int abort_on_flags
Definition: ffmpeg_opt.c:81
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
avcodec_flush_buffers
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal codec state / flush internal buffers.
Definition: avcodec.c:369
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
InputFilter::name
uint8_t * name
Definition: ffmpeg.h:275
av_strlcat
size_t av_strlcat(char *dst, const char *src, size_t size)
Append the string src to the string dst, but to a total length of no more than size - 1 bytes,...
Definition: avstring.c:96
VSYNC_DROP
@ VSYNC_DROP
Definition: ffmpeg.h:64
normalize.ifile
ifile
Definition: normalize.py:6
encode_frame
static int encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:877
OutputFilter::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg.h:310
AVStream::nb_side_data
int nb_side_data
The number of elements in the AVStream.side_data array.
Definition: avformat.h:960
AV_CODEC_PROP_TEXT_SUB
#define AV_CODEC_PROP_TEXT_SUB
Subtitle codec is text based.
Definition: codec_desc.h:102
InputFile::streams
InputStream ** streams
Definition: ffmpeg.h:474
AVCodecContext::opaque
void * opaque
Private data of the user, can be used to carry app specific stuff.
Definition: avcodec.h:468
InputStream::reinit_filters
int reinit_filters
Definition: ffmpeg.h:419
hw_device_free_all
void hw_device_free_all(void)
Definition: ffmpeg_hw.c:288
avformat.h
InputFile::eagain
int eagain
Definition: ffmpeg.h:455
adjust_frame_pts_to_encoder_tb
static double adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:685
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
dict.h
av_fifo_alloc2
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
Definition: fifo.c:47
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:432
AV_LOG_SKIP_REPEATED
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:370
AV_DICT_MATCH_CASE
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:74
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
InputFilter::sample_rate
int sample_rate
Definition: ffmpeg.h:286
avio_printf
int avio_printf(AVIOContext *s, const char *fmt,...) av_printf_format(2
Writes a formatted string to the context.
ifile_close
void ifile_close(InputFile **f)
Definition: ffmpeg_demux.c:479
avformat_network_deinit
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:569
ifilter_parameters_from_frame
int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
Definition: ffmpeg_filter.c:1275
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:28
AVCodecContext
main external API structure.
Definition: avcodec.h:426
AVFrame::height
int height
Definition: frame.h:402
AVStream::index
int index
stream index in AVFormatContext
Definition: avformat.h:844
transcode_init_done
static atomic_int transcode_init_done
Definition: ffmpeg.c:332
BenchmarkTimeStamps
Definition: ffmpeg.c:123
avformat_transfer_internal_stream_timing_info
int avformat_transfer_internal_stream_timing_info(const AVOutputFormat *ofmt, AVStream *ost, const AVStream *ist, enum AVTimebaseSource copy_tb)
Transfer internal timing information from one stream to another.
Definition: avformat.c:700
copy_av_subtitle
static int copy_av_subtitle(AVSubtitle *dst, AVSubtitle *src)
Definition: ffmpeg.c:2385
hw_device_setup_for_encode
int hw_device_setup_for_encode(OutputStream *ost)
Definition: ffmpeg_hw.c:445
channel_layout.h
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
InputFilter::format
int format
Definition: ffmpeg.h:281
av_packet_new_side_data
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Allocate new information of a packet.
Definition: avpacket.c:230
InputStream::prev_pkt_pts
int64_t prev_pkt_pts
Definition: ffmpeg.h:360
ts_discontinuity_detect
static void ts_discontinuity_detect(InputFile *ifile, InputStream *ist, AVPacket *pkt)
Definition: ffmpeg.c:3671
AVRational::den
int den
Denominator.
Definition: rational.h:60
KeyframeForceCtx::index
int index
Definition: ffmpeg.h:554
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
check_decode_result
static void check_decode_result(InputStream *ist, int *got_output, int ret)
Definition: ffmpeg.c:1947
avfilter.h
av_channel_layout_uninit
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
Definition: channel_layout.c:632
InputStream::prev_sub
struct InputStream::@3 prev_sub
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
AVStream::r_frame_rate
AVRational r_frame_rate
Real base framerate of the stream.
Definition: avformat.h:996
program_name
const char program_name[]
program name, defined by the program for show_version().
Definition: ffmpeg.c:111
InputStream::nb_samples
int64_t nb_samples
Definition: ffmpeg.h:387
InputFilter::height
int height
Definition: ffmpeg.h:283
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
AVPacket::stream_index
int stream_index
Definition: packet.h:376
avcodec_get_hw_config
const AVCodecHWConfig * avcodec_get_hw_config(const AVCodec *codec, int index)
Retrieve supported hardware configurations for a codec.
Definition: utils.c:884
InputFile::ts_offset
int64_t ts_offset
Definition: ffmpeg.h:462
InputStream::discard
int discard
Definition: ffmpeg.h:338
AVFilterContext
An instance of a filter.
Definition: avfilter.h:392
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1358
print_report
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
Definition: ffmpeg.c:1569
AV_CODEC_CAP_PARAM_CHANGE
#define AV_CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
Definition: codec.h:115
VSYNC_AUTO
@ VSYNC_AUTO
Definition: ffmpeg.h:59
av_channel_layout_copy
int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
Make a copy of a channel layout.
Definition: channel_layout.c:639
OutputFilter
Definition: ffmpeg.h:295
InputStream::sub2video::frame
AVFrame * frame
Definition: ffmpeg.h:409
AVCodecContext::codec_type
enum AVMediaType codec_type
Definition: avcodec.h:434
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:270
desc
const char * desc
Definition: libsvtav1.c:83
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
av_buffersrc_get_nb_failed_requests
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:290
flush
void(* flush)(AVBSFContext *ctx)
Definition: dts2pts_bsf.c:367
start_at_zero
int start_at_zero
Definition: ffmpeg_opt.c:77
AVFMT_TS_DISCONT
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:481
OutputStream::file_index
int file_index
Definition: ffmpeg.h:565
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:321
OutputFilter::out_tmp
AVFilterInOut * out_tmp
Definition: ffmpeg.h:302
AVCodecParameters::video_delay
int video_delay
Video only.
Definition: codec_par.h:157
InputStream::sub2video::h
int h
Definition: ffmpeg.h:410
llrint
#define llrint(x)
Definition: libm.h:394
set_encoder_id
static void set_encoder_id(OutputFile *of, OutputStream *ost)
Definition: ffmpeg.c:2982
KeyframeForceCtx::pexpr
AVExpr * pexpr
Definition: ffmpeg.h:556
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:236
AVCodecParameters::format
int format
Definition: codec_par.h:86
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
InputFilter::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: ffmpeg.h:289
ifilter_send_frame
static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame, int keep_reference)
Definition: ffmpeg.c:1977
FKF_N_FORCED
@ FKF_N_FORCED
Definition: ffmpeg.h:489
AVDictionaryEntry
Definition: dict.h:89
InputStream::sub2video::end_pts
int64_t end_pts
Definition: ffmpeg.h:407
av_add_q
AVRational av_add_q(AVRational b, AVRational c)
Add two rationals.
Definition: rational.c:93
stdin_interaction
int stdin_interaction
Definition: ffmpeg_opt.c:84
AV_ROUND_PASS_MINMAX
@ AV_ROUND_PASS_MINMAX
Flag telling rescaling functions to pass INT64_MIN/MAX through unchanged, avoiding special cases for ...
Definition: mathematics.h:108
AVCodecParameters::codec_id
enum AVCodecID codec_id
Specific type of the encoded data (the codec used).
Definition: codec_par.h:62
LastFrameDuration::duration
int64_t duration
Definition: ffmpeg.h:447
AVPacket
This structure stores compressed data.
Definition: packet.h:351
EncStatsComponent
Definition: ffmpeg.h:520
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:86
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
cmdutils.h
AV_FIELD_TB
@ AV_FIELD_TB
Top coded first, bottom displayed first.
Definition: codec_par.h:43
EncStats::nb_components
int nb_components
Definition: ffmpeg.h:529
InputStream::dts_buffer
int64_t * dts_buffer
Definition: ffmpeg.h:439
av_stream_new_side_data
uint8_t * av_stream_new_side_data(AVStream *st, enum AVPacketSideDataType type, size_t size)
Allocate new information from stream.
Definition: avformat.c:191
InputFilter::frame_queue
AVFifo * frame_queue
Definition: ffmpeg.h:278
nb_filtergraphs
int nb_filtergraphs
Definition: ffmpeg.c:150
AV_FIELD_BB
@ AV_FIELD_BB
Bottom coded first, bottom displayed first.
Definition: codec_par.h:42
d
d
Definition: ffmpeg_filter.c:156
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:598
int32_t
int32_t
Definition: audioconvert.c:56
imgutils.h
ENC_STATS_PKT_SIZE
@ ENC_STATS_PKT_SIZE
Definition: ffmpeg.h:515
AV_PKT_DATA_QUALITY_STATS
@ AV_PKT_DATA_QUALITY_STATS
This side data contains quality related information from the encoder.
Definition: packet.h:133
timestamp.h
OutputStream
Definition: mux.c:53
hwcontext.h
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX
@ AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX
The codec supports this format via the hw_device_ctx interface.
Definition: codec.h:306
av_strlcpy
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:86
avfilter_graph_send_command
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
Definition: avfiltergraph.c:1185
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
dts_error_threshold
float dts_error_threshold
Definition: ffmpeg_opt.c:68
av_fifo_freep2
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
Definition: fifo.c:286
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_ts2str
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
AVCodecHWConfig
Definition: codec.h:338
h
h
Definition: vp9dsp_template.c:2038
AVERROR_EXIT
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
Definition: error.h:58
avcodec_descriptor_get
const AVCodecDescriptor * avcodec_descriptor_get(enum AVCodecID id)
Definition: codec_desc.c:3664
OutputFile::format
const AVOutputFormat * format
Definition: ffmpeg.h:697
InputStream::nb_dts_buffer
int nb_dts_buffer
Definition: ffmpeg.h:440
InputStream::saw_first_ts
int saw_first_ts
Definition: ffmpeg.h:390
AVStream::start_time
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base.
Definition: avformat.h:887
set_tty_echo
static void set_tty_echo(int on)
Definition: ffmpeg.c:3502
avstring.h
FF_QP2LAMBDA
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:227
InputFile::nb_streams
int nb_streams
Definition: ffmpeg.h:475
FKF_N
@ FKF_N
Definition: ffmpeg.h:488
AVStream::pts_wrap_bits
int pts_wrap_bits
Number of bits in timestamps.
Definition: avformat.h:1005
ENC_STATS_DTS_TIME
@ ENC_STATS_DTS_TIME
Definition: ffmpeg.h:512
OutputFile::recording_time
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:705
AV_FIELD_BT
@ AV_FIELD_BT
Bottom coded first, top displayed first.
Definition: codec_par.h:44
of_stream_init
int of_stream_init(OutputFile *of, OutputStream *ost)
Definition: ffmpeg_mux.c:583
VSYNC_PASSTHROUGH
@ VSYNC_PASSTHROUGH
Definition: ffmpeg.h:60
InputStream::dec
const AVCodec * dec
Definition: ffmpeg.h:354
snprintf
#define snprintf
Definition: snprintf.h:34
EncStats::io
AVIOContext * io
Definition: ffmpeg.h:531
ABORT_ON_FLAG_EMPTY_OUTPUT
#define ABORT_ON_FLAG_EMPTY_OUTPUT
Definition: ffmpeg.h:496
read_key
static int read_key(void)
Definition: ffmpeg.c:444
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
reap_filters
static int reap_filters(int flush)
Get and encode new output from any of the filtergraphs, without causing activity.
Definition: ffmpeg.c:1361
buffersrc.h
InputStream::subtitle
AVSubtitle subtitle
Definition: ffmpeg.h:402
of_write_trailer
int of_write_trailer(OutputFile *of)
Definition: ffmpeg_mux.c:604
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:795
av_rescale_q_rnd
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
av_dict_iterate
const AVDictionaryEntry * av_dict_iterate(const AVDictionary *m, const AVDictionaryEntry *prev)
Iterate over a dictionary.
Definition: dict.c:42
filtergraph_is_simple
int filtergraph_is_simple(FilterGraph *fg)
Definition: ffmpeg_filter.c:1307
init_encoder_time_base
static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
Definition: ffmpeg.c:3005
AV_FIFO_FLAG_AUTO_GROW
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
Definition: fifo.h:67
read
static uint32_t BS_FUNC() read(BSCTX *bc, unsigned int n)
Return n bits from the buffer, n has to be in the 0-32 range.
Definition: bitstream_template.h:231
ENC_STATS_TIMEBASE
@ ENC_STATS_TIMEBASE
Definition: ffmpeg.h:505
AVPacket::time_base
AVRational time_base
Time base of the packet's timestamps.
Definition: packet.h:418
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:289
OutputFile
Definition: ffmpeg.h:692
InputStream::autorotate
int autorotate
Definition: ffmpeg.h:396
avdevice_register_all
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:65