FFmpeg
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavcodec/version.h"
68 #include "libavformat/os_support.h"
69 
70 # include "libavfilter/avfilter.h"
71 # include "libavfilter/buffersrc.h"
72 # include "libavfilter/buffersink.h"
73 
74 #if HAVE_SYS_RESOURCE_H
75 #include <sys/time.h>
76 #include <sys/types.h>
77 #include <sys/resource.h>
78 #elif HAVE_GETPROCESSTIMES
79 #include <windows.h>
80 #endif
81 #if HAVE_GETPROCESSMEMORYINFO
82 #include <windows.h>
83 #include <psapi.h>
84 #endif
85 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <windows.h>
87 #endif
88 
89 
90 #if HAVE_SYS_SELECT_H
91 #include <sys/select.h>
92 #endif
93 
94 #if HAVE_TERMIOS_H
95 #include <fcntl.h>
96 #include <sys/ioctl.h>
97 #include <sys/time.h>
98 #include <termios.h>
99 #elif HAVE_KBHIT
100 #include <conio.h>
101 #endif
102 
103 #include <time.h>
104 
105 #include "ffmpeg.h"
106 #include "cmdutils.h"
107 #include "sync_queue.h"
108 
109 #include "libavutil/avassert.h"
110 
111 const char program_name[] = "ffmpeg";
112 const int program_birth_year = 2000;
113 
114 static FILE *vstats_file;
115 
/* Snapshot of wall-clock and CPU times, used by the -benchmark option
 * (see update_benchmark()).  All values are in microseconds.
 * NOTE(review): the closing "} BenchmarkTimeStamps;" line is elided in
 * this extraction. */
typedef struct BenchmarkTimeStamps {
    int64_t real_usec;   /* wall-clock time */
    int64_t user_usec;   /* user-mode CPU time */
    int64_t sys_usec;    /* kernel-mode CPU time */
121 
123 static int64_t getmaxrss(void);
125 
126 static int64_t nb_frames_dup = 0;
127 static uint64_t dup_warning = 1000;
128 static int64_t nb_frames_drop = 0;
129 static int64_t decode_error_stat[2];
130 unsigned nb_output_dumped = 0;
131 
134 
137 
140 
143 
144 #if HAVE_TERMIOS_H
145 
146 /* init terminal so that we can grab keys */
147 static struct termios oldtty;
148 static int restore_tty;
149 #endif
150 
151 /* sub2video hack:
152  Convert subtitles to video with alpha to insert them in filter graphs.
153  This is a temporary solution until libavfilter gets real subtitles support.
154  */
155 
/* Prepare ist->sub2video.frame as a blank canvas for subtitle blitting.
 * Returns 0 on success, a negative AVERROR on allocation failure.
 * NOTE(review): the function signature and the preceding lines (likely
 * av_frame_unref() and the pixel-format assignment) are elided in this
 * extraction. */
{
    int ret;
    AVFrame *frame = ist->sub2video.frame;

    /* fall back to the configured sub2video size when the decoder has none */
    ist->sub2video.frame->width  = ist->dec_ctx->width  ? ist->dec_ctx->width  : ist->sub2video.w;
    ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
    if ((ret = av_frame_get_buffer(frame, 0)) < 0)
        return ret;
    /* clear plane 0 only; assumes a packed 32-bit pixel format — TODO confirm */
    memset(frame->data[0], 0, frame->height * frame->linesize[0]);
    return 0;
}
170 
171 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
172  AVSubtitleRect *r)
173 {
174  uint32_t *pal, *dst2;
175  uint8_t *src, *src2;
176  int x, y;
177 
178  if (r->type != SUBTITLE_BITMAP) {
179  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
180  return;
181  }
182  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
183  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
184  r->x, r->y, r->w, r->h, w, h
185  );
186  return;
187  }
188 
189  dst += r->y * dst_linesize + r->x * 4;
190  src = r->data[0];
191  pal = (uint32_t *)r->data[1];
192  for (y = 0; y < r->h; y++) {
193  dst2 = (uint32_t *)dst;
194  src2 = src;
195  for (x = 0; x < r->w; x++)
196  *(dst2++) = pal[*(src2++)];
197  dst += dst_linesize;
198  src += r->linesize[0];
199  }
200 }
201 
/* Push the current sub2video frame, stamped with pts, into every input
 * filter fed by this stream, and remember the pts as last_pts.
 * NOTE(review): the statement assigning ret (presumably an
 * av_buffersrc_add_frame*() call on ist->filters[i]) is elided in this
 * extraction. */
static void sub2video_push_ref(InputStream *ist, int64_t pts)
{
    AVFrame *frame = ist->sub2video.frame;
    int i;
    int ret;

    av_assert1(frame->data[0]);
    ist->sub2video.last_pts = frame->pts = pts;
    for (i = 0; i < ist->nb_filters; i++) {
        /* EOF from the buffer source is not treated as an error */
        if (ret != AVERROR_EOF && ret < 0)
            av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
                   av_err2str(ret));
    }
}
219 
/* Render the given subtitle (or a blank refresh frame when sub is NULL)
 * into ist->sub2video.frame and push it into the input filters.
 * heartbeat_pts supplies the start time when (re)initializing the
 * sub2video state. */
void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
{
    AVFrame *frame = ist->sub2video.frame;
    int8_t *dst;
    int dst_linesize;
    int num_rects, i;
    int64_t pts, end_pts;

    /* stream is not fed to any sub2video filter graph */
    if (!frame)
        return;
    if (sub) {
        /* display times are millisecond offsets from sub->pts */
        pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
                           AV_TIME_BASE_Q, ist->st->time_base);
        end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
                               AV_TIME_BASE_Q, ist->st->time_base);
        num_rects = sub->num_rects;
    } else {
        /* If we are initializing the system, utilize current heartbeat
           PTS as the start time, and show until the following subpicture
           is received. Otherwise, utilize the previous subpicture's end time
           as the fall-back value. */
        pts = ist->sub2video.initialize ?
              heartbeat_pts : ist->sub2video.end_pts;
        end_pts = INT64_MAX;
        num_rects = 0;
    }
    if (sub2video_get_blank_frame(ist) < 0) {
        /* NOTE(review): the head of the av_log() call is elided in this
         * extraction; only its message argument is visible. */
        "Impossible to get a blank canvas.\n");
        return;
    }
    dst          = frame->data    [0];
    dst_linesize = frame->linesize[0];
    for (i = 0; i < num_rects; i++)
        sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
    sub2video_push_ref(ist, pts);
    ist->sub2video.end_pts = end_pts;
    ist->sub2video.initialize = 0;
}
259 
260 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
261 {
262  InputFile *infile = input_files[ist->file_index];
263  int i, j, nb_reqs;
264  int64_t pts2;
265 
266  /* When a frame is read from a file, examine all sub2video streams in
267  the same file and send the sub2video frame again. Otherwise, decoded
268  video frames could be accumulating in the filter graph while a filter
269  (possibly overlay) is desperately waiting for a subtitle frame. */
270  for (i = 0; i < infile->nb_streams; i++) {
271  InputStream *ist2 = infile->streams[i];
272  if (!ist2->sub2video.frame)
273  continue;
274  /* subtitles seem to be usually muxed ahead of other streams;
275  if not, subtracting a larger time here is necessary */
276  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
277  /* do not send the heartbeat frame if the subtitle is already ahead */
278  if (pts2 <= ist2->sub2video.last_pts)
279  continue;
280  if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
281  /* if we have hit the end of the current displayed subpicture,
282  or if we need to initialize the system, update the
283  overlayed subpicture and its start/end times */
284  sub2video_update(ist2, pts2 + 1, NULL);
285  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
286  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
287  if (nb_reqs)
288  sub2video_push_ref(ist2, pts2);
289  }
290 }
291 
/* Flush the sub2video state of a stream at EOF: display the last
 * subpicture until INT64_MAX and signal end-of-stream to each feeding
 * filter.
 * NOTE(review): the statement assigning ret (presumably a NULL-frame
 * av_buffersrc_add_frame*() call signalling EOF) is elided in this
 * extraction. */
static void sub2video_flush(InputStream *ist)
{
    int i;
    int ret;

    if (ist->sub2video.end_pts < INT64_MAX)
        sub2video_update(ist, INT64_MAX, NULL);
    for (i = 0; i < ist->nb_filters; i++) {
        if (ret != AVERROR_EOF && ret < 0)
            av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
    }
}
305 
306 /* end of sub2video hack */
307 
/* Async-signal-safe part of terminal teardown: restore the tty mode saved
 * in term_init(), but only if we actually changed it (restore_tty). */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if (restore_tty) {
        tcsetattr(0, TCSANOW, &oldtty);
    }
#endif
}
315 
/* Public terminal-restore entry point, called on shutdown.  The av_log()
 * call with AV_LOG_QUIET flushes/terminates any pending log line.
 * NOTE(review): one line is elided in this extraction — presumably the
 * call to term_exit_sigsafe(). */
void term_exit(void)
{
    av_log(NULL, AV_LOG_QUIET, "%s", "");
}
321 
322 static volatile int received_sigterm = 0;
323 static volatile int received_nb_signals = 0;
325 static volatile int ffmpeg_exited = 0;
328 
/* Termination-signal handler: records the signal and, after more than 3
 * signals, writes a message to stderr and hard-exits.
 * NOTE(review): the signature line (presumably "sigterm_handler(int sig)")
 * and the lines incrementing received_nb_signals are elided in this
 * extraction. */
static void
{
    int ret;
    received_sigterm = sig;
    if (received_nb_signals > 3) {
        /* only async-signal-safe write() is used here, no av_log() */
        ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
                    strlen("Received > 3 system signals, hard exiting\n"));
        if (ret < 0) { /* Do nothing */ };
        exit(123);
    }
}
343 
344 #if HAVE_SETCONSOLECTRLHANDLER
345 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
346 {
347  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
348 
349  switch (fdwCtrlType)
350  {
351  case CTRL_C_EVENT:
352  case CTRL_BREAK_EVENT:
353  sigterm_handler(SIGINT);
354  return TRUE;
355 
356  case CTRL_CLOSE_EVENT:
357  case CTRL_LOGOFF_EVENT:
358  case CTRL_SHUTDOWN_EVENT:
359  sigterm_handler(SIGTERM);
360  /* Basically, with these 3 events, when we return from this method the
361  process is hard terminated, so stall as long as we need to
362  to try and let the main thread(s) clean up and gracefully terminate
363  (we have at most 5 seconds, but should be done far before that). */
364  while (!ffmpeg_exited) {
365  Sleep(0);
366  }
367  return TRUE;
368 
369  default:
370  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
371  return FALSE;
372  }
373 }
374 #endif
375 
/* SIGNAL(sig, func): install func as the handler for sig.  On Linux this
 * goes through sigaction() using a locally prepared "struct sigaction
 * action" (set up by the caller, see term_init()); elsewhere it falls back
 * to plain signal(). */
#ifdef __linux__
#define SIGNAL(sig, func) \
    do { \
        action.sa_handler = func; \
        sigaction(sig, &action, NULL); \
    } while (0)
#else
#define SIGNAL(sig, func) \
    signal(sig, func)
#endif
386 
387 void term_init(void)
388 {
389 #if defined __linux__
390  struct sigaction action = {0};
391  action.sa_handler = sigterm_handler;
392 
393  /* block other interrupts while processing this one */
394  sigfillset(&action.sa_mask);
395 
396  /* restart interruptible functions (i.e. don't fail with EINTR) */
397  action.sa_flags = SA_RESTART;
398 #endif
399 
400 #if HAVE_TERMIOS_H
401  if (stdin_interaction) {
402  struct termios tty;
403  if (tcgetattr (0, &tty) == 0) {
404  oldtty = tty;
405  restore_tty = 1;
406 
407  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
408  |INLCR|IGNCR|ICRNL|IXON);
409  tty.c_oflag |= OPOST;
410  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
411  tty.c_cflag &= ~(CSIZE|PARENB);
412  tty.c_cflag |= CS8;
413  tty.c_cc[VMIN] = 1;
414  tty.c_cc[VTIME] = 0;
415 
416  tcsetattr (0, TCSANOW, &tty);
417  }
418  SIGNAL(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
419  }
420 #endif
421 
422  SIGNAL(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
423  SIGNAL(SIGTERM, sigterm_handler); /* Termination (ANSI). */
424 #ifdef SIGXCPU
425  SIGNAL(SIGXCPU, sigterm_handler);
426 #endif
427 #ifdef SIGPIPE
428  signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
429 #endif
430 #if HAVE_SETCONSOLECTRLHANDLER
431  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
432 #endif
433 }
434 
435 /* read a key without blocking */
/* Read a key press without blocking.
 * Returns the character read, or a negative value when no input is
 * available (or the input pipe has been closed).
 *
 * Fix: in the PeekNamedPipe branch the return value of read() was
 * ignored, so a failed or zero-byte read returned the uninitialized
 * variable ch (undefined behavior); we now report -1 in that case. */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    int n = 1;
    struct timeval tv;
    fd_set rfds;

    FD_ZERO(&rfds);
    FD_SET(0, &rfds);
    /* zero timeout: poll stdin without blocking */
    tv.tv_sec = 0;
    tv.tv_usec = 0;
    n = select(1, &rfds, NULL, NULL, &tv);
    if (n > 0) {
        n = read(0, &ch, 1);
        if (n == 1)
            return ch;

        return n;
    }
#elif HAVE_KBHIT
# if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;
    if (!input_handle) {
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
            return -1;
        }
        //Read it
        if (nchars != 0) {
            /* check the result: on a failed or short read, ch would be
             * uninitialized — report "no key" instead of returning garbage */
            if (read(0, &ch, 1) == 1)
                return ch;
            return -1;
        } else {
            return -1;
        }
    }
# endif
    if (kbhit())
        return getch();
#endif
    return -1;
}
486 
/* AVIOInterruptCallback: tells libavformat whether blocking I/O should be
 * aborted (non-zero return means abort).
 * NOTE(review): the body's return statement is elided in this extraction
 * — presumably it tests the received-signal / initialization state. */
static int decode_interrupt_cb(void *ctx)
{
}
491 
493 
/* Global teardown, registered to run at program exit: report benchmark
 * data, free all filtergraphs and their input/output filters, close all
 * input and output files, close the vstats file and restore the terminal.
 * NOTE(review): numerous lines are elided in this extraction (loop bodies
 * freeing frames/subtitles, the per-file close calls, several av_freep()
 * and deinit calls); the visible code below is therefore syntactically
 * incomplete in places. */
static void ffmpeg_cleanup(int ret)
{
    int i, j;

    if (do_benchmark) {
        int maxrss = getmaxrss() / 1024;
        av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
    }

    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        for (j = 0; j < fg->nb_inputs; j++) {
            InputFilter *ifilter = fg->inputs[j];
            struct InputStream *ist = ifilter->ist;

            if (ifilter->frame_queue) {
                AVFrame *frame;
                /* NOTE(review): the loop body freeing each dequeued frame
                 * is elided here */
                while (av_fifo_read(ifilter->frame_queue, &frame, 1) >= 0)
                av_fifo_freep2(&ifilter->frame_queue);
            }
            av_freep(&ifilter->displaymatrix);
            if (ist->sub2video.sub_queue) {
                AVSubtitle sub;
                /* NOTE(review): the loop body freeing each dequeued
                 * subtitle (and the fifo free) is elided here */
                while (av_fifo_read(ist->sub2video.sub_queue, &sub, 1) >= 0)
            }
            av_buffer_unref(&ifilter->hw_frames_ctx);
            av_freep(&ifilter->name);
            av_freep(&fg->inputs[j]);
        }
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];

            avfilter_inout_free(&ofilter->out_tmp);
            av_freep(&ofilter->name);
            av_freep(&fg->outputs[j]);
        }
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);

    }

    /* close files */
    /* NOTE(review): the bodies of the two loops below (per-file close
     * calls) are elided in this extraction */
    for (i = 0; i < nb_output_files; i++)

    for (i = 0; i < nb_input_files; i++)

    if (vstats_file) {
        if (fclose(vstats_file))
            /* NOTE(review): the av_log() call head is elided here */
            "Error closing vstats file, loss of information possible: %s\n",
            av_err2str(AVERROR(errno)));
    }

    uninit_opts();

    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
               (int) received_sigterm);
    } else if (ret && atomic_load(&transcode_init_done)) {
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
    }
    term_exit();
    /* let the Windows CtrlHandler know the main thread is done */
    ffmpeg_exited = 1;
}
575 
/* iterate over all output streams in all output files;
 * pass NULL to start iteration */
/* NOTE(review): the signature line is elided in this extraction — an
 * OutputStream *-returning iterator taking OutputStream *prev.  Returns
 * the next stream after prev across all output files, or NULL when the
 * iteration is exhausted. */
{
    /* resume after prev, or start at file 0 / stream 0 */
    int of_idx  = prev ? prev->file_index : 0;
    int ost_idx = prev ? prev->index + 1  : 0;

    for (; of_idx < nb_output_files; of_idx++) {
        OutputFile *of = output_files[of_idx];
        if (ost_idx < of->nb_streams)
            return of->streams[ost_idx];

        /* move on to the first stream of the next file */
        ost_idx = 0;
    }

    return NULL;
}
593 
/* NOTE(review): the signature line is elided in this extraction — an
 * InputStream *-returning iterator taking InputStream *prev.  Iterates
 * over all input streams in all input files; pass NULL to start,
 * returns NULL when exhausted. */
{
    /* resume after prev, or start at file 0 / stream 0 */
    int if_idx  = prev ? prev->file_index    : 0;
    int ist_idx = prev ? prev->st->index + 1 : 0;

    for (; if_idx < nb_input_files; if_idx++) {
        InputFile *f = input_files[if_idx];
        if (ist_idx < f->nb_streams)
            return f->streams[ist_idx];

        /* move on to the first stream of the next file */
        ist_idx = 0;
    }

    return NULL;
}
609 
/* NOTE(review): the signature line is elided in this extraction.  The
 * visible loop walks every entry of dictionary b; the loop body — which
 * presumably removes each such key from another dictionary (cf. the
 * option-consumption helpers used with assert_avoptions()) — is also
 * elided.  TODO confirm against the full source. */
{
    const AVDictionaryEntry *t = NULL;

    while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
    }
}
618 
/* NOTE(review): the signature line is elided in this extraction; the body
 * operates on an AVDictionary *m of leftover options.  If any option in m
 * was not consumed, log it as fatal and abort the program. */
{
    const AVDictionaryEntry *t;
    if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
        exit_program(1);
    }
}
627 
/* Abort the program because an experimental codec was requested without
 * the required opt-in.  The parameters identify the codec and whether it
 * was to be used for encoding; they are not inspected in this body. */
static void abort_codec_experimental(const AVCodec *c, int encoder)
{
    exit_program(1);
}
632 
/* With -benchmark_all, log the user/sys/real time spent since the last
 * call, labelled by the printf-style fmt arguments; a NULL fmt only
 * resets the reference timestamp.
 * NOTE(review): the line obtaining the current BenchmarkTimeStamps "t"
 * and the head of the av_log() call (with the user/sys time-difference
 * arguments) are elided in this extraction. */
static void update_benchmark(const char *fmt, ...)
{
    if (do_benchmark_all) {
        va_list va;
        char buf[1024];

        if (fmt) {
            va_start(va, fmt);
            vsnprintf(buf, sizeof(buf), fmt, va);
            va_end(va);
            "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
            t.real_usec - current_time.real_usec, buf);
        }
        current_time = t;
    }
}
653 
/* NOTE(review): the signature line is elided in this extraction; the body
 * operates on an OutputStream *ost.  Marks the stream's encoder as
 * finished and, when the stream participates in a sync queue, signals EOF
 * to that queue. */
{
    OutputFile *of = output_files[ost->file_index];
    ost->finished |= ENCODER_FINISHED;

    if (ost->sq_idx_encode >= 0)
        sq_send(of->sq_encode, ost->sq_idx_encode, SQFRAME(NULL));
}
662 
/* NOTE(review): the signature line is elided in this extraction; the body
 * takes an OutputStream *ost and a timestamp ts in timebase tb.
 * Returns 0 when the file's -t recording time has been reached (a line in
 * the taken branch — presumably closing the output stream — is elided),
 * 1 when recording should continue. */
{
    OutputFile *of = output_files[ost->file_index];

    if (of->recording_time != INT64_MAX &&
        av_compare_ts(ts, tb, of->recording_time, AV_TIME_BASE_Q) >= 0) {
        return 0;
    }
    return 1;
}
674 
/* NOTE(review): the head of the signature is elided in this extraction;
 * the visible tail takes an AVFrame *frame, and the body uses OutputFile
 * *of and OutputStream *ost.  Rescales frame->pts from the filter time
 * base into the encoder time base (shifted by the output start time) and
 * returns the same pts as a higher-precision double. */
    AVFrame *frame)
{
    double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
    const int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ?
                               0 : of->start_time;

    AVCodecContext *const enc = ost->enc_ctx;

    AVRational        tb = enc->time_base;
    AVRational filter_tb = frame->time_base;
    /* widen the denominator to keep extra fractional precision */
    const int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);

    if (frame->pts == AV_NOPTS_VALUE)
        goto early_exit;

    tb.den <<= extra_bits;
    /* NOTE(review): the subtracted term (presumably the start_time
     * rescaled into tb) is elided in this extraction */
    float_pts = av_rescale_q(frame->pts, filter_tb, tb) -
    float_pts /= 1 << extra_bits;
    // avoid exact midoints to reduce the chance of rounding differences, this
    // can be removed in case the fps code is changed to work with integers
    float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);

    frame->pts = av_rescale_q(frame->pts, filter_tb, enc->time_base) -
                 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
    frame->time_base = enc->time_base;

early_exit:

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
               frame ? av_ts2str(frame->pts) : "NULL",
               (enc && frame) ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL",
               float_pts,
               enc ? enc->time_base.num : -1,
               enc ? enc->time_base.den : -1);
    }

    return float_pts;
}
716 
718  char *error, int error_len);
719 
/* NOTE(review): the head of the signature is elided in this extraction;
 * the visible tail takes "unsigned int fatal", and the body uses
 * OutputStream *ost and AVFrame *frame.  Idempotently initializes the
 * output stream via init_output_stream(); on failure logs the error and,
 * when fatal is set, aborts the program.  Returns 0 or a negative
 * AVERROR. */
    unsigned int fatal)
{
    int ret = AVERROR_BUG;
    char error[1024] = {0};

    /* already initialized — nothing to do */
    if (ost->initialized)
        return 0;

    ret = init_output_stream(ost, frame, error, sizeof(error));
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
               ost->file_index, ost->index, error);

        if (fatal)
            exit_program(1);
    }

    return ret;
}
740 
741 static double psnr(double d)
742 {
743  return -10.0 * log10(d);
744 }
745 
/* Extract per-packet quality statistics (quantizer, picture type, error
 * values) from the encoded packet's side data and, when write_vstats is
 * set, append a line to the -vstats file.
 * NOTE(review): the opening brace and the statement initializing "sd"
 * (a packet side-data lookup; only its trailing "NULL);" argument is
 * visible) are elided in this extraction. */
static void update_video_stats(OutputStream *ost, const AVPacket *pkt, int write_vstats)
    NULL);
    AVCodecContext *enc = ost->enc_ctx;
    int64_t frame_number;
    double ti1, bitrate, avg_bitrate;

    /* side-data layout: quality (32 bit LE), picture type byte,
     * error count byte, then 64-bit LE error values — per the visible
     * offsets below; TODO confirm against AV_PKT_DATA_QUALITY_STATS docs */
    ost->quality   = sd ? AV_RL32(sd) : -1;
    ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;

    for (int i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
        if (sd && i < sd[5])
            ost->error[i] = AV_RL64(sd + 8 + 8*i);
        else
            ost->error[i] = -1;
    }

    if (!write_vstats)
        return;

    /* this is executed just the first time update_video_stats is called */
    if (!vstats_file) {
        vstats_file = fopen(vstats_filename, "w");
        if (!vstats_file) {
            perror("fopen");
            exit_program(1);
        }
    }

    frame_number = ost->packets_encoded;
    if (vstats_version <= 1) {
        fprintf(vstats_file, "frame= %5"PRId64" q= %2.1f ", frame_number,
                ost->quality / (float)FF_QP2LAMBDA);
    } else {
        fprintf(vstats_file, "out= %2d st= %2d frame= %5"PRId64" q= %2.1f ", ost->file_index, ost->index, frame_number,
                ost->quality / (float)FF_QP2LAMBDA);
    }

    if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
        fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

    fprintf(vstats_file,"f_size= %6d ", pkt->size);
    /* compute pts value */
    ti1 = pkt->dts * av_q2d(ost->mux_timebase);
    if (ti1 < 0.01)
        ti1 = 0.01;

    bitrate     = (pkt->size * 8) / av_q2d(enc->time_base) / 1000.0;
    avg_bitrate = (double)(ost->data_size_enc * 8) / ti1 / 1000.0;
    fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
            (double)ost->data_size_enc / 1024, ti1, bitrate, avg_bitrate);
    fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
}
800 
/* NOTE(review): the signature line is elided in this extraction; the body
 * uses OutputFile *of, OutputStream *ost and AVFrame *frame.  Sends frame
 * to the encoder (NULL frame = flush) and drains all resulting packets to
 * the muxer.  Returns 0 while the encoder wants more input, AVERROR_EOF
 * at end of stream, or a negative error. */
{
    AVCodecContext *enc = ost->enc_ctx;
    AVPacket *pkt = ost->pkt;
    const char *type_desc = av_get_media_type_string(enc->codec_type);
    const char *action = frame ? "encode" : "flush";
    int ret;

    if (frame) {
        ost->frames_encoded++;
        ost->samples_encoded += frame->nb_samples;

        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "encoder <- type:%s "
                   "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
                   type_desc,
                   av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
                   enc->time_base.num, enc->time_base.den);
        }
    }

    /* NOTE(review): one line elided here (presumably a benchmark-reset
     * call before submitting to the encoder) */

    ret = avcodec_send_frame(enc, frame);
    /* AVERROR_EOF while flushing is expected, not an error */
    if (ret < 0 && !(ret == AVERROR_EOF && !frame)) {
        av_log(NULL, AV_LOG_ERROR, "Error submitting %s frame to the encoder\n",
               type_desc);
        return ret;
    }

    while (1) {
        /* NOTE(review): the packet-receive call assigning ret is elided
         * in this extraction */
        update_benchmark("%s_%s %d.%d", action, type_desc,
                         ost->file_index, ost->index);

        /* if two pass, output log on success and EOF */
        if ((ret >= 0 || ret == AVERROR_EOF) && ost->logfile && enc->stats_out)
            fprintf(ost->logfile, "%s", enc->stats_out);

        if (ret == AVERROR(EAGAIN)) {
            av_assert0(frame); // should never happen during flushing
            return 0;
        } else if (ret == AVERROR_EOF) {
            of_output_packet(of, pkt, ost, 1);
            return ret;
        } else if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "%s encoding failed\n", type_desc);
            return ret;
        }

        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "encoder -> type:%s "
                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s "
                   "duration:%s duration_time:%s\n",
                   type_desc,
                   /* NOTE(review): remaining arguments and the closing
                    * ");" are elided in this extraction */
        }

        av_packet_rescale_ts(pkt, enc->time_base, ost->mux_timebase);

        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "encoder -> type:%s "
                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s "
                   "duration:%s duration_time:%s\n",
                   type_desc,
                   /* NOTE(review): remaining arguments and the closing
                    * ");" are elided in this extraction */
        }

        ost->data_size_enc += pkt->size;

        if (enc->codec_type == AVMEDIA_TYPE_VIDEO)
            /* NOTE(review): the statement here (presumably the
             * update_video_stats() call) is elided in this extraction */

        ost->packets_encoded++;

        of_output_packet(of, pkt, ost, 0);
    }

    av_assert0(0);
}
885 
/* NOTE(review): the head of the signature is elided in this extraction;
 * the visible tail takes an AVFrame *frame, and the body uses OutputFile
 * *of and OutputStream *ost.  Routes a frame to the encoder either
 * directly or through the file's sync queue, then drains the queue and
 * encodes every frame it releases. */
    AVFrame *frame)
{
    int ret;

    /* stream not in a sync queue: encode directly */
    if (ost->sq_idx_encode < 0)
        return encode_frame(of, ost, frame);

    if (frame) {
        ret = av_frame_ref(ost->sq_frame, frame);
        if (ret < 0)
            return ret;
        frame = ost->sq_frame;
    }

    ret = sq_send(of->sq_encode, ost->sq_idx_encode,
                  SQFRAME(frame));
    if (ret < 0) {
        if (frame)
            /* NOTE(review): the statement here (presumably unreferencing
             * ost->sq_frame) is elided in this extraction */
        if (ret != AVERROR_EOF)
            return ret;
    }

    while (1) {
        AVFrame *enc_frame = ost->sq_frame;

        ret = sq_receive(of->sq_encode, ost->sq_idx_encode,
                         SQFRAME(enc_frame));
        if (ret == AVERROR_EOF) {
            /* NULL frame signals a flush to encode_frame() */
            enc_frame = NULL;
        } else if (ret < 0) {
            return (ret == AVERROR(EAGAIN)) ? 0 : ret;
        }

        ret = encode_frame(of, ost, enc_frame);
        if (enc_frame)
            av_frame_unref(enc_frame);
        if (ret < 0) {
            if (ret == AVERROR_EOF)
                /* NOTE(review): the statement here (presumably closing
                 * the output stream) is elided in this extraction */
            return ret;
        }
    }
}
931 
/* NOTE(review): the head of the signature is elided in this extraction;
 * the visible tail takes an AVFrame *frame, and the body uses OutputFile
 * *of and OutputStream *ost.  Rescales the audio frame's pts into the
 * encoder time base (or synthesizes it from next_pts), honors the
 * recording-time limit, and submits the frame for encoding. */
    AVFrame *frame)
{
    AVCodecContext *enc = ost->enc_ctx;
    int ret;

    if (frame->pts == AV_NOPTS_VALUE)
        /* no timestamp from upstream: continue from the running counter */
        frame->pts = ost->next_pts;
    else {
        int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
        /* NOTE(review): the subtracted term (presumably start_time
         * rescaled into the encoder time base) is elided here */
        frame->pts =
            av_rescale_q(frame->pts, frame->time_base, enc->time_base) -
    }
    frame->time_base = enc->time_base;

    if (!check_recording_time(ost, frame->pts, frame->time_base))
        return;

    /* audio pts advances by the number of samples in the frame */
    ost->next_pts = frame->pts + frame->nb_samples;

    /* NOTE(review): the statement assigning ret (presumably the
     * submit_encode_frame() call) is elided in this extraction */
    if (ret < 0 && ret != AVERROR_EOF)
        exit_program(1);
}
957 
/* Encode one subtitle and emit the resulting packet(s) to the muxer.
 * Some subtitle codecs need two packets per subtitle (draw + clear), in
 * which case the loop below runs twice with num_rects forced to 0 on the
 * second pass. */
static void do_subtitle_out(OutputFile *of,
                            OutputStream *ost,
                            AVSubtitle *sub)
{
    int subtitle_out_max_size = 1024 * 1024;
    int subtitle_out_size, nb, i, ret;
    AVCodecContext *enc;
    AVPacket *pkt = ost->pkt;
    int64_t pts;

    if (sub->pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
        if (exit_on_error)
            exit_program(1);
        return;
    }

    enc = ost->enc_ctx;

    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    /* NOTE(review): the condition line is elided in this extraction —
     * presumably a codec-id test selecting the two-packet (DVB) path */
        nb = 2;
    else
        nb = 1;

    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    pts = sub->pts;
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
        pts -= output_files[ost->file_index]->start_time;
    for (i = 0; i < nb; i++) {
        unsigned save_num_rects = sub->num_rects;

        /* NOTE(review): the condition line is elided in this extraction —
         * presumably the recording-time check guarding this return */
            return;

        ret = av_new_packet(pkt, subtitle_out_max_size);
        if (ret < 0)
            report_and_exit(AVERROR(ENOMEM));

        sub->pts = pts;
        // start_display_time is required to be 0
        sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time -= sub->start_display_time;
        sub->start_display_time = 0;
        /* second (clear) packet carries no rectangles */
        if (i == 1)
            sub->num_rects = 0;

        ost->frames_encoded++;

        subtitle_out_size = avcodec_encode_subtitle(enc, pkt->data, pkt->size, sub);
        if (i == 1)
            sub->num_rects = save_num_rects;
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
            exit_program(1);
        }

        av_shrink_packet(pkt, subtitle_out_size);
        pkt->pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
        pkt->duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
            if (i == 0)
                pkt->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
            else
                pkt->pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        }
        pkt->dts = pkt->pts;
        of_output_packet(of, pkt, ost, 0);
    }
}
1032 
1034  const AVFrame *in_picture, int dup_idx)
1035 {
1036  double pts_time;
1037 
1038  if (kf->ref_pts == AV_NOPTS_VALUE)
1039  kf->ref_pts = in_picture->pts;
1040 
1041  pts_time = (in_picture->pts - kf->ref_pts) * av_q2d(tb);
1042  if (kf->index < kf->nb_pts &&
1043  av_compare_ts(in_picture->pts, tb, kf->pts[kf->index], AV_TIME_BASE_Q) >= 0) {
1044  kf->index++;
1045  goto force_keyframe;
1046  } else if (kf->pexpr) {
1047  double res;
1048  kf->expr_const_values[FKF_T] = pts_time;
1049  res = av_expr_eval(kf->pexpr,
1050  kf->expr_const_values, NULL);
1051  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1052  kf->expr_const_values[FKF_N],
1055  kf->expr_const_values[FKF_T],
1057  res);
1058 
1059  kf->expr_const_values[FKF_N] += 1;
1060 
1061  if (res) {
1064  kf->expr_const_values[FKF_N_FORCED] += 1;
1065  goto force_keyframe;
1066  }
1067  } else if (kf->type == KF_FORCE_SOURCE &&
1068  in_picture->key_frame == 1 && !dup_idx) {
1069  goto force_keyframe;
1070  } else if (kf->type == KF_FORCE_SOURCE_NO_DROP && !dup_idx) {
1071  kf->dropped_keyframe = 0;
1072  if ((in_picture->key_frame == 1) || kf->dropped_keyframe)
1073  goto force_keyframe;
1074  }
1075 
1076  return AV_PICTURE_TYPE_NONE;
1077 
1078 force_keyframe:
1079  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1080  return AV_PICTURE_TYPE_I;
1081 }
1082 
/* May modify/reset next_picture */
/* Encode one video frame for the given output stream, applying the
 * configured vsync method: depending on the drift between the incoming
 * frame's pts and the expected output pts, the frame may be dropped,
 * emitted once, or duplicated (re-emitting the previous frame).  A NULL
 * next_picture flushes, emitting a predicted number of trailing
 * duplicates. */
static void do_video_out(OutputFile *of,
                         OutputStream *ost,
                         AVFrame *next_picture)
{
    int ret;
    AVCodecContext *enc = ost->enc_ctx;
    AVRational frame_rate;
    int64_t nb_frames, nb0_frames, i;
    double delta, delta0;
    double duration = 0;
    InputStream *ist = ost->ist;
    AVFilterContext *filter = ost->filter->filter;

    init_output_stream_wrapper(ost, next_picture, 1);

    /* nominal frame duration, in encoder time-base units */
    frame_rate = av_buffersink_get_frame_rate(filter);
    if (frame_rate.num > 0 && frame_rate.den > 0)
        duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));

    if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
        duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));

    /* with no filtering configured, trust the input frame's own duration */
    if (!ost->filters_script &&
        !ost->filters &&
        (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
        next_picture &&
        ist &&
        lrintf(next_picture->duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
        duration = lrintf(next_picture->duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
    }

    if (!next_picture) {
        //end, flushing
        nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
                                          ost->last_nb0_frames[1],
                                          ost->last_nb0_frames[2]);
    } else {
        double sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture);
        /* delta0 is the "drift" between the input frame (next_picture) and
         * where it would fall in the output. */
        delta0 = sync_ipts - ost->next_pts;
        delta  = delta0 + duration;

        /* by default, we output a single frame */
        nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
        nb_frames = 1;

        if (delta0 < 0 &&
            delta > 0 &&
            ost->vsync_method != VSYNC_PASSTHROUGH &&
            ost->vsync_method != VSYNC_DROP) {
            if (delta0 < -0.6) {
                av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
            } else
                av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
            sync_ipts = ost->next_pts;
            duration += delta0;
            delta0 = 0;
        }

        switch (ost->vsync_method) {
        case VSYNC_VSCFR:
            if (ost->vsync_frame_number == 0 && delta0 >= 0.5) {
                av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
                delta = duration;
                delta0 = 0;
                ost->next_pts = llrint(sync_ipts);
            }
            /* fallthrough — VSCFR then applies the CFR dup/drop logic */
        case VSYNC_CFR:
            // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
            if (frame_drop_threshold && delta < frame_drop_threshold && ost->vsync_frame_number) {
                nb_frames = 0;
            } else if (delta < -1.1)
                nb_frames = 0;
            else if (delta > 1.1) {
                nb_frames = llrintf(delta);
                if (delta0 > 1.1)
                    nb0_frames = llrintf(delta0 - 0.6);
            }
            next_picture->duration = 1;
            break;
        case VSYNC_VFR:
            if (delta <= -0.6)
                nb_frames = 0;
            else if (delta > 0.6)
                ost->next_pts = llrint(sync_ipts);
            next_picture->duration = duration;
            break;
        case VSYNC_DROP:
        case VSYNC_PASSTHROUGH:
            next_picture->duration = duration;
            ost->next_pts = llrint(sync_ipts);
            break;
        default:
            av_assert0(0);
        }
    }

    /* shift the dup history and record this frame's dup count */
    memmove(ost->last_nb0_frames + 1,
            ost->last_nb0_frames,
            sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
    ost->last_nb0_frames[0] = nb0_frames;

    if (nb0_frames == 0 && ost->last_dropped) {
        nb_frames_drop++;
        /* NOTE(review): the head of the av_log() call is elided in this
         * extraction; only its format string and arguments are visible */
        "*** dropping frame %"PRId64" from stream %d at ts %"PRId64"\n",
        ost->vsync_frame_number, ost->st->index, ost->last_frame->pts);
    }
    if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
        if (nb_frames > dts_error_threshold * 30) {
            av_log(NULL, AV_LOG_ERROR, "%"PRId64" frame duplication too large, skipping\n", nb_frames - 1);
            nb_frames_drop++;
            return;
        }
        nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
        av_log(NULL, AV_LOG_VERBOSE, "*** %"PRId64" dup!\n", nb_frames - 1);
        if (nb_frames_dup > dup_warning) {
            av_log(NULL, AV_LOG_WARNING, "More than %"PRIu64" frames duplicated\n", dup_warning);
            dup_warning *= 10;
        }
    }
    ost->last_dropped = nb_frames == nb0_frames && next_picture;
    ost->kf.dropped_keyframe = ost->last_dropped && next_picture && next_picture->key_frame;

    /* duplicates frame if needed */
    for (i = 0; i < nb_frames; i++) {
        AVFrame *in_picture;

        /* the first nb0_frames iterations re-emit the previous frame */
        if (i < nb0_frames && ost->last_frame->buf[0]) {
            in_picture = ost->last_frame;
        } else
            in_picture = next_picture;

        if (!in_picture)
            return;

        in_picture->pts = ost->next_pts;

        if (!check_recording_time(ost, in_picture->pts, ost->enc_ctx->time_base))
            return;

        in_picture->quality = enc->global_quality;
        in_picture->pict_type = forced_kf_apply(&ost->kf, enc->time_base, in_picture, i);

        ret = submit_encode_frame(of, ost, in_picture);
        if (ret == AVERROR_EOF)
            break;
        else if (ret < 0)
            exit_program(1);

        ost->next_pts++;
        ost->vsync_frame_number++;
    }

    /* keep the last frame around for possible future duplication */
    av_frame_unref(ost->last_frame);
    if (next_picture)
        av_frame_move_ref(ost->last_frame, next_picture);
}
1243 
1244 /**
1245  * Get and encode new output from any of the filtergraphs, without causing
1246  * activity.
1247  *
1248  * @return 0 for success, <0 for severe errors
1249  */
1250 static int reap_filters(int flush)
1251 {
1252  AVFrame *filtered_frame = NULL;
1253 
1254  /* Reap all buffers present in the buffer sinks */
1255  for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
1256  OutputFile *of = output_files[ost->file_index];
 /* NOTE(review): this listing was extracted from a documentation render;
  * the declaration of the buffersink context assigned below (line 1257 in
  * the original) was dropped by the extraction. Verify against upstream
  * ffmpeg.c before editing. */
1258  AVCodecContext *enc = ost->enc_ctx;
1259  int ret = 0;
1260 
 /* Streams whose filtergraph is not yet configured have nothing to reap. */
1261  if (!ost->filter || !ost->filter->graph->graph)
1262  continue;
1263  filter = ost->filter->filter;
1264 
1265  /*
1266  * Unlike video, with audio the audio frame size matters.
1267  * Currently we are fully reliant on the lavfi filter chain to
1268  * do the buffering deed for us, and thus the frame size parameter
1269  * needs to be set accordingly. Where does one get the required
1270  * frame size? From the initialized AVCodecContext of an audio
1271  * encoder. Thus, if we have gotten to an audio stream, initialize
1272  * the encoder earlier than receiving the first AVFrame.
1273  */
1276 
1277  filtered_frame = ost->filtered_frame;
1278 
 /* Drain every frame currently buffered in this stream's sink. */
1279  while (1) {
1280  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1282  if (ret < 0) {
1283  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1285  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1286  } else if (flush && ret == AVERROR_EOF) {
 /* On flush, signal end-of-stream downstream; only done for
  * video sinks here (audio needs no explicit NULL frame). */
1288  do_video_out(of, ost, NULL);
1289  }
1290  break;
1291  }
1292  if (ost->finished) {
1293  av_frame_unref(filtered_frame);
1294  continue;
1295  }
1296 
1297  if (filtered_frame->pts != AV_NOPTS_VALUE) {
1299  ost->last_filter_pts = av_rescale_q(filtered_frame->pts, tb,
1300  AV_TIME_BASE_Q);
1301  filtered_frame->time_base = tb;
1302 
1303  if (debug_ts)
1304  av_log(NULL, AV_LOG_INFO, "filter_raw -> pts:%s pts_time:%s time_base:%d/%d\n",
1305  av_ts2str(filtered_frame->pts),
1306  av_ts2timestr(filtered_frame->pts, &tb),
1307  tb.num, tb.den);
1308  }
1309 
 /* Dispatch the frame to the media-type-specific output path. */
1310  switch (av_buffersink_get_type(filter)) {
1311  case AVMEDIA_TYPE_VIDEO:
1312  if (!ost->frame_aspect_ratio.num)
1313  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1314 
1315  do_video_out(of, ost, filtered_frame);
1316  break;
1317  case AVMEDIA_TYPE_AUDIO:
1318  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1319  enc->ch_layout.nb_channels != filtered_frame->ch_layout.nb_channels) {
1321  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1322  break;
1323  }
1324  do_audio_out(of, ost, filtered_frame);
1325  break;
1326  default:
1327  // TODO support subtitle filters
1328  av_assert0(0);
1329  }
1330 
1331  av_frame_unref(filtered_frame);
1332  }
1333  }
1334 
1335  return 0;
1336 }
1337 
 /**
  * Print the end-of-run statistics: per-type muxed byte totals, muxing
  * overhead relative to @p total_size, and (at verbose level) per-stream
  * demux/decode and encode/mux counters.
  *
  * NOTE(review): this listing was extracted from a documentation render and
  * several source lines were dropped (e.g. the AVCodecParameters declaration
  * that `par` refers to, and parts of two av_log argument lists). Verify
  * against upstream ffmpeg.c before editing.
  *
  * @param total_size  final size of the first output file in bytes,
  *                    or a negative value when unknown
  */
1338 static void print_final_stats(int64_t total_size)
1339 {
1340  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1341  uint64_t subtitle_size = 0;
1342  uint64_t data_size = 0;
1343  float percent = -1.0;
1344  int i, j;
1345  int pass1_used = 1;
1346 
 /* Accumulate muxed byte counts per media type across all output streams. */
1347  for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
1349  const uint64_t s = ost->data_size_mux;
1350 
1351  switch (par->codec_type) {
1352  case AVMEDIA_TYPE_VIDEO: video_size += s; break;
1353  case AVMEDIA_TYPE_AUDIO: audio_size += s; break;
1354  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += s; break;
1355  default: other_size += s; break;
1356  }
1357  extra_size += par->extradata_size;
1358  data_size += s;
1359  if (ost->enc_ctx &&
1360  (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1362  pass1_used = 0;
1363  }
1364 
 /* Muxing overhead: container bytes beyond the raw stream payload. */
1365  if (data_size && total_size>0 && total_size >= data_size)
1366  percent = 100.0 * (total_size - data_size) / data_size;
1367 
1368  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1369  video_size / 1024.0,
1370  audio_size / 1024.0,
1371  subtitle_size / 1024.0,
1372  other_size / 1024.0,
1373  extra_size / 1024.0);
1374  if (percent >= 0.0)
1375  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1376  else
1377  av_log(NULL, AV_LOG_INFO, "unknown");
1378  av_log(NULL, AV_LOG_INFO, "\n");
1379 
1380  /* print verbose per-stream stats */
1381  for (i = 0; i < nb_input_files; i++) {
1382  InputFile *f = input_files[i];
1383  uint64_t total_packets = 0, total_size = 0;
1384 
1385  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1386  i, f->ctx->url);
1387 
1388  for (j = 0; j < f->nb_streams; j++) {
1389  InputStream *ist = f->streams[j];
1390  enum AVMediaType type = ist->par->codec_type;
1391 
1392  total_size += ist->data_size;
1393  total_packets += ist->nb_packets;
1394 
1395  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1397  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1398  ist->nb_packets, ist->data_size);
1399 
1400  if (ist->decoding_needed) {
1401  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1402  ist->frames_decoded);
1403  if (type == AVMEDIA_TYPE_AUDIO)
1404  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1405  av_log(NULL, AV_LOG_VERBOSE, "; ");
1406  }
1407 
1408  av_log(NULL, AV_LOG_VERBOSE, "\n");
1409  }
1410 
1411  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1412  total_packets, total_size);
1413  }
1414 
1415  for (i = 0; i < nb_output_files; i++) {
1416  OutputFile *of = output_files[i];
1417  uint64_t total_packets = 0, total_size = 0;
1418 
1419  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1420  i, of->url);
1421 
1422  for (j = 0; j < of->nb_streams; j++) {
1423  OutputStream *ost = of->streams[j];
1424  enum AVMediaType type = ost->st->codecpar->codec_type;
1425 
1426  total_size += ost->data_size_mux;
1427  total_packets += atomic_load(&ost->packets_written);
1428 
1429  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1431  if (ost->enc_ctx) {
1432  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1433  ost->frames_encoded);
1434  if (type == AVMEDIA_TYPE_AUDIO)
1435  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1436  av_log(NULL, AV_LOG_VERBOSE, "; ");
1437  }
1438 
1439  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1440  atomic_load(&ost->packets_written), ost->data_size_mux);
1441 
1442  av_log(NULL, AV_LOG_VERBOSE, "\n");
1443  }
1444 
1445  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1446  total_packets, total_size);
1447  }
 /* Warn when nothing at all was written; pass-1 runs legitimately produce
  * an empty output, so the hint about -ss/-t/-frames is suppressed then. */
1448  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1449  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1450  if (pass1_used) {
1451  av_log(NULL, AV_LOG_WARNING, "\n");
1452  } else {
1453  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1454  }
1455  }
1456 }
1457 
 /**
  * Emit the periodic (or final) progress report: the one-line
  * frame/fps/size/time/bitrate/speed status to stderr/log, and the
  * machine-readable key=value form to the -progress AVIOContext.
  *
  * NOTE(review): extracted from a documentation render; several source lines
  * are missing (e.g. the av_bprint_init of `buf`, the copy_ts_first_pts
  * update body, and an av_log call head). Verify against upstream ffmpeg.c
  * before editing.
  *
  * @param is_last_report non-zero for the final report (also triggers
  *                       print_final_stats() and closes the progress log)
  * @param timer_start    transcode start time, in microseconds
  * @param cur_time       current time, in microseconds
  */
1458 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1459 {
1460  AVBPrint buf, buf_script;
1461  int64_t total_size = of_filesize(output_files[0]);
1462  int vid;
1463  double bitrate;
1464  double speed;
1465  int64_t pts = INT64_MIN + 1;
1466  static int64_t last_time = -1;
1467  static int first_report = 1;
1468  static int qp_histogram[52];
1469  int hours, mins, secs, us;
1470  const char *hours_sign;
1471  int ret;
1472  float t;
1473 
1474  if (!print_stats && !is_last_report && !progress_avio)
1475  return;
1476 
 /* Rate-limit intermediate reports to one per stats_period; the very first
  * report additionally waits until every output header has been dumped. */
1477  if (!is_last_report) {
1478  if (last_time == -1) {
1479  last_time = cur_time;
1480  }
1481  if (((cur_time - last_time) < stats_period && !first_report) ||
1482  (first_report && nb_output_dumped < nb_output_files))
1483  return;
1484  last_time = cur_time;
1485  }
1486 
 /* Wall-clock seconds elapsed since transcoding started. */
1487  t = (cur_time-timer_start) / 1000000.0;
1488 
1489  vid = 0;
1491  av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1492  for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
1493  const AVCodecContext * const enc = ost->enc_ctx;
1494  const float q = enc ? ost->quality / (float) FF_QP2LAMBDA : -1;
1495 
 /* Only the first video stream gets the full frame/fps line; further
  * video streams just append their quantizer value. */
1496  if (vid && ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
1497  av_bprintf(&buf, "q=%2.1f ", q);
1498  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1499  ost->file_index, ost->index, q);
1500  }
1501  if (!vid && ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
1502  float fps;
1503  uint64_t frame_number = atomic_load(&ost->packets_written);
1504 
1505  fps = t > 1 ? frame_number / t : 0;
1506  av_bprintf(&buf, "frame=%5"PRId64" fps=%3.*f q=%3.1f ",
1507  frame_number, fps < 9.95, fps, q);
1508  av_bprintf(&buf_script, "frame=%"PRId64"\n", frame_number);
1509  av_bprintf(&buf_script, "fps=%.2f\n", fps);
1510  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1511  ost->file_index, ost->index, q);
1512  if (is_last_report)
1513  av_bprintf(&buf, "L");
1514  if (qp_hist) {
1515  int j;
1516  int qp = lrintf(q);
1517  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1518  qp_histogram[qp]++;
1519  for (j = 0; j < 32; j++)
1520  av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1521  }
1522 
1523  if (enc && (enc->flags & AV_CODEC_FLAG_PSNR) &&
1524  (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1525  int j;
1526  double error, error_sum = 0;
1527  double scale, scale_sum = 0;
1528  double p;
1529  char type[3] = { 'Y','U','V' };
1530  av_bprintf(&buf, "PSNR=");
1531  for (j = 0; j < 3; j++) {
1532  if (is_last_report) {
1533  error = enc->error[j];
1534  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1535  } else {
1536  error = ost->error[j];
1537  scale = enc->width * enc->height * 255.0 * 255.0;
1538  }
 /* Chroma planes are subsampled: a quarter of the luma area. */
1539  if (j)
1540  scale /= 4;
1541  error_sum += error;
1542  scale_sum += scale;
1543  p = psnr(error / scale);
1544  av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1545  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1546  ost->file_index, ost->index, type[j] | 32, p);
1547  }
1548  p = psnr(error_sum / scale_sum);
1549  av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1550  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1551  ost->file_index, ost->index, p);
1552  }
1553  vid = 1;
1554  }
1555  /* compute min output value */
1556  if (ost->last_mux_dts != AV_NOPTS_VALUE) {
1557  pts = FFMAX(pts, ost->last_mux_dts);
1558  if (copy_ts) {
1559  if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1)
1563  }
1564  }
1565 
1566  if (is_last_report)
1567  nb_frames_drop += ost->last_dropped;
1568  }
1569 
 /* Break the output timestamp into h:m:s.us components for display. */
1570  secs = FFABS(pts) / AV_TIME_BASE;
1571  us = FFABS(pts) % AV_TIME_BASE;
1572  mins = secs / 60;
1573  secs %= 60;
1574  hours = mins / 60;
1575  mins %= 60;
1576  hours_sign = (pts < 0) ? "-" : "";
1577 
1578  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1579  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1580 
1581  if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1582  else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1583  if (pts == AV_NOPTS_VALUE) {
1584  av_bprintf(&buf, "N/A ");
1585  } else {
1586  av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1587  hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1588  }
1589 
1590  if (bitrate < 0) {
1591  av_bprintf(&buf, "bitrate=N/A");
1592  av_bprintf(&buf_script, "bitrate=N/A\n");
1593  }else{
1594  av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1595  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1596  }
1597 
1598  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1599  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1600  if (pts == AV_NOPTS_VALUE) {
1601  av_bprintf(&buf_script, "out_time_us=N/A\n");
1602  av_bprintf(&buf_script, "out_time_ms=N/A\n");
1603  av_bprintf(&buf_script, "out_time=N/A\n");
1604  } else {
1605  av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1606  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1607  av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1608  hours_sign, hours, mins, secs, us);
1609  }
1610 
1612  av_bprintf(&buf, " dup=%"PRId64" drop=%"PRId64, nb_frames_dup, nb_frames_drop);
1613  av_bprintf(&buf_script, "dup_frames=%"PRId64"\n", nb_frames_dup);
1614  av_bprintf(&buf_script, "drop_frames=%"PRId64"\n", nb_frames_drop);
1615 
1616  if (speed < 0) {
1617  av_bprintf(&buf, " speed=N/A");
1618  av_bprintf(&buf_script, "speed=N/A\n");
1619  } else {
1620  av_bprintf(&buf, " speed=%4.3gx", speed);
1621  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1622  }
1623 
 /* '\r' keeps intermediate reports on one line; final report gets '\n'. */
1624  if (print_stats || is_last_report) {
1625  const char end = is_last_report ? '\n' : '\r';
1626  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1627  fprintf(stderr, "%s %c", buf.str, end);
1628  } else
1629  av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1630 
1631  fflush(stderr);
1632  }
1633  av_bprint_finalize(&buf, NULL);
1634 
1635  if (progress_avio) {
1636  av_bprintf(&buf_script, "progress=%s\n",
1637  is_last_report ? "end" : "continue");
1638  avio_write(progress_avio, buf_script.str,
1639  FFMIN(buf_script.len, buf_script.size - 1));
1641  av_bprint_finalize(&buf_script, NULL);
1642  if (is_last_report) {
1643  if ((ret = avio_closep(&progress_avio)) < 0)
1645  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1646  }
1647  }
1648 
1649  first_report = 0;
1650 
1651  if (is_last_report)
1652  print_final_stats(total_size);
1653 }
1654 
 /* Body of ifilter_parameters_from_codecpar(): seed an InputFilter's media
  * parameters (format, sample rate, dimensions) directly from the stream's
  * AVCodecParameters when no decoded frame was ever seen.
  *
  * NOTE(review): the documentation extraction dropped the signature line and
  * the intervening call (original lines 1665-1666) whose result `ret` is
  * checked below -- as shown here, `ret` would be read uninitialized. Verify
  * against upstream ffmpeg.c before editing. */
1656 {
1657  int ret;
1658 
1659  // We never got any input. Set a fake format, which will
1660  // come from libavformat.
1661  ifilter->format = par->format;
1662  ifilter->sample_rate = par->sample_rate;
1663  ifilter->width = par->width;
1664  ifilter->height = par->height;
1667  if (ret < 0)
1668  return ret;
1669 
1670  return 0;
1671 }
1672 
 /**
  * Flush all encoders at end of input: signal EOF to the sync queues, give
  * never-initialized streams a last chance to be configured from codec
  * parameters, then submit a NULL (drain) frame to every encoder.
  *
  * NOTE(review): extracted from a documentation render; several source lines
  * are missing (an av_log head, a conditional guarding the `continue` paths).
  * Verify against upstream ffmpeg.c before editing.
  */
1673 static void flush_encoders(void)
1674 {
1675  int ret;
1676 
 /* First pass: tell every encode sync queue that no more frames come. */
1677  for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
1678  OutputFile *of = output_files[ost->file_index];
1679  if (ost->sq_idx_encode >= 0)
1680  sq_send(of->sq_encode, ost->sq_idx_encode, SQFRAME(NULL));
1681  }
1682 
 /* Second pass: drain each encoder. */
1683  for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
1684  AVCodecContext *enc = ost->enc_ctx;
1685  OutputFile *of = output_files[ost->file_index];
1686 
1687  if (!enc)
1688  continue;
1689 
1690  // Try to enable encoding with no input frames.
1691  // Maybe we should just let encoding fail instead.
1692  if (!ost->initialized) {
1693  FilterGraph *fg = ost->filter->graph;
1694 
1696  "Finishing stream %d:%d without any data written to it.\n",
1697  ost->file_index, ost->st->index);
1698 
 /* The filtergraph was never configured: fall back to the input
  * stream's codec parameters so the graph can be built. */
1699  if (ost->filter && !fg->graph) {
1700  int x;
1701  for (x = 0; x < fg->nb_inputs; x++) {
1702  InputFilter *ifilter = fg->inputs[x];
1703  if (ifilter->format < 0 &&
1704  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->par) < 0) {
1705  av_log(NULL, AV_LOG_ERROR, "Error copying paramerets from input stream\n");
1706  exit_program(1);
1707  }
1708  }
1709 
1711  continue;
1712 
1713  ret = configure_filtergraph(fg);
1714  if (ret < 0) {
1715  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1716  exit_program(1);
1717  }
1718 
1719  of_output_packet(of, ost->pkt, ost, 1);
1720  }
1721 
1723  }
1724 
1726  continue;
1727 
 /* Drain the encoder; AVERROR_EOF is the expected terminal result. */
1728  ret = submit_encode_frame(of, ost, NULL);
1729  if (ret != AVERROR_EOF)
1730  exit_program(1);
1731  }
1732 }
1733 
1734 /*
1735  * Check whether a packet from ist should be written into ost at this time
1736  *
1737  * Returns 1 when the packet should be written, 0 when it must be skipped:
1738  * the output stream is not fed by this input, its muxer already finished,
1739  * or the packet predates the output file's start time.
1740  *
1741  * NOTE(review): the function signature line was dropped by the
1742  * documentation extraction (this is check_output_constraints(ist, ost),
1743  * as called from the subtitle path below) -- verify against upstream.
1744  */
1738 {
1739  OutputFile *of = output_files[ost->file_index];
1740 
1741  if (ost->ist != ist)
1742  return 0;
1743 
1744  if (ost->finished & MUXER_FINISHED)
1745  return 0;
1746 
1747  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1748  return 0;
1749 
1750  return 1;
1751 }
1752 
 /* Stream-copy output path: forward an input packet to the muxer with its
  * timestamps rescaled from the input stream time base to the output mux
  * time base, honouring -copyinkf/-copypriorss and the per-file/per-input
  * recording-time limits. A NULL @p pkt means EOF and flushes the output
  * bitstream filters.
  *
  * NOTE(review): the signature line and a few statement lines (original
  * 1753, 1781, 1792, 1806 -- including the declaration/initialization of
  * `duration` used in the audio branch) were dropped by the documentation
  * extraction. Verify against upstream ffmpeg.c before editing. */
1754 {
1755  OutputFile *of = output_files[ost->file_index];
1756  InputFile *f = input_files [ist->file_index];
1757  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1758  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1759  AVPacket *opkt = ost->pkt;
1760 
1761  av_packet_unref(opkt);
1762  // EOF: flush output bitstream filters.
1763  if (!pkt) {
1764  of_output_packet(of, opkt, ost, 1);
1765  return;
1766  }
1767 
 /* Before the first keyframe, drop non-key packets unless -copyinkf. */
1768  if (!ost->streamcopy_started && !(pkt->flags & AV_PKT_FLAG_KEY) &&
1769  !ost->copy_initial_nonkeyframes)
1770  return;
1771 
1772  if (!ost->streamcopy_started && !ost->copy_prior_start) {
1773  if (pkt->pts == AV_NOPTS_VALUE ?
1774  ist->pts < ost->ts_copy_start :
1775  pkt->pts < av_rescale_q(ost->ts_copy_start, AV_TIME_BASE_Q, ist->st->time_base))
1776  return;
1777  }
1778 
 /* Stop copying once the output file's recording time is exceeded. */
1779  if (of->recording_time != INT64_MAX &&
1780  ist->pts >= of->recording_time + start_time) {
1782  return;
1783  }
1784 
 /* Likewise for the per-input-file -t limit. */
1785  if (f->recording_time != INT64_MAX) {
1786  start_time = 0;
1787  if (copy_ts) {
1788  start_time += f->start_time != AV_NOPTS_VALUE ? f->start_time : 0;
1789  start_time += start_at_zero ? 0 : f->start_time_effective;
1790  }
1791  if (ist->pts >= f->recording_time + start_time) {
1793  return;
1794  }
1795  }
1796 
1797  if (av_packet_ref(opkt, pkt) < 0)
1798  exit_program(1);
1799 
1800  if (pkt->pts != AV_NOPTS_VALUE)
1801  opkt->pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
1802 
1803  if (pkt->dts == AV_NOPTS_VALUE) {
1804  opkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
1805  } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
1807  if(!duration)
1808  duration = ist->par->frame_size;
 /* Audio dts is rescaled sample-accurately to avoid rounding drift. */
1809  opkt->dts = av_rescale_delta(ist->st->time_base, pkt->dts,
1810  (AVRational){1, ist->par->sample_rate}, duration,
1811  &ist->filter_in_rescale_delta_last, ost->mux_timebase);
1812  /* dts will be set immediately afterwards to what pts is now */
1813  opkt->pts = opkt->dts - ost_tb_start_time;
1814  } else
1815  opkt->dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
1816  opkt->dts -= ost_tb_start_time;
1817 
1818  opkt->duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
1819 
1820  of_output_packet(of, opkt, ost, 0);
1821 
1822  ost->streamcopy_started = 1;
1823 }
1824 
 /**
  * Update the global decode error statistics after a decode attempt and,
  * with -xerror, abort on decode errors or corrupt frames.
  *
  * NOTE(review): two source lines (original 1834-1835 -- the condition
  * opening the inner block and the av_log head) were dropped by the
  * documentation extraction; verify against upstream ffmpeg.c.
  *
  * @param ist        input stream the result belongs to (may be NULL)
  * @param got_output non-zero if a frame was produced
  * @param ret        decode return code (<0 counts as an error)
  */
1825 static void check_decode_result(InputStream *ist, int *got_output, int ret)
1826 {
 /* decode_error_stat[0] counts successes, [1] counts failures. */
1827  if (*got_output || ret<0)
1828  decode_error_stat[ret<0] ++;
1829 
1830  if (ret < 0 && exit_on_error)
1831  exit_program(1);
1832 
1833  if (*got_output && ist) {
1836  "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
1837  if (exit_on_error)
1838  exit_program(1);
1839  }
1840  }
1841 }
1842 
1843 // Filters can be configured only if the formats of all inputs are known.
 // Returns 1 when every audio/video input of the graph has a known format
 // (format >= 0), 0 otherwise. Non-A/V inputs are not required to be known.
 // NOTE(review): the signature line (ifilter_has_all_input_formats(fg)) was
 // dropped by the documentation extraction -- verify against upstream.
1845 {
1846  int i;
1847  for (i = 0; i < fg->nb_inputs; i++) {
1848  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
1849  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
1850  return 0;
1851  }
1852  return 1;
1853 }
1854 
 /**
  * Feed one decoded frame into an input of a filtergraph, (re)configuring
  * the graph first when the frame's parameters differ from what the graph
  * was built for. If not all graph inputs have known formats yet, the frame
  * is queued instead of being submitted.
  *
  * NOTE(review): extracted from a documentation render; a few source lines
  * are missing (the display-matrix side-data lookup opening the block below,
  * the parameter-refresh call inside the need_reinit block, and the
  * av_frame_clone assigned to `tmp`). Verify against upstream ffmpeg.c.
  *
  * @param ifilter        target filtergraph input
  * @param frame          decoded frame to submit (ownership per buffersrc flags)
  * @param keep_reference if non-zero, buffersrc keeps its own reference and
  *                       the caller's frame remains valid
  * @return 0 on success, a negative AVERROR code on failure
  */
1855 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame, int keep_reference)
1856 {
1857  FilterGraph *fg = ifilter->graph;
1858  AVFrameSideData *sd;
1859  int need_reinit, ret;
1860  int buffersrc_flags = AV_BUFFERSRC_FLAG_PUSH;
1861 
1862  if (keep_reference)
1863  buffersrc_flags |= AV_BUFFERSRC_FLAG_KEEP_REF;
1864 
1865  /* determine if the parameters for this input changed */
1866  need_reinit = ifilter->format != frame->format;
1867 
1868  switch (ifilter->ist->par->codec_type) {
1869  case AVMEDIA_TYPE_AUDIO:
1870  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
1871  av_channel_layout_compare(&ifilter->ch_layout, &frame->ch_layout);
1872  break;
1873  case AVMEDIA_TYPE_VIDEO:
1874  need_reinit |= ifilter->width != frame->width ||
1875  ifilter->height != frame->height;
1876  break;
1877  }
1878 
 /* With -reinit_filter 0, keep the existing graph despite param changes. */
1879  if (!ifilter->ist->reinit_filters && fg->graph)
1880  need_reinit = 0;
1881 
 /* A change of hardware frames context always forces a reinit. */
1882  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
1883  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
1884  need_reinit = 1;
1885 
 /* Likewise when the frame's display matrix side data changed. */
1887  if (!ifilter->displaymatrix || memcmp(sd->data, ifilter->displaymatrix, sizeof(int32_t) * 9))
1888  need_reinit = 1;
1889  } else if (ifilter->displaymatrix)
1890  need_reinit = 1;
1891 
1892  if (need_reinit) {
1894  if (ret < 0)
1895  return ret;
1896  }
1897 
1898  /* (re)init the graph if possible, otherwise buffer the frame and return */
1899  if (need_reinit || !fg->graph) {
1900  if (!ifilter_has_all_input_formats(fg)) {
1902  if (!tmp)
1903  return AVERROR(ENOMEM);
1904 
1905  ret = av_fifo_write(ifilter->frame_queue, &tmp, 1);
1906  if (ret < 0)
1907  av_frame_free(&tmp);
1908 
1909  return ret;
1910  }
1911 
 /* Drain the old graph completely before tearing it down. */
1912  ret = reap_filters(1);
1913  if (ret < 0 && ret != AVERROR_EOF) {
1914  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
1915  return ret;
1916  }
1917 
1918  ret = configure_filtergraph(fg);
1919  if (ret < 0) {
1920  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
1921  return ret;
1922  }
1923  }
1924 
1925  ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, buffersrc_flags);
1926  if (ret < 0) {
1927  if (ret != AVERROR_EOF)
1928  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
1929  return ret;
1930  }
1931 
1932  return 0;
1933 }
1934 
 /**
  * Signal end-of-stream on one filtergraph input. If the graph was never
  * configured, fall back to the input stream's codec parameters so the
  * graph can still be built; fail if the format remains unknown for an
  * audio/video input.
  *
  * NOTE(review): the line closing the configured-graph branch (original
  * 1942, presumably the call whose result is checked as `ret` below) was
  * dropped by the documentation extraction -- verify against upstream.
  *
  * @param ifilter filtergraph input to close
  * @param pts     EOF timestamp in the input stream time base
  * @return 0 on success, a negative AVERROR code on failure
  */
1935 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
1936 {
1937  int ret;
1938 
1939  ifilter->eof = 1;
1940 
1941  if (ifilter->filter) {
1943  if (ret < 0)
1944  return ret;
1945  } else {
1946  // the filtergraph was never configured
1947  if (ifilter->format < 0) {
1948  ret = ifilter_parameters_from_codecpar(ifilter, ifilter->ist->par);
1949  if (ret < 0)
1950  return ret;
1951  }
1952  if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
1953  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
1954  return AVERROR_INVALIDDATA;
1955  }
1956  }
1957 
1958  return 0;
1959 }
1960 
1961 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
1962 // There is the following difference: if you got a frame, you must call
1963 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
1964 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
1965 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
1966 {
1967  int ret;
1968 
1969  *got_frame = 0;
1970 
1971  if (pkt) {
1972  ret = avcodec_send_packet(avctx, pkt);
1973  // In particular, we don't expect AVERROR(EAGAIN), because we read all
1974  // decoded frames with avcodec_receive_frame() until done.
1975  if (ret < 0 && ret != AVERROR_EOF)
1976  return ret;
1977  }
1978 
1979  ret = avcodec_receive_frame(avctx, frame);
1980  if (ret < 0 && ret != AVERROR(EAGAIN))
1981  return ret;
1982  if (ret >= 0)
1983  *got_frame = 1;
1984 
1985  return 0;
1986 }
1987 
 /* Body of send_frame_to_filters(): push one decoded frame into every
  * filtergraph input fed by this input stream. All but the last filter get
  * keep_reference=1 so the frame stays valid for the next submission.
  * AVERROR_EOF from a filter is deliberately ignored. Returns the first
  * real error, or the last submission's result.
  *
  * NOTE(review): the signature line and an av_log head (original 1988 and
  * 1998) were dropped by the documentation extraction -- verify against
  * upstream ffmpeg.c. */
1989 {
1990  int i, ret;
1991 
1992  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
1993  for (i = 0; i < ist->nb_filters; i++) {
1994  ret = ifilter_send_frame(ist->filters[i], decoded_frame, i < ist->nb_filters - 1);
1995  if (ret == AVERROR_EOF)
1996  ret = 0; /* ignore */
1997  if (ret < 0) {
1999  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2000  break;
2001  }
2002  }
2003  return ret;
2004 }
2005 
 /* Tail of the decode_audio() signature and its body: decode one audio
  * packet, derive/normalize the frame's pts, advance the stream's
  * next_pts/next_dts estimates by the decoded sample count, and hand the
  * frame to the filters.
  *
  * NOTE(review): heavily affected by the documentation extraction -- the
  * first signature line, the decoded_frame declaration, several statement
  * lines (2022, 2026, 2031, 2052, 2055, 2059-2062) are missing, including
  * the send_frame_to_filters() call whose result becomes `err`. Verify
  * against upstream ffmpeg.c before editing. */
2007  int *decode_failed)
2008 {
2010  AVCodecContext *avctx = ist->dec_ctx;
2011  int ret, err = 0;
2012  AVRational decoded_frame_tb;
2013 
2015  ret = decode(avctx, decoded_frame, got_output, pkt);
2016  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2017  if (ret < 0)
2018  *decode_failed = 1;
2019 
2020  if (ret >= 0 && avctx->sample_rate <= 0) {
2021  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2023  }
2024 
2025  if (ret != AVERROR_EOF)
2027 
2028  if (!*got_output || ret < 0)
2029  return ret;
2030 
2032  ist->frames_decoded++;
2033 
2034  /* increment next_dts to use for the case where the input stream does not
2035  have timestamps or there are multiple frames in the packet */
2036  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2037  avctx->sample_rate;
2038  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2039  avctx->sample_rate;
2040 
 /* Choose the best available pts source: the frame itself, the packet,
  * or the stream's dts estimate (already in AV_TIME_BASE units). */
2041  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2042  decoded_frame_tb = ist->st->time_base;
2043  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2044  decoded_frame->pts = pkt->pts;
2045  decoded_frame_tb = ist->st->time_base;
2046  }else {
2047  decoded_frame->pts = ist->dts;
2048  decoded_frame_tb = AV_TIME_BASE_Q;
2049  }
 /* A gap larger than the packet duration indicates a timestamp
  * discontinuity between consecutive packets. */
2050  if (pkt && pkt->duration && ist->prev_pkt_pts != AV_NOPTS_VALUE &&
2051  pkt->pts != AV_NOPTS_VALUE && pkt->pts - ist->prev_pkt_pts > pkt->duration)
2053  if (pkt)
2054  ist->prev_pkt_pts = pkt->pts;
 /* Rescale pts sample-accurately into the 1/sample_rate time base. */
2056  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2057  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2058  (AVRational){1, avctx->sample_rate});
2061 
2063  return err < 0 ? err : ret;
2064 }
2065 
 /**
  * Decode one video packet, reconstruct a usable timestamp (best-effort
  * pts, constant-framerate counter, or buffered dts at EOF), retrieve
  * hwaccel frames to system memory, and pass the frame to the filters.
  *
  * NOTE(review): extracted from a documentation render; multiple source
  * lines are missing (the decoded_frame declaration, the decode() call that
  * sets `ret` before update_benchmark, parts of two av_log argument lists,
  * the hwaccel condition, and the trailing send_frame_to_filters() call
  * that sets `err`). Verify against upstream ffmpeg.c before editing.
  *
  * @param duration_pts  receives the decoded frame's duration in stream tb
  * @param eof           non-zero when draining at end of input
  */
2066 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2067  int *decode_failed)
2068 {
2070  int i, ret = 0, err = 0;
2071  int64_t best_effort_timestamp;
2072  int64_t dts = AV_NOPTS_VALUE;
2073 
2074  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2075  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2076  // skip the packet.
2077  if (!eof && pkt && pkt->size == 0)
2078  return 0;
2079 
2080  if (ist->dts != AV_NOPTS_VALUE)
2081  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2082  if (pkt) {
2083  pkt->dts = dts; // ffmpeg.c probably shouldn't do this
2084  }
2085 
2086  // The old code used to set dts on the drain packet, which does not work
2087  // with the new API anymore.
2088  if (eof) {
 /* Remember the dts so frames drained after EOF can still be stamped. */
2089  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2090  if (!new)
2091  return AVERROR(ENOMEM);
2092  ist->dts_buffer = new;
2093  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2094  }
2095 
2098  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2099  if (ret < 0)
2100  *decode_failed = 1;
2101 
2102  // The following line may be required in some cases where there is no parser
2103  // or the parser does not has_b_frames correctly
2104  if (ist->par->video_delay < ist->dec_ctx->has_b_frames) {
2105  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2106  ist->par->video_delay = ist->dec_ctx->has_b_frames;
2107  } else
2109  "video_delay is larger in decoder than demuxer %d > %d.\n"
2110  "If you want to help, upload a sample "
2111  "of this file to https://streams.videolan.org/upload/ "
2112  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2113  ist->dec_ctx->has_b_frames,
2114  ist->par->video_delay);
2115  }
2116 
2117  if (ret != AVERROR_EOF)
2119 
2120  if (*got_output && ret >= 0) {
2121  if (ist->dec_ctx->width != decoded_frame->width ||
2122  ist->dec_ctx->height != decoded_frame->height ||
2123  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2124  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2128  ist->dec_ctx->width,
2129  ist->dec_ctx->height,
2130  ist->dec_ctx->pix_fmt);
2131  }
2132  }
2133 
2134  if (!*got_output || ret < 0)
2135  return ret;
2136 
2137  if(ist->top_field_first>=0)
2139 
2140  ist->frames_decoded++;
2141 
 /* Download hwaccel-decoded frames to system memory for filtering. */
2143  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2144  if (err < 0)
2145  goto fail;
2146  }
2147 
2148  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2149  *duration_pts = decoded_frame->duration;
2150 
 /* With a forced input framerate, timestamps are simply counted. */
2151  if (ist->framerate.num)
2152  best_effort_timestamp = ist->cfr_next_pts++;
2153 
 /* At EOF with no timestamp, reuse the dts values buffered above. */
2154  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2155  best_effort_timestamp = ist->dts_buffer[0];
2156 
2157  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2158  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2159  ist->nb_dts_buffer--;
2160  }
2161 
2162  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2163  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2164 
2165  if (ts != AV_NOPTS_VALUE)
2166  ist->next_pts = ist->pts = ts;
2167  }
2168 
2169  if (debug_ts) {
2170  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2171  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2172  ist->st->index, av_ts2str(decoded_frame->pts),
2174  best_effort_timestamp,
2175  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2177  ist->st->time_base.num, ist->st->time_base.den);
2178  }
2179 
2180  if (ist->st->sample_aspect_ratio.num)
2182 
2184 
2185 fail:
2187  return err < 0 ? err : ret;
2188 }
2189 
 /* Tail of the transcode_subtitles() signature and its body: decode one
  * subtitle packet, optionally fix overlapping display durations
  * (-fix_sub_duration), feed the result to sub2video or queue it for
  * not-yet-initialized filtergraphs, and encode it into every eligible
  * subtitle output stream.
  *
  * NOTE(review): extracted from a documentation render; the first signature
  * line, the AVSubtitle declaration, the avcodec_decode_subtitle2 call head
  * and several other lines (2198, 2213, 2215, 2222, 2234, 2238, 2259 --
  * including the avsubtitle_free in the cleanup path) are missing. Verify
  * against upstream ffmpeg.c before editing. */
2191  int *decode_failed)
2192 {
2194  int free_sub = 1;
2196  &subtitle, got_output, pkt);
2197 
2199 
2200  if (ret < 0 || !*got_output) {
2201  *decode_failed = 1;
 /* An empty packet at EOF flushes the sub2video machinery. */
2202  if (!pkt->size)
2203  sub2video_flush(ist);
2204  return ret;
2205  }
2206 
2207  if (ist->fix_sub_duration) {
2208  int end = 1;
 /* Clip the previous subtitle so it ends when this one starts. */
2209  if (ist->prev_sub.got_output) {
2210  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2211  1000, AV_TIME_BASE);
2212  if (end < ist->prev_sub.subtitle.end_display_time) {
2214  "Subtitle duration reduced from %"PRId32" to %d%s\n",
2216  end <= 0 ? ", dropping it" : "");
2217  ist->prev_sub.subtitle.end_display_time = end;
2218  }
2219  }
 /* Delay output by one subtitle: emit the previous one now. */
2220  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2221  FFSWAP(int, ret, ist->prev_sub.ret);
2223  if (end <= 0)
2224  goto out;
2225  }
2226 
2227  if (!*got_output)
2228  return ret;
2229 
2230  if (ist->sub2video.frame) {
2231  sub2video_update(ist, INT64_MIN, &subtitle);
2232  } else if (ist->nb_filters) {
2233  if (!ist->sub2video.sub_queue)
2235  if (!ist->sub2video.sub_queue)
2236  report_and_exit(AVERROR(ENOMEM));
2237 
2239  if (ret < 0)
2240  exit_program(1);
 /* Ownership moved into the queue; do not free the subtitle below. */
2241  free_sub = 0;
2242  }
2243 
2244  if (!subtitle.num_rects)
2245  goto out;
2246 
2247  ist->frames_decoded++;
2248 
2249  for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
2250  if (!check_output_constraints(ist, ost) || !ost->enc_ctx
2251  || ost->enc_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE)
2252  continue;
2253 
2254  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2255  }
2256 
2257 out:
2258  if (free_sub)
2260  return ret;
2261 }
2262 
/*
 * Signal EOF to every filtergraph input fed by this input stream, using
 * the stream's current pts rescaled from AV_TIME_BASE_Q to the stream
 * time base as the EOF timestamp.
 * Returns 0 on success or the first negative error from ifilter_send_eof().
 * NOTE(review): the signature line preceding the opening brace is missing
 * from this view (doc-extraction artifact).
 */
2264 {
2265  int i, ret;
2266  /* TODO keep pts also in stream time base to avoid converting back */
2267  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2269 
2270  for (i = 0; i < ist->nb_filters; i++) {
2271  ret = ifilter_send_eof(ist->filters[i], pts);
2272  if (ret < 0)
2273  return ret;
2274  }
2275  return 0;
2276 }
2277 
2278 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/**
 * Process one packet from an input stream: decode it (audio, video or
 * subtitle) or, for stream copy, just advance the stream's timestamp
 * bookkeeping, then forward data to matching stream-copy outputs.
 *
 * @param pkt    packet to process, or NULL to flush the decoder at EOF
 * @param no_eof when flushing, do not signal EOF to the attached filters
 *               (used when looping the input)
 * @return non-zero while more output is expected, 0 once EOF was reached
 * NOTE(review): several interior lines are missing from this view
 * (doc-extraction artifact); comments cover only the visible code.
 */
2279 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2280 {
2281  const AVCodecParameters *par = ist->par;
2282  int ret = 0;
2283  int repeating = 0;
2284  int eof_reached = 0;
2285 
2286  AVPacket *avpkt = ist->pkt;
2287 
 /* One-time timestamp initialization on the first packet seen. */
2288  if (!ist->saw_first_ts) {
2289  ist->first_dts =
2290  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2291  ist->pts = 0;
2292  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2293  ist->first_dts =
2294  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2295  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2296  }
2297  ist->saw_first_ts = 1;
2298  }
2299 
2300  if (ist->next_dts == AV_NOPTS_VALUE)
2301  ist->next_dts = ist->dts;
2302  if (ist->next_pts == AV_NOPTS_VALUE)
2303  ist->next_pts = ist->pts;
2304 
 /* Work on a private reference so the caller's packet stays untouched. */
2305  if (pkt) {
2306  av_packet_unref(avpkt);
2307  ret = av_packet_ref(avpkt, pkt);
2308  if (ret < 0)
2309  return ret;
2310  }
2311 
2312  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2313  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2315  ist->next_pts = ist->pts = ist->dts;
2316  }
2317 
2318  // while we have more to decode or while the decoder did output something on EOF
2319  while (ist->decoding_needed) {
2320  int64_t duration_dts = 0;
2321  int64_t duration_pts = 0;
2322  int got_output = 0;
2323  int decode_failed = 0;
2324 
2325  ist->pts = ist->next_pts;
2326  ist->dts = ist->next_dts;
2327 
 /* Dispatch to the per-media-type decode helper; repeating means we
  * are draining the decoder, so no new packet is passed. */
2328  switch (par->codec_type) {
2329  case AVMEDIA_TYPE_AUDIO:
2330  ret = decode_audio (ist, repeating ? NULL : avpkt, &got_output,
2331  &decode_failed);
2332  av_packet_unref(avpkt);
2333  break;
2334  case AVMEDIA_TYPE_VIDEO:
2335  ret = decode_video (ist, repeating ? NULL : avpkt, &got_output, &duration_pts, !pkt,
2336  &decode_failed);
 /* Estimate the frame duration in AV_TIME_BASE units: prefer the
  * packet duration, then the decoder frame rate (with repeat_pict). */
2337  if (!repeating || !pkt || got_output) {
2338  if (pkt && pkt->duration) {
2339  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2340  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2341  int ticks = ist->last_pkt_repeat_pict >= 0 ?
2342  ist->last_pkt_repeat_pict + 1 :
2343  ist->dec_ctx->ticks_per_frame;
2344  duration_dts = ((int64_t)AV_TIME_BASE *
2345  ist->dec_ctx->framerate.den * ticks) /
2347  }
2348 
2349  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2350  ist->next_dts += duration_dts;
2351  }else
2352  ist->next_dts = AV_NOPTS_VALUE;
2353  }
2354 
2355  if (got_output) {
2356  if (duration_pts > 0) {
2357  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2358  } else {
2359  ist->next_pts += duration_dts;
2360  }
2361  }
2362  av_packet_unref(avpkt);
2363  break;
2364  case AVMEDIA_TYPE_SUBTITLE:
2365  if (repeating)
2366  break;
2367  ret = transcode_subtitles(ist, avpkt, &got_output, &decode_failed);
2368  if (!pkt && ret >= 0)
2369  ret = AVERROR_EOF;
2370  av_packet_unref(avpkt);
2371  break;
2372  default:
2373  return -1;
2374  }
2375 
2376  if (ret == AVERROR_EOF) {
2377  eof_reached = 1;
2378  break;
2379  }
2380 
 /* Decode or post-processing error: log it; fatal unless it was a
  * decode failure and -xerror was not given. */
2381  if (ret < 0) {
2382  if (decode_failed) {
2383  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2384  ist->file_index, ist->st->index, av_err2str(ret));
2385  } else {
2386  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2387  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2388  }
2389  if (!decode_failed || exit_on_error)
2390  exit_program(1);
2391  break;
2392  }
2393 
2394  if (got_output)
2395  ist->got_output = 1;
2396 
2397  if (!got_output)
2398  break;
2399 
2400  // During draining, we might get multiple output frames in this loop.
2401  // ffmpeg.c does not drain the filter chain on configuration changes,
2402  // which means if we send multiple frames at once to the filters, and
2403  // one of those frames changes configuration, the buffered frames will
2404  // be lost. This can upset certain FATE tests.
2405  // Decode only 1 frame per call on EOF to appease these FATE tests.
2406  // The ideal solution would be to rewrite decoding to use the new
2407  // decoding API in a better way.
2408  if (!pkt)
2409  break;
2410 
2411  repeating = 1;
2412  }
2413 
2414  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2415  /* except when looping we need to flush but not to send an EOF */
2416  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2417  int ret = send_filter_eof(ist);
2418  if (ret < 0) {
2419  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2420  exit_program(1);
2421  }
2422  }
2423 
2424  /* handle stream copy */
2425  if (!ist->decoding_needed && pkt) {
2426  ist->dts = ist->next_dts;
 /* Advance next_dts by the best available per-packet duration
  * estimate for the stream's media type. */
2427  switch (par->codec_type) {
2428  case AVMEDIA_TYPE_AUDIO:
2429  av_assert1(pkt->duration >= 0);
2430  if (par->sample_rate) {
2431  ist->next_dts += ((int64_t)AV_TIME_BASE * par->frame_size) /
2432  par->sample_rate;
2433  } else {
2435  }
2436  break;
2437  case AVMEDIA_TYPE_VIDEO:
2438  if (ist->framerate.num) {
2439  // TODO: Remove work-around for c99-to-c89 issue 7
2440  AVRational time_base_q = AV_TIME_BASE_Q;
2441  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2442  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2443  } else if (pkt->duration) {
2445  } else if(ist->dec_ctx->framerate.num != 0) {
2446  int ticks = ist->last_pkt_repeat_pict >= 0 ?
2447  ist->last_pkt_repeat_pict + 1 :
2448  ist->dec_ctx->ticks_per_frame;
2449  ist->next_dts += ((int64_t)AV_TIME_BASE *
2450  ist->dec_ctx->framerate.den * ticks) /
2452  }
2453  break;
2454  }
2455  ist->pts = ist->dts;
2456  ist->next_pts = ist->next_dts;
2457  } else if (!ist->decoding_needed)
2458  eof_reached = 1;
2459 
 /* Forward the packet to every stream-copy output (no encoder) fed by
  * this input stream; skip when flushing with no_eof. */
2460  for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
2461  if (!check_output_constraints(ist, ost) || ost->enc_ctx ||
2462  (!pkt && no_eof))
2463  continue;
2464 
2465  do_streamcopy(ist, ost, pkt);
2466  }
2467 
2468  return !eof_reached;
2469 }
2470 
/*
 * AVCodecContext.get_format callback: scan the decoder-proposed pixel
 * format list, select a hwaccel format whose codec hw-config matches the
 * user-requested device type, and fall back to the first non-hwaccel
 * format in the list.
 * NOTE(review): the signature line and a few interior lines are missing
 * from this view (doc-extraction artifact).
 */
2472 {
2473  InputStream *ist = s->opaque;
2474  const enum AVPixelFormat *p;
2475  int ret;
2476 
2477  for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2479  const AVCodecHWConfig *config = NULL;
2480  int i;
2481 
 /* First software (non-hwaccel) format in the list terminates the
  * search: decoders list them in order of preference. */
2482  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2483  break;
2484 
2485  if (ist->hwaccel_id == HWACCEL_GENERIC ||
2486  ist->hwaccel_id == HWACCEL_AUTO) {
 /* Look up the codec hw config matching this pixel format. */
2487  for (i = 0;; i++) {
2488  config = avcodec_get_hw_config(s->codec, i);
2489  if (!config)
2490  break;
2491  if (!(config->methods &
2493  continue;
2494  if (config->pix_fmt == *p)
2495  break;
2496  }
2497  }
2498  if (config && config->device_type == ist->hwaccel_device_type) {
2500  if (ret < 0) {
 /* An explicitly requested hwaccel that cannot be initialized
  * is a hard failure; with HWACCEL_AUTO we keep searching. */
2501  if (ist->hwaccel_id == HWACCEL_GENERIC) {
2503  "%s hwaccel requested for input stream #%d:%d, "
2504  "but cannot be initialized.\n",
2505  av_hwdevice_get_type_name(config->device_type),
2506  ist->file_index, ist->st->index);
2507  return AV_PIX_FMT_NONE;
2508  }
2509  continue;
2510  }
2511 
2512  ist->hwaccel_pix_fmt = *p;
2513  break;
2514  }
2515  }
2516 
2517  return *p;
2518 }
2519 
/*
 * Prepare an input stream for use. If it needs decoding, configure the
 * decoder context (get_format callback, DVB-subtitle compute_edt option,
 * packet timebase, thread count) and open it, reporting failures through
 * the caller-provided error buffer. Always resets next_pts/next_dts.
 * NOTE(review): a few interior lines (hw device setup call, an av_log)
 * are missing from this view (doc-extraction artifact).
 */
2520 static int init_input_stream(InputStream *ist, char *error, int error_len)
2521 {
2522  int ret;
2523 
2524  if (ist->decoding_needed) {
2525  const AVCodec *codec = ist->dec;
2526  if (!codec) {
2527  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2528  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2529  return AVERROR(EINVAL);
2530  }
2531 
 /* Let get_format() find this InputStream through the codec ctx. */
2532  ist->dec_ctx->opaque = ist;
2533  ist->dec_ctx->get_format = get_format;
2534 #if LIBAVCODEC_VERSION_MAJOR < 60
2536  ist->dec_ctx->thread_safe_callbacks = 1;
2537  })
2538 #endif
2539 
2540  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2541  (ist->decoding_needed & DECODING_FOR_OST)) {
2542  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2544  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2545  }
2546 
2547  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2548  * audio, and video decoders such as cuvid or mediacodec */
2549  ist->dec_ctx->pkt_timebase = ist->st->time_base;
2550 
2551  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2552  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2553  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2555  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2556 
2558  if (ret < 0) {
2559  snprintf(error, error_len, "Device setup failed for "
2560  "decoder on input stream #%d:%d : %s",
2561  ist->file_index, ist->st->index, av_err2str(ret));
2562  return ret;
2563  }
2564 
2565  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2566  if (ret == AVERROR_EXPERIMENTAL)
2567  abort_codec_experimental(codec, 0);
2568 
2569  snprintf(error, error_len,
2570  "Error while opening decoder for input stream "
2571  "#%d:%d : %s",
2572  ist->file_index, ist->st->index, av_err2str(ret));
2573  return ret;
2574  }
2576  }
2577 
2578  ist->next_pts = AV_NOPTS_VALUE;
2579  ist->next_dts = AV_NOPTS_VALUE;
2580 
2581  return 0;
2582 }
2583 
/*
 * Initialize an output stream for stream copy: derive codec parameters
 * from the input stream (round-tripped through a temporary codec context
 * so user options apply), choose codec_tag, frame rate and time base,
 * copy side data and estimated duration, and apply per-media-type fixups.
 * Returns 0 on success or a negative AVERROR code.
 * NOTE(review): the signature line and several interior lines are missing
 * from this view (doc-extraction artifact).
 */
2585 {
2586  OutputFile *of = output_files[ost->file_index];
2587  InputStream *ist = ost->ist;
2590  AVCodecContext *codec_ctx;
2591  AVRational sar;
2592  int i, ret;
2593  uint32_t codec_tag = par->codec_tag;
2594 
2595  av_assert0(ist && !ost->filter);
2596 
 /* Round-trip the input parameters through a throwaway codec context so
  * that any user-supplied codec options are applied to them. */
2597  codec_ctx = avcodec_alloc_context3(NULL);
2598  if (!codec_ctx)
2599  return AVERROR(ENOMEM);
2600 
2601  ret = avcodec_parameters_to_context(codec_ctx, ist->par);
2602  if (ret >= 0)
2603  ret = av_opt_set_dict(codec_ctx, &ost->encoder_opts);
2604  if (ret < 0) {
2606  "Error setting up codec context options.\n");
2607  avcodec_free_context(&codec_ctx);
2608  return ret;
2609  }
2610 
2611  ret = avcodec_parameters_from_context(par, codec_ctx);
2612  avcodec_free_context(&codec_ctx);
2613  if (ret < 0) {
2615  "Error getting reference codec parameters.\n");
2616  return ret;
2617  }
2618 
 /* Keep the input codec_tag only when the output container has no tag
  * table that maps this codec id. */
2619  if (!codec_tag) {
2620  unsigned int codec_tag_tmp;
2621  if (!of->format->codec_tag ||
2623  !av_codec_get_tag2(of->format->codec_tag, par->codec_id, &codec_tag_tmp))
2624  codec_tag = par->codec_tag;
2625  }
2626 
2627  par->codec_tag = codec_tag;
2628 
2629  if (!ost->frame_rate.num)
2630  ost->frame_rate = ist->framerate;
2631 
2632  if (ost->frame_rate.num)
2633  ost->st->avg_frame_rate = ost->frame_rate;
2634  else
2635  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2636 
2638  if (ret < 0)
2639  return ret;
2640 
2641  // copy timebase while removing common factors
2642  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0) {
2643  if (ost->frame_rate.num)
2644  ost->st->time_base = av_inv_q(ost->frame_rate);
2645  else
2647  }
2648 
2649  // copy estimated duration as a hint to the muxer
2650  if (ost->st->duration <= 0 && ist->st->duration > 0)
2651  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
2652 
 /* Without -copypriorss, packets before the output start time are
  * dropped; record the cutoff timestamp. */
2653  if (!ost->copy_prior_start) {
2654  ost->ts_copy_start = (of->start_time == AV_NOPTS_VALUE) ?
2655  0 : of->start_time;
2656  if (copy_ts && ifile->start_time != AV_NOPTS_VALUE) {
2657  ost->ts_copy_start = FFMAX(ost->ts_copy_start,
2658  ifile->start_time + ifile->ts_offset);
2659  }
2660  }
2661 
2662  if (ist->st->nb_side_data) {
2663  for (i = 0; i < ist->st->nb_side_data; i++) {
2664  const AVPacketSideData *sd_src = &ist->st->side_data[i];
2665  uint8_t *dst_data;
2666 
2667  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
2668  if (!dst_data)
2669  return AVERROR(ENOMEM);
2670  memcpy(dst_data, sd_src->data, sd_src->size);
2671  }
2672  }
2673 
2674 #if FFMPEG_ROTATION_METADATA
2675  if (ost->rotate_overridden) {
2677  sizeof(int32_t) * 9);
2678  if (sd)
2679  av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
2680  }
2681 #endif
2682 
2683  switch (par->codec_type) {
2684  case AVMEDIA_TYPE_AUDIO:
 /* Clear bogus block_align values that would mislead the muxer. */
2685  if ((par->block_align == 1 || par->block_align == 1152 || par->block_align == 576) &&
2687  par->block_align = 0;
2688  if (par->codec_id == AV_CODEC_ID_AC3)
2689  par->block_align = 0;
2690  break;
2691  case AVMEDIA_TYPE_VIDEO:
2692  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2693  sar =
2694  av_mul_q(ost->frame_aspect_ratio,
2695  (AVRational){ par->height, par->width });
2696  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2697  "with stream copy may produce invalid files\n");
2698  }
2699  else if (ist->st->sample_aspect_ratio.num)
2700  sar = ist->st->sample_aspect_ratio;
2701  else
2702  sar = par->sample_aspect_ratio;
2704  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2705  ost->st->r_frame_rate = ist->st->r_frame_rate;
2706  break;
2707  }
2708 
2709  ost->mux_timebase = ist->st->time_base;
2710 
2711  return 0;
2712 }
2713 
/*
 * Set the "encoder" metadata tag on the output stream to the libavcodec
 * ident string (or the plain "Lavc" prefix in bitexact mode) followed by
 * the encoder name, unless the tag is already set (e.g. by the user).
 * NOTE(review): the signature line is missing from this view
 * (doc-extraction artifact).
 */
2715 {
2716  const char *cname = ost->enc_ctx->codec->name;
2717  uint8_t *encoder_string;
2718  int encoder_string_len;
2719 
2720  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
2721  return;
2722 
 /* "+ 2": room for the separating space and the NUL terminator. */
2723  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(cname) + 2;
2724  encoder_string = av_mallocz(encoder_string_len);
2725  if (!encoder_string)
2726  report_and_exit(AVERROR(ENOMEM));
2727 
2728  if (!of->bitexact && !ost->bitexact)
2729  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2730  else
2731  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2732  av_strlcat(encoder_string, cname, encoder_string_len);
2733  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2735 }
2736 
/*
 * Choose the encoder time base for an output stream:
 *  - a positive user-supplied -enc_time_base value is used as-is;
 *  - a negative value means "copy from the input stream", falling back to
 *    the supplied default when no input stream is attached;
 *  - otherwise the supplied default is used.
 * NOTE(review): one interior line (an av_log call) is missing from this
 * view (doc-extraction artifact).
 */
2737 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
2738 {
2739  InputStream *ist = ost->ist;
2740  AVCodecContext *enc_ctx = ost->enc_ctx;
2741 
2742  if (ost->enc_timebase.num > 0) {
2743  enc_ctx->time_base = ost->enc_timebase;
2744  return;
2745  }
2746 
2747  if (ost->enc_timebase.num < 0) {
2748  if (ist) {
2749  enc_ctx->time_base = ist->st->time_base;
2750  return;
2751  }
2752 
2754  "Input stream data for output stream #%d:%d not available, "
2755  "using default time base\n", ost->file_index, ost->index);
2756  }
2757 
2758  enc_ctx->time_base = default_time_base;
2759 }
2760 
/*
 * Configure an output stream's encoder context from the filtergraph sink
 * (and optionally a first decoded frame): frame rate selection and
 * clamping, per-media-type parameters (sample format/rate/layout, pixel
 * format, dimensions, SAR, color properties, field order, subtitle
 * header), bitexact flag, and sync-queue/mux time bases.
 * Returns 0 on success or a negative AVERROR code.
 * NOTE(review): the signature line and several interior lines are missing
 * from this view (doc-extraction artifact); comments cover only the
 * visible code.
 */
2762 {
2763  InputStream *ist = ost->ist;
2764  AVCodecContext *enc_ctx = ost->enc_ctx;
2766  OutputFile *of = output_files[ost->file_index];
2767  int ret;
2768 
2769  set_encoder_id(output_files[ost->file_index], ost);
2770 
2771  if (ist) {
2772  dec_ctx = ist->dec_ctx;
2773  }
2774 
 /* Decide the output frame rate: user/-r, filter sink, or 25fps default,
  * then clamp to -fpsmax and the encoder's supported rates. */
2775  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
2776  if (!ost->frame_rate.num)
2777  ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
2778  if (!ost->frame_rate.num && !ost->max_frame_rate.num) {
2779  ost->frame_rate = (AVRational){25, 1};
2781  "No information "
2782  "about the input framerate is available. Falling "
2783  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
2784  "if you want a different framerate.\n",
2785  ost->file_index, ost->index);
2786  }
2787 
2788  if (ost->max_frame_rate.num &&
2789  (av_q2d(ost->frame_rate) > av_q2d(ost->max_frame_rate) ||
2790  !ost->frame_rate.den))
2791  ost->frame_rate = ost->max_frame_rate;
2792 
2793  if (enc_ctx->codec->supported_framerates && !ost->force_fps) {
2794  int idx = av_find_nearest_q_idx(ost->frame_rate, enc_ctx->codec->supported_framerates);
2795  ost->frame_rate = enc_ctx->codec->supported_framerates[idx];
2796  }
2797  // reduce frame rate for mpeg4 to be within the spec limits
2798  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
2799  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
2800  ost->frame_rate.num, ost->frame_rate.den, 65535);
2801  }
2802  }
2803 
2804  switch (enc_ctx->codec_type) {
2805  case AVMEDIA_TYPE_AUDIO:
 /* Audio parameters come from the buffersink feeding the encoder. */
2806  enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
2807  enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
2808  ret = av_buffersink_get_ch_layout(ost->filter->filter, &enc_ctx->ch_layout);
2809  if (ret < 0)
2810  return ret;
2811 
2812  if (ost->bits_per_raw_sample)
2813  enc_ctx->bits_per_raw_sample = ost->bits_per_raw_sample;
2814  else if (dec_ctx && ost->filter->graph->is_meta)
2816  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
2817 
2819  break;
2820 
2821  case AVMEDIA_TYPE_VIDEO:
2822  init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
2823 
2824  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
2825  enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
2826  if ( av_q2d(enc_ctx->time_base) < 0.001 && ost->vsync_method != VSYNC_PASSTHROUGH
2827  && (ost->vsync_method == VSYNC_CFR || ost->vsync_method == VSYNC_VSCFR ||
2828  (ost->vsync_method == VSYNC_AUTO && !(of->format->flags & AVFMT_VARIABLE_FPS)))){
2829  av_log(NULL, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
2830  "Please consider specifying a lower framerate, a different muxer or "
2831  "setting vsync/fps_mode to vfr\n");
2832  }
2833 
2834  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
2835  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
2836  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
2837  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
2838  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
2839  av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
2840 
2841  enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
2842 
2843  if (ost->bits_per_raw_sample)
2844  enc_ctx->bits_per_raw_sample = ost->bits_per_raw_sample;
2845  else if (dec_ctx && ost->filter->graph->is_meta)
2847  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
2848 
 /* Propagate color metadata from the first frame when available. */
2849  if (frame) {
2850  enc_ctx->color_range = frame->color_range;
2851  enc_ctx->color_primaries = frame->color_primaries;
2852  enc_ctx->color_trc = frame->color_trc;
2853  enc_ctx->colorspace = frame->colorspace;
2854  enc_ctx->chroma_sample_location = frame->chroma_location;
2855  }
2856 
2857  enc_ctx->framerate = ost->frame_rate;
2858 
2859  ost->st->avg_frame_rate = ost->frame_rate;
2860 
2861  // Field order: autodetection
2862  if (frame) {
2864  ost->top_field_first >= 0)
2865  frame->top_field_first = !!ost->top_field_first;
2866 
2867  if (frame->interlaced_frame) {
2868  if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG)
2869  enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
2870  else
2871  enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
2872  } else
2873  enc_ctx->field_order = AV_FIELD_PROGRESSIVE;
2874  }
2875 
2876  // Field order: override
2877  if (ost->top_field_first == 0) {
2878  enc_ctx->field_order = AV_FIELD_BB;
2879  } else if (ost->top_field_first == 1) {
2880  enc_ctx->field_order = AV_FIELD_TT;
2881  }
2882 
2883  break;
2884  case AVMEDIA_TYPE_SUBTITLE:
2885  enc_ctx->time_base = AV_TIME_BASE_Q;
2886  if (!enc_ctx->width) {
2887  enc_ctx->width = ost->ist->par->width;
2888  enc_ctx->height = ost->ist->par->height;
2889  }
2890  if (dec_ctx && dec_ctx->subtitle_header) {
2891  /* ASS code assumes this buffer is null terminated so add extra byte. */
2892  ost->enc_ctx->subtitle_header = av_mallocz(dec_ctx->subtitle_header_size + 1);
2893  if (!ost->enc_ctx->subtitle_header)
2894  return AVERROR(ENOMEM);
2895  memcpy(ost->enc_ctx->subtitle_header, dec_ctx->subtitle_header,
2897  ost->enc_ctx->subtitle_header_size = dec_ctx->subtitle_header_size;
2898  }
 /* Reject cross-kind subtitle conversion (text <-> bitmap). */
2899  if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE &&
2900  enc_ctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2901  int input_props = 0, output_props = 0;
2902  AVCodecDescriptor const *input_descriptor =
2903  avcodec_descriptor_get(ist->dec->id);
2904  AVCodecDescriptor const *output_descriptor =
2905  avcodec_descriptor_get(ost->enc_ctx->codec_id);
2906  if (input_descriptor)
2907  input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
2908  if (output_descriptor)
2909  output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
2910  if (input_props && output_props && input_props != output_props) {
2912  "Subtitle encoding currently only possible from text to text "
2913  "or bitmap to bitmap");
2914  return AVERROR_INVALIDDATA;
2915  }
2916  }
2917 
2918  break;
2919  case AVMEDIA_TYPE_DATA:
2920  break;
2921  default:
2922  abort();
2923  break;
2924  }
2925 
2926  if (ost->bitexact)
2927  enc_ctx->flags |= AV_CODEC_FLAG_BITEXACT;
2928 
2929  if (ost->sq_idx_encode >= 0)
2930  sq_set_tb(of->sq_encode, ost->sq_idx_encode, enc_ctx->time_base);
2931 
2932  ost->mux_timebase = enc_ctx->time_base;
2933 
2934  return 0;
2935 }
2936 
/*
 * Fully initialize one output stream: for encoded streams, open the
 * encoder (after device setup and option handling), copy coded and
 * global side data to the stream, and set stream time base/duration;
 * for stream-copy outputs, delegate to the streamcopy initializer.
 * Finally registers the stream with the muxer via of_stream_init().
 * Errors are reported through the caller-provided buffer or as a
 * negative return value.
 * NOTE(review): the first signature line and several interior lines are
 * missing from this view (doc-extraction artifact).
 */
2938  char *error, int error_len)
2939 {
2940  int ret = 0;
2941 
2942  if (ost->enc_ctx) {
2943  const AVCodec *codec = ost->enc_ctx->codec;
2944  InputStream *ist = ost->ist;
2945 
2947  if (ret < 0)
2948  return ret;
2949 
2950  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2951  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
2952 
2954  if (ret < 0) {
2955  snprintf(error, error_len, "Device setup failed for "
2956  "encoder on output stream #%d:%d : %s",
2957  ost->file_index, ost->index, av_err2str(ret));
2958  return ret;
2959  }
2960 
2961  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2962  if (ret == AVERROR_EXPERIMENTAL)
2963  abort_codec_experimental(codec, 1);
2964  snprintf(error, error_len,
2965  "Error while opening encoder for output stream #%d:%d - "
2966  "maybe incorrect parameters such as bit_rate, rate, width or height",
2967  ost->file_index, ost->index);
2968  return ret;
2969  }
 /* Tell the buffersink to emit frames of the encoder's frame size. */
2970  if (codec->type == AVMEDIA_TYPE_AUDIO &&
2972  av_buffersink_set_frame_size(ost->filter->filter,
2973  ost->enc_ctx->frame_size);
2974  assert_avoptions(ost->encoder_opts);
2975  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
2976  ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
2977  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2978  " It takes bits/s as argument, not kbits/s\n");
2979 
2981  if (ret < 0) {
2983  "Error initializing the output stream codec context.\n");
2984  exit_program(1);
2985  }
2986 
 /* Copy the encoder's coded side data to the output stream. */
2987  if (ost->enc_ctx->nb_coded_side_data) {
2988  int i;
2989 
2990  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
2991  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
2992  uint8_t *dst_data;
2993 
2994  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
2995  if (!dst_data)
2996  return AVERROR(ENOMEM);
2997  memcpy(dst_data, sd_src->data, sd_src->size);
2998  }
2999  }
3000 
3001  /*
3002  * Add global input side data. For now this is naive, and copies it
3003  * from the input stream's global side data. All side data should
3004  * really be funneled over AVFrame and libavfilter, then added back to
3005  * packet side data, and then potentially using the first packet for
3006  * global side data.
3007  */
3008  if (ist) {
3009  int i;
3010  for (i = 0; i < ist->st->nb_side_data; i++) {
3011  AVPacketSideData *sd = &ist->st->side_data[i];
3012  if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
3013  uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3014  if (!dst)
3015  return AVERROR(ENOMEM);
3016  memcpy(dst, sd->data, sd->size);
 /* autorotate already applied via filters: neutralize the matrix. */
3017  if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3018  av_display_rotation_set((int32_t *)dst, 0);
3019  }
3020  }
3021  }
3022 
3023  // copy timebase while removing common factors
3024  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3025  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3026 
3027  // copy estimated duration as a hint to the muxer
3028  if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3029  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3030  } else if (ost->ist) {
3032  if (ret < 0)
3033  return ret;
3034  }
3035 
3036  ret = of_stream_init(output_files[ost->file_index], ost);
3037  if (ret < 0)
3038  return ret;
3039 
3040  return ret;
3041 }
3042 
/*
 * One-time transcoding setup: start framerate-emulation clocks, open all
 * input decoders, initialize stream-copy and subtitle/data output streams
 * (encoded frame-based streams are initialized lazily, see comment below),
 * discard unused programs, and print the stream mapping.
 * Returns 0 on success or the first initialization error.
 * NOTE(review): several interior lines are missing from this view
 * (doc-extraction artifact).
 */
3043 static int transcode_init(void)
3044 {
3045  int ret = 0;
3046  char error[1024] = {0};
3047 
3048  /* init framerate emulation */
3049  for (int i = 0; i < nb_input_files; i++) {
3051  if (ifile->readrate || ifile->rate_emu)
3052  for (int j = 0; j < ifile->nb_streams; j++)
3053  ifile->streams[j]->start = av_gettime_relative();
3054  }
3055 
3056  /* init input streams */
3057  for (InputStream *ist = ist_iter(NULL); ist; ist = ist_iter(ist))
3058  if ((ret = init_input_stream(ist, error, sizeof(error))) < 0)
3059  goto dump_format;
3060 
3061  /*
3062  * initialize stream copy and subtitle/data streams.
3063  * Encoded AVFrame based streams will get initialized as follows:
3064  * - when the first AVFrame is received in do_video_out
3065  * - just before the first AVFrame is received in either transcode_step
3066  * or reap_filters due to us requiring the filter chain buffer sink
3067  * to be configured with the correct audio frame size, which is only
3068  * known after the encoder is initialized.
3069  */
3070  for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
3071  if (ost->enc_ctx &&
3074  continue;
3075 
3077  if (ret < 0)
3078  goto dump_format;
3079  }
3080 
3081  /* discard unused programs */
3082  for (int i = 0; i < nb_input_files; i++) {
3084  for (int j = 0; j < ifile->ctx->nb_programs; j++) {
3085  AVProgram *p = ifile->ctx->programs[j];
3086  int discard = AVDISCARD_ALL;
3087 
 /* Keep the program if any of its streams is in use. */
3088  for (int k = 0; k < p->nb_stream_indexes; k++)
3089  if (!ifile->streams[p->stream_index[k]]->discard) {
3091  break;
3092  }
3093  p->discard = discard;
3094  }
3095  }
3096 
3097  dump_format:
3098  /* dump the stream mapping */
3099  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3100  for (InputStream *ist = ist_iter(NULL); ist; ist = ist_iter(ist)) {
3101  for (int j = 0; j < ist->nb_filters; j++) {
3102  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3103  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3104  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3105  ist->filters[j]->name);
3106  if (nb_filtergraphs > 1)
3107  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3108  av_log(NULL, AV_LOG_INFO, "\n");
3109  }
3110  }
3111  }
3112 
3113  for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
3114  if (ost->attachment_filename) {
3115  /* an attached file */
3116  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3117  ost->attachment_filename, ost->file_index, ost->index);
3118  continue;
3119  }
3120 
3121  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3122  /* output from a complex graph */
3123  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3124  if (nb_filtergraphs > 1)
3125  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3126 
3127  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3128  ost->index, ost->enc_ctx->codec->name);
3129  continue;
3130  }
3131 
3132  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3133  ost->ist->file_index,
3134  ost->ist->st->index,
3135  ost->file_index,
3136  ost->index);
3137  if (ost->enc_ctx) {
 /* Show both the codec name and the concrete decoder/encoder
  * implementation; "native" when they coincide. */
3138  const AVCodec *in_codec = ost->ist->dec;
3139  const AVCodec *out_codec = ost->enc_ctx->codec;
3140  const char *decoder_name = "?";
3141  const char *in_codec_name = "?";
3142  const char *encoder_name = "?";
3143  const char *out_codec_name = "?";
3144  const AVCodecDescriptor *desc;
3145 
3146  if (in_codec) {
3147  decoder_name = in_codec->name;
3148  desc = avcodec_descriptor_get(in_codec->id);
3149  if (desc)
3150  in_codec_name = desc->name;
3151  if (!strcmp(decoder_name, in_codec_name))
3152  decoder_name = "native";
3153  }
3154 
3155  if (out_codec) {
3156  encoder_name = out_codec->name;
3157  desc = avcodec_descriptor_get(out_codec->id);
3158  if (desc)
3159  out_codec_name = desc->name;
3160  if (!strcmp(encoder_name, out_codec_name))
3161  encoder_name = "native";
3162  }
3163 
3164  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3165  in_codec_name, decoder_name,
3166  out_codec_name, encoder_name);
3167  } else
3168  av_log(NULL, AV_LOG_INFO, " (copy)");
3169  av_log(NULL, AV_LOG_INFO, "\n");
3170  }
3171 
3172  if (ret) {
3173  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3174  return ret;
3175  }
3176 
3178 
3179  return 0;
3180 }
3181 
3182 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3183 static int need_output(void)
3184 {
3185  for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
3186  if (ost->finished)
3187  continue;
3188 
3189  return 1;
3190  }
3191 
3192  return 0;
3193 }
3194 
3195 /**
3196  * Select the output stream to process.
3197  *
3198  * @return selected output stream, or NULL if none available
3199  */
/*
 * NOTE(review): the signature line preceding the opening brace is missing
 * from this view (doc-extraction artifact); the doxygen comment above
 * documents the contract.
 */
3201 {
3202  int64_t opts_min = INT64_MAX;
3203  OutputStream *ost_min = NULL;
3204 
3205  for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
3206  int64_t opts;
3207 
 /* Rank streams by the last timestamp they produced: filter pts when
  * available, otherwise the last muxed dts. */
3208  if (ost->filter && ost->last_filter_pts != AV_NOPTS_VALUE) {
3209  opts = ost->last_filter_pts;
3210  } else {
3211  opts = ost->last_mux_dts == AV_NOPTS_VALUE ?
3212  INT64_MIN : ost->last_mux_dts;
3213  if (ost->last_mux_dts == AV_NOPTS_VALUE)
3215  "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3216  ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
3217  }
3218 
 /* Streams not yet initialized take priority so they get set up. */
3219  if (!ost->initialized && !ost->inputs_done)
3220  return ost->unavailable ? NULL : ost;
3221 
 /* Otherwise pick the unfinished stream that is furthest behind. */
3222  if (!ost->finished && opts < opts_min) {
3223  opts_min = opts;
3224  ost_min = ost->unavailable ? NULL : ost;
3225  }
3226  }
3227  return ost_min;
3228 }
3229 
/* Turn terminal echo on stdin on or off; no-op when termios is unavailable. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios tty;

    if (tcgetattr(0, &tty) != 0)
        return;

    if (on)
        tty.c_lflag |= ECHO;
    else
        tty.c_lflag &= ~ECHO;

    tcsetattr(0, TCSANOW, &tty);
#endif
}
3241 
3242 static int check_keyboard_interaction(int64_t cur_time)
3243 {
3244  int i, ret, key;
3245  static int64_t last_time;
3246  if (received_nb_signals)
3247  return AVERROR_EXIT;
3248  /* read_key() returns 0 on EOF */
3249  if (cur_time - last_time >= 100000) {
3250  key = read_key();
3251  last_time = cur_time;
3252  }else
3253  key = -1;
3254  if (key == 'q') {
3255  av_log(NULL, AV_LOG_INFO, "\n\n[q] command received. Exiting.\n\n");
3256  return AVERROR_EXIT;
3257  }
3258  if (key == '+') av_log_set_level(av_log_get_level()+10);
3259  if (key == '-') av_log_set_level(av_log_get_level()-10);
3260  if (key == 's') qp_hist ^= 1;
3261  if (key == 'c' || key == 'C'){
3262  char buf[4096], target[64], command[256], arg[256] = {0};
3263  double time;
3264  int k, n = 0;
3265  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3266  i = 0;
3267  set_tty_echo(1);
3268  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3269  if (k > 0)
3270  buf[i++] = k;
3271  buf[i] = 0;
3272  set_tty_echo(0);
3273  fprintf(stderr, "\n");
3274  if (k > 0 &&
3275  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3276  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3277  target, time, command, arg);
3278  for (i = 0; i < nb_filtergraphs; i++) {
3279  FilterGraph *fg = filtergraphs[i];
3280  if (fg->graph) {
3281  if (time < 0) {
3282  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3283  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3284  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3285  } else if (key == 'c') {
3286  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3288  } else {
3289  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3290  if (ret < 0)
3291  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3292  }
3293  }
3294  }
3295  } else {
3297  "Parse error, at least 3 arguments were expected, "
3298  "only %d given in string '%s'\n", n, buf);
3299  }
3300  }
3301  if (key == 'd' || key == 'D'){
3302  int debug=0;
3303  if(key == 'D') {
3304  InputStream *ist = ist_iter(NULL);
3305 
3306  if (ist)
3307  debug = ist->dec_ctx->debug << 1;
3308 
3309  if(!debug) debug = 1;
3310  while (debug & FF_DEBUG_DCT_COEFF) //unsupported, would just crash
3311  debug += debug;
3312  }else{
3313  char buf[32];
3314  int k = 0;
3315  i = 0;
3316  set_tty_echo(1);
3317  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3318  if (k > 0)
3319  buf[i++] = k;
3320  buf[i] = 0;
3321  set_tty_echo(0);
3322  fprintf(stderr, "\n");
3323  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3324  fprintf(stderr,"error parsing debug value\n");
3325  }
3326  for (InputStream *ist = ist_iter(NULL); ist; ist = ist_iter(ist))
3327  ist->dec_ctx->debug = debug;
3329  if (ost->enc_ctx)
3330  ost->enc_ctx->debug = debug;
3331  }
3332  if(debug) av_log_set_level(AV_LOG_DEBUG);
3333  fprintf(stderr,"debug=%d\n", debug);
3334  }
3335  if (key == '?'){
3336  fprintf(stderr, "key function\n"
3337  "? show this help\n"
3338  "+ increase verbosity\n"
3339  "- decrease verbosity\n"
3340  "c Send command to first matching filter supporting it\n"
3341  "C Send/Queue command to all matching filters\n"
3342  "D cycle through available debug modes\n"
3343  "h dump packets/hex press to cycle through the 3 states\n"
3344  "q quit\n"
3345  "s Show QP histogram\n"
3346  );
3347  }
3348  return 0;
3349 }
3350 
3351 static int got_eagain(void)
3352 {
3353  for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost))
3354  if (ost->unavailable)
3355  return 1;
3356  return 0;
3357 }
3358 
3359 static void reset_eagain(void)
3360 {
3361  int i;
3362  for (i = 0; i < nb_input_files; i++)
3363  input_files[i]->eagain = 0;
3364  for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost))
3365  ost->unavailable = 0;
3366 }
3367 
3369 {
3370  for (int i = 0; i < ifile->nb_streams; i++) {
3371  InputStream *ist = ifile->streams[i];
3372  int ret;
3373 
3374  if (!ist->processing_needed)
3375  continue;
3376 
3377  do {
3378  ret = process_input_packet(ist, NULL, 1);
3379  } while (ret > 0);
3380 
3381  if (ist->decoding_needed) {
3382  /* report last frame duration to the demuxer thread */
3383  if (ist->par->codec_type == AVMEDIA_TYPE_AUDIO) {
3384  LastFrameDuration dur;
3385 
3386  dur.stream_idx = i;
3387  dur.duration = av_rescale_q(ist->nb_samples,
3388  (AVRational){ 1, ist->dec_ctx->sample_rate},
3389  ist->st->time_base);
3390 
3391  av_thread_message_queue_send(ifile->audio_duration_queue, &dur, 0);
3392  }
3393 
3395  }
3396  }
3397 }
3398 
3400  AVPacket *pkt)
3401 {
3402  const int fmt_is_discont = ifile->ctx->iformat->flags & AVFMT_TS_DISCONT;
3403  int disable_discontinuity_correction = copy_ts;
3404  int64_t pkt_dts = av_rescale_q_rnd(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q,
3406 
3407  if (copy_ts && ist->next_dts != AV_NOPTS_VALUE &&
3408  fmt_is_discont && ist->st->pts_wrap_bits < 60) {
3409  int64_t wrap_dts = av_rescale_q_rnd(pkt->dts + (1LL<<ist->st->pts_wrap_bits),
3410  ist->st->time_base, AV_TIME_BASE_Q,
3412  if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
3413  disable_discontinuity_correction = 0;
3414  }
3415 
3416  if (ist->next_dts != AV_NOPTS_VALUE && !disable_discontinuity_correction) {
3417  int64_t delta = pkt_dts - ist->next_dts;
3418  if (fmt_is_discont) {
3419  if (FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE ||
3420  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3421  ifile->ts_offset_discont -= delta;
3423  "timestamp discontinuity for stream #%d:%d "
3424  "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
3425  ist->file_index, ist->st->index, ist->st->id,
3427  delta, ifile->ts_offset_discont);
3429  if (pkt->pts != AV_NOPTS_VALUE)
3431  }
3432  } else {
3433  if (FFABS(delta) > 1LL * dts_error_threshold * AV_TIME_BASE) {
3434  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt->dts, ist->next_dts, pkt->stream_index);
3435  pkt->dts = AV_NOPTS_VALUE;
3436  }
3437  if (pkt->pts != AV_NOPTS_VALUE){
3438  int64_t pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
3439  delta = pkt_pts - ist->next_dts;
3440  if (FFABS(delta) > 1LL * dts_error_threshold * AV_TIME_BASE) {
3441  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt->pts, ist->next_dts, pkt->stream_index);
3442  pkt->pts = AV_NOPTS_VALUE;
3443  }
3444  }
3445  }
3446  } else if (ist->next_dts == AV_NOPTS_VALUE && !copy_ts &&
3447  fmt_is_discont && ifile->last_ts != AV_NOPTS_VALUE) {
3448  int64_t delta = pkt_dts - ifile->last_ts;
3449  if (FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE) {
3450  ifile->ts_offset_discont -= delta;
3452  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3453  delta, ifile->ts_offset_discont);
3455  if (pkt->pts != AV_NOPTS_VALUE)
3457  }
3458  }
3459 
3460  ifile->last_ts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
3461 }
3462 
3464  AVPacket *pkt)
3465 {
3466  int64_t offset = av_rescale_q(ifile->ts_offset_discont, AV_TIME_BASE_Q,
3467  ist->st->time_base);
3468 
3469  // apply previously-detected timestamp-discontinuity offset
3470  // (to all streams, not just audio/video)
3471  if (pkt->dts != AV_NOPTS_VALUE)
3472  pkt->dts += offset;
3473  if (pkt->pts != AV_NOPTS_VALUE)
3474  pkt->pts += offset;
3475 
3476  // detect timestamp discontinuities for audio/video
3477  if ((ist->par->codec_type == AVMEDIA_TYPE_VIDEO ||
3478  ist->par->codec_type == AVMEDIA_TYPE_AUDIO) &&
3479  pkt->dts != AV_NOPTS_VALUE)
3481 }
3482 
3483 /*
3484  * Return
3485  * - 0 -- one packet was read and processed
3486  * - AVERROR(EAGAIN) -- no packets were available for selected file,
3487  * this function should be called again
3488  * - AVERROR_EOF -- this function should not be called again
3489  */
3490 static int process_input(int file_index)
3491 {
3492  InputFile *ifile = input_files[file_index];
3494  InputStream *ist;
3495  AVPacket *pkt;
3496  int ret, i;
3497 
3498  is = ifile->ctx;
3500 
3501  if (ret == AVERROR(EAGAIN)) {
3502  ifile->eagain = 1;
3503  return ret;
3504  }
3505  if (ret == 1) {
3506  /* the input file is looped: flush the decoders */
3508  return AVERROR(EAGAIN);
3509  }
3510  if (ret < 0) {
3511  if (ret != AVERROR_EOF) {
3512  print_error(is->url, ret);
3513  if (exit_on_error)
3514  exit_program(1);
3515  }
3516 
3517  for (i = 0; i < ifile->nb_streams; i++) {
3518  ist = ifile->streams[i];
3519  if (ist->processing_needed) {
3520  ret = process_input_packet(ist, NULL, 0);
3521  if (ret>0)
3522  return 0;
3523  }
3524 
3525  /* mark all outputs that don't go through lavfi as finished */
3526  for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
3527  if (ost->ist == ist &&
3528  (!ost->enc_ctx || ost->enc_ctx->codec_type == AVMEDIA_TYPE_SUBTITLE)) {
3529  OutputFile *of = output_files[ost->file_index];
3530  of_output_packet(of, ost->pkt, ost, 1);
3531  }
3532  }
3533  }
3534 
3535  ifile->eof_reached = 1;
3536  return AVERROR(EAGAIN);
3537  }
3538 
3539  reset_eagain();
3540 
3541  ist = ifile->streams[pkt->stream_index];
3542 
3543  ist->data_size += pkt->size;
3544  ist->nb_packets++;
3545 
3546  if (ist->discard)
3547  goto discard_packet;
3548 
3549  /* add the stream-global side data to the first packet */
3550  if (ist->nb_packets == 1) {
3551  for (i = 0; i < ist->st->nb_side_data; i++) {
3552  AVPacketSideData *src_sd = &ist->st->side_data[i];
3553  uint8_t *dst_data;
3554 
3555  if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3556  continue;
3557 
3558  if (av_packet_get_side_data(pkt, src_sd->type, NULL))
3559  continue;
3560 
3561  dst_data = av_packet_new_side_data(pkt, src_sd->type, src_sd->size);
3562  if (!dst_data)
3563  report_and_exit(AVERROR(ENOMEM));
3564 
3565  memcpy(dst_data, src_sd->data, src_sd->size);
3566  }
3567  }
3568 
3569  // detect and try to correct for timestamp discontinuities
3571 
3572  if (debug_ts) {
3573  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s duration:%s duration_time:%s off:%s off_time:%s\n",
3574  ifile->index, pkt->stream_index,
3581  }
3582 
3583  sub2video_heartbeat(ist, pkt->pts);
3584 
3585  process_input_packet(ist, pkt, 0);
3586 
3587 discard_packet:
3588  av_packet_free(&pkt);
3589 
3590  return 0;
3591 }
3592 
3593 /**
3594  * Perform a step of transcoding for the specified filter graph.
3595  *
3596  * @param[in] graph filter graph to consider
3597  * @param[out] best_ist input stream where a frame would allow to continue
3598  * @return 0 for success, <0 for error
3599  */
3600 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
3601 {
3602  int i, ret;
3603  int nb_requests, nb_requests_max = 0;
3604  InputFilter *ifilter;
3605  InputStream *ist;
3606 
3607  *best_ist = NULL;
3609  if (ret >= 0)
3610  return reap_filters(0);
3611 
3612  if (ret == AVERROR_EOF) {
3613  ret = reap_filters(1);
3614  for (i = 0; i < graph->nb_outputs; i++)
3615  close_output_stream(graph->outputs[i]->ost);
3616  return ret;
3617  }
3618  if (ret != AVERROR(EAGAIN))
3619  return ret;
3620 
3621  for (i = 0; i < graph->nb_inputs; i++) {
3622  ifilter = graph->inputs[i];
3623  ist = ifilter->ist;
3624  if (input_files[ist->file_index]->eagain ||
3626  continue;
3627  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
3628  if (nb_requests > nb_requests_max) {
3629  nb_requests_max = nb_requests;
3630  *best_ist = ist;
3631  }
3632  }
3633 
3634  if (!*best_ist)
3635  for (i = 0; i < graph->nb_outputs; i++)
3636  graph->outputs[i]->ost->unavailable = 1;
3637 
3638  return 0;
3639 }
3640 
3641 /**
3642  * Run a single step of transcoding.
3643  *
3644  * @return 0 for success, <0 for error
3645  */
3646 static int transcode_step(void)
3647 {
3648  OutputStream *ost;
3649  InputStream *ist = NULL;
3650  int ret;
3651 
3652  ost = choose_output();
3653  if (!ost) {
3654  if (got_eagain()) {
3655  reset_eagain();
3656  av_usleep(10000);
3657  return 0;
3658  }
3659  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
3660  return AVERROR_EOF;
3661  }
3662 
3663  if (ost->filter && !ost->filter->graph->graph) {
3664  if (ifilter_has_all_input_formats(ost->filter->graph)) {
3665  ret = configure_filtergraph(ost->filter->graph);
3666  if (ret < 0) {
3667  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
3668  return ret;
3669  }
3670  }
3671  }
3672 
3673  if (ost->filter && ost->filter->graph->graph) {
3674  /*
3675  * Similar case to the early audio initialization in reap_filters.
3676  * Audio is special in ffmpeg.c currently as we depend on lavfi's
3677  * audio frame buffering/creation to get the output audio frame size
3678  * in samples correct. The audio frame size for the filter chain is
3679  * configured during the output stream initialization.
3680  *
3681  * Apparently avfilter_graph_request_oldest (called in
3682  * transcode_from_filter just down the line) peeks. Peeking already
3683  * puts one frame "ready to be given out", which means that any
3684  * update in filter buffer sink configuration afterwards will not
3685  * help us. And yes, even if it would be utilized,
3686  * av_buffersink_get_samples is affected, as it internally utilizes
3687  * the same early exit for peeked frames.
3688  *
3689  * In other words, if avfilter_graph_request_oldest would not make
3690  * further filter chain configuration or usage of
3691  * av_buffersink_get_samples useless (by just causing the return
3692  * of the peeked AVFrame as-is), we could get rid of this additional
3693  * early encoder initialization.
3694  */
3695  if (av_buffersink_get_type(ost->filter->filter) == AVMEDIA_TYPE_AUDIO)
3697 
3698  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
3699  return ret;
3700  if (!ist)
3701  return 0;
3702  } else if (ost->filter) {
3703  int i;
3704  for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
3705  InputFilter *ifilter = ost->filter->graph->inputs[i];
3706  if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
3707  ist = ifilter->ist;
3708  break;
3709  }
3710  }
3711  if (!ist) {
3712  ost->inputs_done = 1;
3713  return 0;
3714  }
3715  } else {
3716  ist = ost->ist;
3717  av_assert0(ist);
3718  }
3719 
3720  ret = process_input(ist->file_index);
3721  if (ret == AVERROR(EAGAIN)) {
3722  if (input_files[ist->file_index]->eagain)
3723  ost->unavailable = 1;
3724  return 0;
3725  }
3726 
3727  if (ret < 0)
3728  return ret == AVERROR_EOF ? 0 : ret;
3729 
3730  return reap_filters(0);
3731 }
3732 
3733 /*
3734  * The following code is the main loop of the file converter
3735  */
3736 static int transcode(void)
3737 {
3738  int ret, i;
3739  InputStream *ist;
3740  int64_t timer_start;
3741  int64_t total_packets_written = 0;
3742 
3743  ret = transcode_init();
3744  if (ret < 0)
3745  goto fail;
3746 
3747  if (stdin_interaction) {
3748  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
3749  }
3750 
3751  timer_start = av_gettime_relative();
3752 
3753  while (!received_sigterm) {
3754  int64_t cur_time= av_gettime_relative();
3755 
3756  /* if 'q' pressed, exits */
3757  if (stdin_interaction)
3758  if (check_keyboard_interaction(cur_time) < 0)
3759  break;
3760 
3761  /* check if there's any stream where output is still needed */
3762  if (!need_output()) {
3763  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
3764  break;
3765  }
3766 
3767  ret = transcode_step();
3768  if (ret < 0 && ret != AVERROR_EOF) {
3769  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
3770  break;
3771  }
3772 
3773  /* dump report by using the output first video and audio streams */
3774  print_report(0, timer_start, cur_time);
3775  }
3776 
3777  /* at the end of stream, we must flush the decoder buffers */
3778  for (ist = ist_iter(NULL); ist; ist = ist_iter(ist)) {
3779  if (!input_files[ist->file_index]->eof_reached) {
3780  process_input_packet(ist, NULL, 0);
3781  }
3782  }
3783  flush_encoders();
3784 
3785  term_exit();
3786 
3787  /* write the trailer if needed */
3788  for (i = 0; i < nb_output_files; i++) {
3790  if (ret < 0 && exit_on_error)
3791  exit_program(1);
3792  }
3793 
3794  /* dump report by using the first video and audio streams */
3795  print_report(1, timer_start, av_gettime_relative());
3796 
3797  /* close each encoder */
3798  for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
3799  uint64_t packets_written;
3800  packets_written = atomic_load(&ost->packets_written);
3801  total_packets_written += packets_written;
3802  if (!packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM)) {
3803  av_log(NULL, AV_LOG_FATAL, "Empty output on stream %d.\n", i);
3804  exit_program(1);
3805  }
3806  }
3807 
3808  if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
3809  av_log(NULL, AV_LOG_FATAL, "Empty output\n");
3810  exit_program(1);
3811  }
3812 
3814 
3815  /* finished ! */
3816  ret = 0;
3817 
3818  fail:
3819  return ret;
3820 }
3821 
3823 {
3824  BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
3825 #if HAVE_GETRUSAGE
3826  struct rusage rusage;
3827 
3828  getrusage(RUSAGE_SELF, &rusage);
3829  time_stamps.user_usec =
3830  (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
3831  time_stamps.sys_usec =
3832  (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
3833 #elif HAVE_GETPROCESSTIMES
3834  HANDLE proc;
3835  FILETIME c, e, k, u;
3836  proc = GetCurrentProcess();
3837  GetProcessTimes(proc, &c, &e, &k, &u);
3838  time_stamps.user_usec =
3839  ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
3840  time_stamps.sys_usec =
3841  ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
3842 #else
3843  time_stamps.user_usec = time_stamps.sys_usec = 0;
3844 #endif
3845  return time_stamps;
3846 }
3847 
3848 static int64_t getmaxrss(void)
3849 {
3850 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
3851  struct rusage rusage;
3852  getrusage(RUSAGE_SELF, &rusage);
3853  return (int64_t)rusage.ru_maxrss * 1024;
3854 #elif HAVE_GETPROCESSMEMORYINFO
3855  HANDLE proc;
3856  PROCESS_MEMORY_COUNTERS memcounters;
3857  proc = GetCurrentProcess();
3858  memcounters.cb = sizeof(memcounters);
3859  GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
3860  return memcounters.PeakPagefileUsage;
3861 #else
3862  return 0;
3863 #endif
3864 }
3865 
3866 int main(int argc, char **argv)
3867 {
3868  int ret;
3870 
3871  init_dynload();
3872 
3874 
3875  setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
3876 
3878  parse_loglevel(argc, argv, options);
3879 
3880 #if CONFIG_AVDEVICE
3882 #endif
3884 
3885  show_banner(argc, argv, options);
3886 
3887  /* parse options and open all input/output files */
3888  ret = ffmpeg_parse_options(argc, argv);
3889  if (ret < 0)
3890  exit_program(1);
3891 
3892  if (nb_output_files <= 0 && nb_input_files == 0) {
3893  show_usage();
3894  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3895  exit_program(1);
3896  }
3897 
3898  /* file converter / grab */
3899  if (nb_output_files <= 0) {
3900  av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
3901  exit_program(1);
3902  }
3903 
3905  if (transcode() < 0)
3906  exit_program(1);
3907  if (do_benchmark) {
3908  int64_t utime, stime, rtime;
3910  utime = current_time.user_usec - ti.user_usec;
3911  stime = current_time.sys_usec - ti.sys_usec;
3912  rtime = current_time.real_usec - ti.real_usec;
3914  "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
3915  utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
3916  }
3917  av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
3920  exit_program(69);
3921 
3923  return main_return_code;
3924 }
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:31
sub2video_copy_rect
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg.c:171
AVSubtitle
Definition: avcodec.h:2308
KeyframeForceCtx::pts
int64_t * pts
Definition: ffmpeg.h:501
avcodec_encode_subtitle
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVSubtitle *sub)
Definition: encode.c:164
InputFilter::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg.h:270
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:422
AVCodec
AVCodec.
Definition: codec.h:204
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:204
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
av_codec_get_id
enum AVCodecID av_codec_get_id(const struct AVCodecTag *const *tags, unsigned int tag)
Get the AVCodecID for the given codec tag tag.
AVFILTER_CMD_FLAG_ONE
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:756
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
av_buffersink_get_ch_layout
int av_buffersink_get_ch_layout(const AVFilterContext *ctx, AVChannelLayout *out)
Definition: buffersink.c:241
need_output
static int need_output(void)
Definition: ffmpeg.c:3183
check_output_constraints
static int check_output_constraints(InputStream *ist, OutputStream *ost)
Definition: ffmpeg.c:1737
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
av_buffersink_get_sample_aspect_ratio
AVRational av_buffersink_get_sample_aspect_ratio(const AVFilterContext *ctx)
psnr
static double psnr(double d)
Definition: ffmpeg.c:741
AV_FIELD_PROGRESSIVE
@ AV_FIELD_PROGRESSIVE
Definition: codec_par.h:40
AVERROR_EXPERIMENTAL
#define AVERROR_EXPERIMENTAL
Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it.
Definition: error.h:74
KeyframeForceCtx::dropped_keyframe
int dropped_keyframe
Definition: ffmpeg.h:508
AV_CODEC_ID_AC3
@ AV_CODEC_ID_AC3
Definition: codec_id.h:438
ifilter_parameters_from_codecpar
static int ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
Definition: ffmpeg.c:1655
extra_bits
#define extra_bits(eb)
Definition: intrax8.c:125
av_clip
#define av_clip
Definition: common.h:95
FKF_PREV_FORCED_T
@ FKF_PREV_FORCED_T
Definition: ffmpeg.h:475
atomic_store
#define atomic_store(object, desired)
Definition: stdatomic.h:85
reset_eagain
static void reset_eagain(void)
Definition: ffmpeg.c:3359
avcodec_receive_packet
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:481
VSYNC_VFR
@ VSYNC_VFR
Definition: ffmpeg.h:62
r
const char * r
Definition: vf_curves.c:126
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
nb_input_files
int nb_input_files
Definition: ffmpeg.c:136
opt.h
configure_filtergraph
int configure_filtergraph(FilterGraph *fg)
Definition: ffmpeg_filter.c:963
ffmpeg_exited
static volatile int ffmpeg_exited
Definition: ffmpeg.c:325
AVCodecContext::get_format
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
Callback to negotiate the pixel format.
Definition: avcodec.h:662
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:58
LIBAVCODEC_IDENT
#define LIBAVCODEC_IDENT
Definition: version.h:43
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:975
av_compare_ts
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
Definition: mathematics.c:147
libm.h
InputFilter::width
int width
Definition: ffmpeg.h:269
AVProgram::nb_stream_indexes
unsigned int nb_stream_indexes
Definition: avformat.h:1145
InputFilter::displaymatrix
int32_t * displaymatrix
Definition: ffmpeg.h:276
out
FILE * out
Definition: movenc.c:54
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:259
init_output_stream
static int init_output_stream(OutputStream *ost, AVFrame *frame, char *error, int error_len)
Definition: ffmpeg.c:2937
AVCodecContext::sample_rate
int sample_rate
samples per second
Definition: avcodec.h:1007
KF_FORCE_SOURCE_NO_DROP
@ KF_FORCE_SOURCE_NO_DROP
Definition: ffmpeg.h:492
FKF_PREV_FORCED_N
@ FKF_PREV_FORCED_N
Definition: ffmpeg.h:474
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:691
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
FilterGraph::graph_desc
const char * graph_desc
Definition: ffmpeg.h:307
AVCodecParameters
This struct describes the properties of an encoded stream.
Definition: codec_par.h:54
InputStream::data_size
uint64_t data_size
Definition: ffmpeg.h:416
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:262
AV_LOG_QUIET
#define AV_LOG_QUIET
Print no output.
Definition: log.h:162
sub
static float sub(float src0, float src1)
Definition: dnn_backend_native_layer_mathbinary.c:31
AVFrame::duration
int64_t duration
Duration of the frame, in the same units as pts.
Definition: frame.h:714
thread.h
AV_RL64
uint64_t_TMPL AV_RL64
Definition: bytestream.h:91
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2858
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AVFMT_VARIABLE_FPS
#define AVFMT_VARIABLE_FPS
Format allows variable fps.
Definition: avformat.h:484
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
FilterGraph::inputs
InputFilter ** inputs
Definition: ffmpeg.h:315
AV_DISPOSITION_ATTACHED_PIC
#define AV_DISPOSITION_ATTACHED_PIC
The stream is stored in the file as an attached picture/"cover art" (e.g.
Definition: avformat.h:879
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:146
sub2video_heartbeat
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:260
avcodec_parameters_from_context
int avcodec_parameters_from_context(AVCodecParameters *par, const AVCodecContext *codec)
Fill the parameters struct based on the values from the supplied codec context.
Definition: codec_par.c:99
remove_avoptions
void remove_avoptions(AVDictionary **a, AVDictionary *b)
Definition: ffmpeg.c:610
AVPictureType
AVPictureType
Definition: avutil.h:272
KeyframeForceCtx::nb_pts
int nb_pts
Definition: ffmpeg.h:502
frame_drop_threshold
float frame_drop_threshold
Definition: ffmpeg_opt.c:71
InputStream::dec_ctx
AVCodecContext * dec_ctx
Definition: ffmpeg.h:337
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
transcode_step
static int transcode_step(void)
Run a single step of transcoding.
Definition: ffmpeg.c:3646
BenchmarkTimeStamps::user_usec
int64_t user_usec
Definition: ffmpeg.c:118
AVSubtitleRect
Definition: avcodec.h:2280
AVSubtitle::num_rects
unsigned num_rects
Definition: avcodec.h:2312
AV_BUFFERSRC_FLAG_KEEP_REF
@ AV_BUFFERSRC_FLAG_KEEP_REF
Keep a reference to the frame.
Definition: buffersrc.h:53
ist_iter
InputStream * ist_iter(InputStream *prev)
Definition: ffmpeg.c:594
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:116
OutputFile::start_time
int64_t start_time
start time in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:642
SQFRAME
#define SQFRAME(frame)
Definition: sync_queue.h:38
InputFilter::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg.h:273
ffmpeg_parse_options
int ffmpeg_parse_options(int argc, char **argv)
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:325
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
pixdesc.h
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:432
AVCodecContext::color_trc
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:968
AVFrame::width
int width
Definition: frame.h:397
of_filesize
int64_t of_filesize(OutputFile *of)
Definition: ffmpeg_mux.c:733
OutputStream::unavailable
int unavailable
Definition: ffmpeg.h:588
AVPacketSideData
Definition: packet.h:315
AVCodec::capabilities
int capabilities
Codec capabilities.
Definition: codec.h:223
w
uint8_t w
Definition: llviddspenc.c:38
avcodec_decode_subtitle2
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:863
FKF_T
@ FKF_T
Definition: ffmpeg.h:476
sync_queue.h
LastFrameDuration::stream_idx
int stream_idx
Definition: ffmpeg.h:430
AVPacket::data
uint8_t * data
Definition: packet.h:374
current_time
static BenchmarkTimeStamps current_time
Definition: ffmpeg.c:132
AVComponentDescriptor::depth
int depth
Number of bits in the component.
Definition: pixdesc.h:57
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:1004
ost_iter
static OutputStream * ost_iter(OutputStream *prev)
Definition: ffmpeg.c:578
AVFrame::top_field_first
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:482
dup_warning
static uint64_t dup_warning
Definition: ffmpeg.c:127
OutputStream::index
int index
Definition: ffmpeg.h:513
ATOMIC_VAR_INIT
#define ATOMIC_VAR_INIT(value)
Definition: stdatomic.h:31
b
#define b
Definition: input.c:41
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:159
AVStream::avg_frame_rate
AVRational avg_frame_rate
Average framerate.
Definition: avformat.h:1028
KeyframeForceCtx::type
int type
Definition: ffmpeg.h:496
nb_output_dumped
unsigned nb_output_dumped
Definition: ffmpeg.c:130
InputStream::nb_filters
int nb_filters
Definition: ffmpeg.h:401
atomic_int
intptr_t atomic_int
Definition: stdatomic.h:55
AV_DICT_IGNORE_SUFFIX
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key,...
Definition: dict.h:75
AVCodecContext::subtitle_header
uint8_t * subtitle_header
Header containing style information for text subtitles.
Definition: avcodec.h:1708
version.h
transcode
static int transcode(void)
Definition: ffmpeg.c:3736
ffmpeg.h
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
BenchmarkTimeStamps::sys_usec
int64_t sys_usec
Definition: ffmpeg.c:119
progress_avio
AVIOContext * progress_avio
Definition: ffmpeg.c:133
AV_NOWARN_DEPRECATED
#define AV_NOWARN_DEPRECATED(code)
Disable warnings about deprecated features This is useful for sections of code kept for backward comp...
Definition: attributes.h:126
show_usage
void show_usage(void)
Definition: ffmpeg_opt.c:1205
do_audio_out
static void do_audio_out(OutputFile *of, OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:932
FilterGraph::nb_inputs
int nb_inputs
Definition: ffmpeg.h:316
av_display_rotation_set
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure clockwise rotation by the specified angle (in de...
Definition: display.c:51
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:392
AVCodecParameters::codec_tag
uint32_t codec_tag
Additional information about the codec (corresponds to the AVI FOURCC).
Definition: codec_par.h:66
mathematics.h
filter
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
Definition: filter_design.txt:228
AVDictionary
Definition: dict.c:32
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:571
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:263
decode_audio
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
Definition: ffmpeg.c:2006
program_birth_year
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffmpeg.c:112
LastFrameDuration
Definition: ffmpeg.h:429
InputStream::decoding_needed
int decoding_needed
Definition: ffmpeg.h:326
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:311
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:297
flush_encoders
static void flush_encoders(void)
Definition: ffmpeg.c:1673
ost
static AVStream * ost
Definition: vaapi_transcode.c:45
tf_sess_config.config
config
Definition: tf_sess_config.py:33
os_support.h
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:429
qp_hist
int qp_hist
Definition: ffmpeg_opt.c:83
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: avpacket.c:73
term_exit_sigsafe
static void term_exit_sigsafe(void)
Definition: ffmpeg.c:308
init_output_stream_encode
static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:2761
ECHO
#define ECHO(name, type, min, max)
Definition: af_aecho.c:157
AVIOInterruptCB
Callback for checking whether to abort blocking functions.
Definition: avio.h:59
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:119
InputFilter::ist
struct InputStream * ist
Definition: ffmpeg.h:259
OutputFile::nb_streams
int nb_streams
Definition: ffmpeg.h:637
InputFile::eof_reached
int eof_reached
Definition: ffmpeg.h:438
exit_program
void exit_program(int ret)
Wraps exit with a program-specific cleanup routine.
Definition: cmdutils.c:99
InputStream
Definition: ffmpeg.h:321
filter_nbthreads
char * filter_nbthreads
Definition: ffmpeg_opt.c:86
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:1735
debug_ts
int debug_ts
Definition: ffmpeg_opt.c:79
got_eagain
static int got_eagain(void)
Definition: ffmpeg.c:3351
AVPacketSideData::size
size_t size
Definition: packet.h:317
ifilter_send_eof
static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
Definition: ffmpeg.c:1935
stats_period
int64_t stats_period
Definition: ffmpeg_opt.c:90
print_error
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:799
av_buffersink_set_frame_size
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
Set the frame size for an audio buffer sink.
Definition: buffersink.c:204
dts_delta_threshold
float dts_delta_threshold
Definition: ffmpeg_opt.c:67
fifo.h
vstats_version
int vstats_version
Definition: ffmpeg_opt.c:88
submit_encode_frame
static int submit_encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:886
init_input_stream
static int init_input_stream(InputStream *ist, char *error, int error_len)
Definition: ffmpeg.c:2520
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:407
AVCodecContext::ch_layout
AVChannelLayout ch_layout
Audio channel layout.
Definition: avcodec.h:2059
report_and_exit
void report_and_exit(int ret)
Reports an error corresponding to the provided AVERROR code and calls exit_program() with the corresp...
Definition: cmdutils.c:93
InputStream::sub2video
struct InputStream::sub2video sub2video
fail
#define fail()
Definition: checkasm.h:134
InputFilter::type
enum AVMediaType type
Definition: ffmpeg.h:262
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
av_shrink_packet
void av_shrink_packet(AVPacket *pkt, int size)
Reduce packet size, correctly zeroing padding.
Definition: avpacket.c:112
AV_PIX_FMT_FLAG_HWACCEL
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:128
FFSIGN
#define FFSIGN(a)
Definition: common.h:65
samplefmt.h
InputStream::decoder_opts
AVDictionary * decoder_opts
Definition: ffmpeg.h:375
AVProgram::discard
enum AVDiscard discard
selects which program to discard and which to feed to the caller
Definition: avformat.h:1143
InputStream::filter_in_rescale_delta_last
int64_t filter_in_rescale_delta_last
Definition: ffmpeg.h:362
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
AVFrame::key_frame
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:417
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:478
KeyframeForceCtx::ref_pts
int64_t ref_pts
Definition: ffmpeg.h:498
sub2video_get_blank_frame
static int sub2video_get_blank_frame(InputStream *ist)
Definition: ffmpeg.c:156
InputStream::nb_packets
uint64_t nb_packets
Definition: ffmpeg.h:418
AVFrame::ch_layout
AVChannelLayout ch_layout
Channel layout of the audio data.
Definition: frame.h:709
sq_receive
int sq_receive(SyncQueue *sq, int stream_idx, SyncQueueFrame frame)
Read a frame from the queue.
Definition: sync_queue.c:339
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
scale
static av_always_inline float scale(float x, float s)
Definition: vf_v360.c:1389
pts
static int64_t pts
Definition: transcode_aac.c:654
hw_device_setup_for_decode
int hw_device_setup_for_decode(InputStream *ist)
Definition: ffmpeg_hw.c:317
us
#define us(width, name, range_min, range_max, subs,...)
Definition: cbs_h2645.c:276
AV_PKT_DATA_DISPLAYMATRIX
@ AV_PKT_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: packet.h:109
AV_CODEC_ID_MP3
@ AV_CODEC_ID_MP3
preferred ID for decoding MPEG audio layer 1, 2 or 3
Definition: codec_id.h:436
AVStream::duration
int64_t duration
Decoding: duration of the stream, in stream time base.
Definition: avformat.h:998
av_codec_get_tag2
int av_codec_get_tag2(const struct AVCodecTag *const *tags, enum AVCodecID id, unsigned int *tag)
Get the codec tag for the given codec id.
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
InputStream::sub2video::last_pts
int64_t last_pts
Definition: ffmpeg.h:390
AVRational::num
int num
Numerator.
Definition: rational.h:59
avformat_network_init
int avformat_network_init(void)
Do global initialization of network libraries.
Definition: utils.c:557
InputFile
Definition: ffmpeg.h:434
AV_CODEC_FLAG_INTERLACED_DCT
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:276
init_output_stream_streamcopy
static int init_output_stream_streamcopy(OutputStream *ost)
Definition: ffmpeg.c:2584
avsubtitle_free
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: avcodec.c:412
AV_DICT_DONT_STRDUP_VAL
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that's been allocated with av_malloc() or another memory allocation functio...
Definition: dict.h:79
AV_CODEC_ID_DVB_SUBTITLE
@ AV_CODEC_ID_DVB_SUBTITLE
Definition: codec_id.h:540
InputStream::last_pkt_repeat_pict
int last_pkt_repeat_pict
Definition: ffmpeg.h:360
ffmpeg_cleanup
static void ffmpeg_cleanup(int ret)
Definition: ffmpeg.c:494
InputStream::first_dts
int64_t first_dts
dts of the first packet read for this stream (in AV_TIME_BASE units)
Definition: ffmpeg.h:349
InputStream::hwaccel_pix_fmt
enum AVPixelFormat hwaccel_pix_fmt
Definition: ffmpeg.h:412
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:212
avassert.h
InputStream::dts
int64_t dts
dts of the last packet read for this stream (in AV_TIME_BASE units)
Definition: ffmpeg.h:350
AVCodecContext::color_primaries
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:961
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM
#define ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM
Definition: ffmpeg.h:481
decode_flush
static void decode_flush(InputFile *ifile)
Definition: ffmpeg.c:3368
av_thread_message_queue_send
int av_thread_message_queue_send(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Send a message on the queue.
Definition: threadmessage.c:158
choose_output
static OutputStream * choose_output(void)
Select the output stream to process.
Definition: ffmpeg.c:3200
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
BenchmarkTimeStamps::real_usec
int64_t real_usec
Definition: ffmpeg.c:117
duration
int64_t duration
Definition: movenc.c:64
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
AVCodecParameters::frame_size
int frame_size
Audio only.
Definition: codec_par.h:189
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:60
av_opt_set_dict
int av_opt_set_dict(void *obj, AVDictionary **options)
Set all the options from a given dictionary on an object.
Definition: opt.c:1767
float
float
Definition: af_crystalizer.c:122
HWACCEL_GENERIC
@ HWACCEL_GENERIC
Definition: ffmpeg.h:72
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:694
VSYNC_VSCFR
@ VSYNC_VSCFR
Definition: ffmpeg.h:63
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:149
AVCodec::supported_framerates
const AVRational * supported_framerates
array of supported framerates, or NULL if any, array is terminated by {0,0}
Definition: codec.h:225
llrintf
#define llrintf(x)
Definition: libm.h:399
AVCodecDescriptor
This struct describes the properties of a single codec described by an AVCodecID.
Definition: codec_desc.h:38
InputStream::cfr_next_pts
int64_t cfr_next_pts
Definition: ffmpeg.h:369
intreadwrite.h
s
#define s(width, name)
Definition: cbs_vp9.c:256
av_new_packet
int av_new_packet(AVPacket *pkt, int size)