ffmpeg.c
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
68 
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
72 
73 #if HAVE_SYS_RESOURCE_H
74 #include <sys/time.h>
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
78 #include <windows.h>
79 #endif
80 #if HAVE_GETPROCESSMEMORYINFO
81 #include <windows.h>
82 #include <psapi.h>
83 #endif
84 #if HAVE_SETCONSOLECTRLHANDLER
85 #include <windows.h>
86 #endif
87 
88 
89 #if HAVE_SYS_SELECT_H
90 #include <sys/select.h>
91 #endif
92 
93 #if HAVE_TERMIOS_H
94 #include <fcntl.h>
95 #include <sys/ioctl.h>
96 #include <sys/time.h>
97 #include <termios.h>
98 #elif HAVE_KBHIT
99 #include <conio.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
111 
112 static FILE *vstats_file;
113 
114 const char *const forced_keyframes_const_names[] = {
115  "n",
116  "n_forced",
117  "prev_forced_n",
118  "prev_forced_t",
119  "t",
120  NULL
121 };
122 
123 typedef struct BenchmarkTimeStamps {
124  int64_t real_usec;
125  int64_t user_usec;
126  int64_t sys_usec;
127 } BenchmarkTimeStamps;
128 
129 static void do_video_stats(OutputStream *ost, int frame_size);
131 static int64_t getmaxrss(void);
133 
134 static int run_as_daemon = 0;
135 static int nb_frames_dup = 0;
136 static unsigned dup_warning = 1000;
137 static int nb_frames_drop = 0;
138 static int64_t decode_error_stat[2];
139 static unsigned nb_output_dumped = 0;
140 
141 static int want_sdp = 1;
142 
145 
146 static uint8_t *subtitle_out;
147 
152 
157 
160 
161 #if HAVE_TERMIOS_H
162 
163 /* init terminal so that we can grab keys */
164 static struct termios oldtty;
165 static int restore_tty;
166 #endif
167 
168 #if HAVE_THREADS
169 static void free_input_threads(void);
170 #endif
171 
172 /* sub2video hack:
173  Convert subtitles to video with alpha to insert them in filter graphs.
174  This is a temporary solution until libavfilter gets real subtitles support.
175  */
176 
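/* A minimal sketch of how such a rendered RGB32 canvas could be fed to a
 * filtergraph input, e.g. for an "[sub][main]overlay" graph; buffersrc_ctx is
 * a hypothetical AVFilterContext for the subtitle buffer source:
 *
 *     canvas->pts = pts;                        // in the buffer source time base
 *     ret = av_buffersrc_add_frame_flags(buffersrc_ctx, canvas,
 *                                        AV_BUFFERSRC_FLAG_KEEP_REF);
 *     if (ret < 0)
 *         av_log(NULL, AV_LOG_WARNING, "sub2video: %s\n", av_err2str(ret));
 *
 * sub2video_push_ref() below does this for every filter fed by the input
 * stream. */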
177 static int sub2video_get_blank_frame(InputStream *ist)
178 {
179  int ret;
180  AVFrame *frame = ist->sub2video.frame;
181 
182  av_frame_unref(frame);
183  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
184  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
185  ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
186  if ((ret = av_frame_get_buffer(frame, 0)) < 0)
187  return ret;
188  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
189  return 0;
190 }
191 
192 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
193  AVSubtitleRect *r)
194 {
195  uint32_t *pal, *dst2;
196  uint8_t *src, *src2;
197  int x, y;
198 
199  if (r->type != SUBTITLE_BITMAP) {
200  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
201  return;
202  }
203  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
204  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
205  r->x, r->y, r->w, r->h, w, h
206  );
207  return;
208  }
209 
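 /* advance dst to the rectangle's top-left corner; RGB32 is 4 bytes per pixel */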
210  dst += r->y * dst_linesize + r->x * 4;
211  src = r->data[0];
212  pal = (uint32_t *)r->data[1];
213  for (y = 0; y < r->h; y++) {
214  dst2 = (uint32_t *)dst;
215  src2 = src;
216  for (x = 0; x < r->w; x++)
217  *(dst2++) = pal[*(src2++)];
218  dst += dst_linesize;
219  src += r->linesize[0];
220  }
221 }
222 
223 static void sub2video_push_ref(InputStream *ist, int64_t pts)
224 {
225  AVFrame *frame = ist->sub2video.frame;
226  int i;
227  int ret;
228 
229  av_assert1(frame->data[0]);
230  ist->sub2video.last_pts = frame->pts = pts;
231  for (i = 0; i < ist->nb_filters; i++) {
232  ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
233  AV_BUFFERSRC_FLAG_KEEP_REF |
234  AV_BUFFERSRC_FLAG_PUSH);
235  if (ret != AVERROR_EOF && ret < 0)
236  av_log(NULL, AV_LOG_WARNING, "Error while adding the frame to the buffer source (%s).\n",
237  av_err2str(ret));
238  }
239 }
240 
241 void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
242 {
243  AVFrame *frame = ist->sub2video.frame;
244  int8_t *dst;
245  int dst_linesize;
246  int num_rects, i;
247  int64_t pts, end_pts;
248 
249  if (!frame)
250  return;
251  if (sub) {
252  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
253  AV_TIME_BASE_Q, ist->st->time_base);
254  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
255  AV_TIME_BASE_Q, ist->st->time_base);
256  num_rects = sub->num_rects;
257  } else {
258  /* If we are initializing the system, utilize current heartbeat
259  PTS as the start time, and show until the following subpicture
260  is received. Otherwise, utilize the previous subpicture's end time
261  as the fall-back value. */
262  pts = ist->sub2video.initialize ?
263  heartbeat_pts : ist->sub2video.end_pts;
264  end_pts = INT64_MAX;
265  num_rects = 0;
266  }
267  if (sub2video_get_blank_frame(ist) < 0) {
268  av_log(ist->dec_ctx, AV_LOG_ERROR,
269  "Impossible to get a blank canvas.\n");
270  return;
271  }
272  dst = frame->data [0];
273  dst_linesize = frame->linesize[0];
274  for (i = 0; i < num_rects; i++)
275  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
277  ist->sub2video.end_pts = end_pts;
278  ist->sub2video.initialize = 0;
279 }
280 
281 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
282 {
283  InputFile *infile = input_files[ist->file_index];
284  int i, j, nb_reqs;
285  int64_t pts2;
286 
287  /* When a frame is read from a file, examine all sub2video streams in
288  the same file and send the sub2video frame again. Otherwise, decoded
289  video frames could be accumulating in the filter graph while a filter
290  (possibly overlay) is desperately waiting for a subtitle frame. */
291  for (i = 0; i < infile->nb_streams; i++) {
292  InputStream *ist2 = input_streams[infile->ist_index + i];
293  if (!ist2->sub2video.frame)
294  continue;
295  /* subtitles seem to be usually muxed ahead of other streams;
296  if not, subtracting a larger time here is necessary */
297  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
298  /* do not send the heartbeat frame if the subtitle is already ahead */
299  if (pts2 <= ist2->sub2video.last_pts)
300  continue;
301  if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
302  /* if we have hit the end of the current displayed subpicture,
303  or if we need to initialize the system, update the
304  overlayed subpicture and its start/end times */
305  sub2video_update(ist2, pts2 + 1, NULL);
306  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
307  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
308  if (nb_reqs)
309  sub2video_push_ref(ist2, pts2);
310  }
311 }
312 
313 static void sub2video_flush(InputStream *ist)
314 {
315  int i;
316  int ret;
317 
318  if (ist->sub2video.end_pts < INT64_MAX)
319  sub2video_update(ist, INT64_MAX, NULL);
320  for (i = 0; i < ist->nb_filters; i++) {
321  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
322  if (ret != AVERROR_EOF && ret < 0)
323  av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
324  }
325 }
326 
327 /* end of sub2video hack */
328 
329 static void term_exit_sigsafe(void)
330 {
331 #if HAVE_TERMIOS_H
332  if(restore_tty)
333  tcsetattr (0, TCSANOW, &oldtty);
334 #endif
335 }
336 
337 void term_exit(void)
338 {
339  av_log(NULL, AV_LOG_QUIET, "%s", "");
340  term_exit_sigsafe();
341 }
342 
343 static volatile int received_sigterm = 0;
344 static volatile int received_nb_signals = 0;
346 static volatile int ffmpeg_exited = 0;
347 static int main_return_code = 0;
349 
350 static void
351 sigterm_handler(int sig)
352 {
353  int ret;
354  received_sigterm = sig;
355  received_nb_signals++;
356  term_exit_sigsafe();
357  if(received_nb_signals > 3) {
358  ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
359  strlen("Received > 3 system signals, hard exiting\n"));
360  if (ret < 0) { /* Do nothing */ };
361  exit(123);
362  }
363 }
364 
365 #if HAVE_SETCONSOLECTRLHANDLER
366 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
367 {
368  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
369 
370  switch (fdwCtrlType)
371  {
372  case CTRL_C_EVENT:
373  case CTRL_BREAK_EVENT:
374  sigterm_handler(SIGINT);
375  return TRUE;
376 
377  case CTRL_CLOSE_EVENT:
378  case CTRL_LOGOFF_EVENT:
379  case CTRL_SHUTDOWN_EVENT:
380  sigterm_handler(SIGTERM);
381  /* Basically, with these 3 events, when we return from this method the
382  process is hard terminated, so stall as long as we need to
383  to try and let the main thread(s) clean up and gracefully terminate
384  (we have at most 5 seconds, but should be done far before that). */
385  while (!ffmpeg_exited) {
386  Sleep(0);
387  }
388  return TRUE;
389 
390  default:
391  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
392  return FALSE;
393  }
394 }
395 #endif
396 
397 #ifdef __linux__
398 #define SIGNAL(sig, func) \
399  do { \
400  action.sa_handler = func; \
401  sigaction(sig, &action, NULL); \
402  } while (0)
403 #else
404 #define SIGNAL(sig, func) \
405  signal(sig, func)
406 #endif
407 
408 void term_init(void)
409 {
410 #if defined __linux__
411  struct sigaction action = {0};
412  action.sa_handler = sigterm_handler;
413 
414  /* block other interrupts while processing this one */
415  sigfillset(&action.sa_mask);
416 
417  /* restart interruptible functions (i.e. don't fail with EINTR) */
418  action.sa_flags = SA_RESTART;
419 #endif
420 
421 #if HAVE_TERMIOS_H
422  if (!run_as_daemon) {
423  struct termios tty;
424  if (tcgetattr (0, &tty) == 0) {
425  oldtty = tty;
426  restore_tty = 1;
427 
428  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
429  |INLCR|IGNCR|ICRNL|IXON);
430  tty.c_oflag |= OPOST;
431  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
432  tty.c_cflag &= ~(CSIZE|PARENB);
433  tty.c_cflag |= CS8;
434  tty.c_cc[VMIN] = 1;
435  tty.c_cc[VTIME] = 0;
436 
437  tcsetattr (0, TCSANOW, &tty);
438  }
439  SIGNAL(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
440  }
441 #endif
442 
443  SIGNAL(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
444  SIGNAL(SIGTERM, sigterm_handler); /* Termination (ANSI). */
445 #ifdef SIGXCPU
446  SIGNAL(SIGXCPU, sigterm_handler);
447 #endif
448 #ifdef SIGPIPE
449  signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
450 #endif
451 #if HAVE_SETCONSOLECTRLHANDLER
452  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
453 #endif
454 }
455 
456 /* read a key without blocking */
457 static int read_key(void)
458 {
459  unsigned char ch;
460 #if HAVE_TERMIOS_H
461  int n = 1;
462  struct timeval tv;
463  fd_set rfds;
464 
465  FD_ZERO(&rfds);
466  FD_SET(0, &rfds);
467  tv.tv_sec = 0;
468  tv.tv_usec = 0;
469  n = select(1, &rfds, NULL, NULL, &tv);
470  if (n > 0) {
471  n = read(0, &ch, 1);
472  if (n == 1)
473  return ch;
474 
475  return n;
476  }
477 #elif HAVE_KBHIT
478 # if HAVE_PEEKNAMEDPIPE
479  static int is_pipe;
480  static HANDLE input_handle;
481  DWORD dw, nchars;
482  if(!input_handle){
483  input_handle = GetStdHandle(STD_INPUT_HANDLE);
484  is_pipe = !GetConsoleMode(input_handle, &dw);
485  }
486 
487  if (is_pipe) {
488  /* When running under a GUI, you will end up here. */
489  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
490  // input pipe may have been closed by the program that ran ffmpeg
491  return -1;
492  }
493  //Read it
494  if(nchars != 0) {
495  read(0, &ch, 1);
496  return ch;
497  }else{
498  return -1;
499  }
500  }
501 # endif
502  if(kbhit())
503  return(getch());
504 #endif
505  return -1;
506 }
507 
508 static int decode_interrupt_cb(void *ctx)
509 {
510  return received_nb_signals > atomic_load(&transcode_init_done);
511 }
512 
514 
515 static void ffmpeg_cleanup(int ret)
516 {
517  int i, j;
518 
519  if (do_benchmark) {
520  int maxrss = getmaxrss() / 1024;
521  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
522  }
523 
524  for (i = 0; i < nb_filtergraphs; i++) {
525  FilterGraph *fg = filtergraphs[i];
527  for (j = 0; j < fg->nb_inputs; j++) {
528  InputFilter *ifilter = fg->inputs[j];
529  struct InputStream *ist = ifilter->ist;
530 
531  while (av_fifo_size(ifilter->frame_queue)) {
532  AVFrame *frame;
534  sizeof(frame), NULL);
536  }
537  av_fifo_freep(&ifilter->frame_queue);
538  av_freep(&ifilter->displaymatrix);
539  if (ist->sub2video.sub_queue) {
540  while (av_fifo_size(ist->sub2video.sub_queue)) {
541  AVSubtitle sub;
542  av_fifo_generic_read(ist->sub2video.sub_queue,
543  &sub, sizeof(sub), NULL);
545  }
546  av_fifo_freep(&ist->sub2video.sub_queue);
547  }
548  av_buffer_unref(&ifilter->hw_frames_ctx);
549  av_freep(&ifilter->name);
550  av_freep(&fg->inputs[j]);
551  }
552  av_freep(&fg->inputs);
553  for (j = 0; j < fg->nb_outputs; j++) {
554  OutputFilter *ofilter = fg->outputs[j];
555 
556  avfilter_inout_free(&ofilter->out_tmp);
557  av_freep(&ofilter->name);
558  av_freep(&ofilter->formats);
559  av_freep(&ofilter->channel_layouts);
560  av_freep(&ofilter->sample_rates);
561  av_freep(&fg->outputs[j]);
562  }
563  av_freep(&fg->outputs);
564  av_freep(&fg->graph_desc);
565 
567  }
569 
571 
572  /* close files */
573  for (i = 0; i < nb_output_files; i++) {
574  OutputFile *of = output_files[i];
575  AVFormatContext *s;
576  if (!of)
577  continue;
578  s = of->ctx;
579  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
580  avio_closep(&s->pb);
582  av_dict_free(&of->opts);
583 
585  }
586  for (i = 0; i < nb_output_streams; i++) {
588 
589  if (!ost)
590  continue;
591 
593 
598 
601  av_freep(&ost->avfilter);
603 
606 
609 
612 
613  if (ost->muxing_queue) {
614  while (av_fifo_size(ost->muxing_queue)) {
615  AVPacket *pkt;
618  }
620  }
621 
623  }
624 #if HAVE_THREADS
625  free_input_threads();
626 #endif
627  for (i = 0; i < nb_input_files; i++) {
631  }
632  for (i = 0; i < nb_input_streams; i++) {
633  InputStream *ist = input_streams[i];
634 
635  av_frame_free(&ist->decoded_frame);
636  av_frame_free(&ist->filter_frame);
637  av_packet_free(&ist->pkt);
638  av_dict_free(&ist->decoder_opts);
639  avsubtitle_free(&ist->prev_sub.subtitle);
640  av_frame_free(&ist->sub2video.frame);
641  av_freep(&ist->filters);
642  av_freep(&ist->hwaccel_device);
643  av_freep(&ist->dts_buffer);
644 
645  avcodec_free_context(&ist->dec_ctx);
646 
648  }
649 
650  if (vstats_file) {
651  if (fclose(vstats_file))
653  "Error closing vstats file, loss of information possible: %s\n",
654  av_err2str(AVERROR(errno)));
655  }
658 
663 
664  uninit_opts();
665 
667 
668  if (received_sigterm) {
669  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
670  (int) received_sigterm);
671  } else if (ret && atomic_load(&transcode_init_done)) {
672  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
673  }
674  term_exit();
675  ffmpeg_exited = 1;
676 }
677 
679 {
680  AVDictionaryEntry *t = NULL;
681 
682  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
684  }
685 }
686 
688 {
690  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
691  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
692  exit_program(1);
693  }
694 }
695 
696 static void abort_codec_experimental(const AVCodec *c, int encoder)
697 {
698  exit_program(1);
699 }
700 
701 static void update_benchmark(const char *fmt, ...)
702 {
703  if (do_benchmark_all) {
705  va_list va;
706  char buf[1024];
707 
708  if (fmt) {
709  va_start(va, fmt);
710  vsnprintf(buf, sizeof(buf), fmt, va);
711  va_end(va);
713  "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
716  t.real_usec - current_time.real_usec, buf);
717  }
718  current_time = t;
719  }
720 }
721 
723 {
724  int i;
725  for (i = 0; i < nb_output_streams; i++) {
726  OutputStream *ost2 = output_streams[i];
727  ost2->finished |= ost == ost2 ? this_stream : others;
728  }
729 }
730 
731 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
732 {
733  AVFormatContext *s = of->ctx;
734  AVStream *st = ost->st;
735  int ret;
736 
737  /*
738  * Audio encoders may split the packets -- #frames in != #packets out.
739  * But there is no reordering, so we can limit the number of output packets
740  * by simply dropping them here.
741  * Counting encoded video frames needs to be done separately because of
742  * reordering, see do_video_out().
743  * Do not count the packet when unqueued because it has been counted when queued.
744  */
745  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
746  if (ost->frame_number >= ost->max_frames) {
748  return;
749  }
750  ost->frame_number++;
751  }
752 
753  if (!of->header_written) {
754  AVPacket *tmp_pkt;
755  /* the muxer is not initialized yet, buffer the packet */
756  if (!av_fifo_space(ost->muxing_queue)) {
757  unsigned int are_we_over_size =
759  int new_size = are_we_over_size ?
763 
764  if (new_size <= av_fifo_size(ost->muxing_queue)) {
766  "Too many packets buffered for output stream %d:%d.\n",
767  ost->file_index, ost->st->index);
768  exit_program(1);
769  }
770  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
771  if (ret < 0)
772  exit_program(1);
773  }
775  if (ret < 0)
776  exit_program(1);
777  tmp_pkt = av_packet_alloc();
778  if (!tmp_pkt)
779  exit_program(1);
780  av_packet_move_ref(tmp_pkt, pkt);
781  ost->muxing_queue_data_size += tmp_pkt->size;
782  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
783  return;
784  }
785 
788  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
789 
791  int i;
793  NULL);
794  ost->quality = sd ? AV_RL32(sd) : -1;
795  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
796 
797  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
798  if (sd && i < sd[5])
799  ost->error[i] = AV_RL64(sd + 8 + 8*i);
800  else
801  ost->error[i] = -1;
802  }
803 
804  if (ost->frame_rate.num && ost->is_cfr) {
805  if (pkt->duration > 0)
806  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
808  ost->mux_timebase);
809  }
810  }
811 
813 
814  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
815  if (pkt->dts != AV_NOPTS_VALUE &&
816  pkt->pts != AV_NOPTS_VALUE &&
817  pkt->dts > pkt->pts) {
818  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
819  pkt->dts, pkt->pts,
820  ost->file_index, ost->st->index);
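 /* pts + dts + (last_mux_dts + 1) minus their minimum and maximum leaves the
  * median of the three values, which is used as the guessed timestamp */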
821  pkt->pts =
822  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
823  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
824  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
825  }
827  pkt->dts != AV_NOPTS_VALUE &&
830  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
831  if (pkt->dts < max) {
832  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
833  if (exit_on_error)
834  loglevel = AV_LOG_ERROR;
835  av_log(s, loglevel, "Non-monotonous DTS in output stream "
836  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
838  if (exit_on_error) {
839  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
840  exit_program(1);
841  }
842  av_log(s, loglevel, "changing to %"PRId64". This may result "
843  "in incorrect timestamps in the output file.\n",
844  max);
845  if (pkt->pts >= pkt->dts)
846  pkt->pts = FFMAX(pkt->pts, max);
847  pkt->dts = max;
848  }
849  }
850  }
851  ost->last_mux_dts = pkt->dts;
852 
853  ost->data_size += pkt->size;
854  ost->packets_written++;
855 
857 
858  if (debug_ts) {
859  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
860  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
864  pkt->size
865  );
866  }
867 
869  if (ret < 0) {
870  print_error("av_interleaved_write_frame()", ret);
871  main_return_code = 1;
873  }
874 }
875 
877 {
879 
881  if (of->shortest) {
883  of->recording_time = FFMIN(of->recording_time, end);
884  }
885 }
886 
887 /*
888  * Send a single packet to the output, applying any bitstream filters
889  * associated with the output stream. This may result in any number
890  * of packets actually being written, depending on what bitstream
891  * filters are applied. The supplied packet is consumed and will be
892  * blank (as if newly-allocated) when this function returns.
893  *
894  * If eof is set, instead indicate EOF to all bitstream filters and
895  * therefore flush any delayed packets to the output. A blank packet
896  * must be supplied in this case.
897  */
898 static void output_packet(OutputFile *of, AVPacket *pkt,
899  OutputStream *ost, int eof)
900 {
901  int ret = 0;
902 
903  /* apply the output bitstream filters */
904  if (ost->bsf_ctx) {
905  ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
906  if (ret < 0)
907  goto finish;
908  while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
909  write_packet(of, pkt, ost, 0);
910  if (ret == AVERROR(EAGAIN))
911  ret = 0;
912  } else if (!eof)
913  write_packet(of, pkt, ost, 0);
914 
915 finish:
916  if (ret < 0 && ret != AVERROR_EOF) {
917  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
918  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
919  if(exit_on_error)
920  exit_program(1);
921  }
922 }
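/* Typical usage at end of stream copy (see do_streamcopy() further below):
 * flushing delayed bitstream-filter output is requested by passing a blank
 * packet with eof set:
 *
 *     av_packet_unref(opkt);
 *     output_packet(of, opkt, ost, 1);
 */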
923 
925 {
927 
928  if (of->recording_time != INT64_MAX &&
930  AV_TIME_BASE_Q) >= 0) {
932  return 0;
933  }
934  return 1;
935 }
936 
937 static double adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost,
938  AVFrame *frame)
939 {
940  double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
941  AVCodecContext *enc = ost->enc_ctx;
942  if (!frame || frame->pts == AV_NOPTS_VALUE ||
943  !enc || !ost->filter || !ost->filter->graph->graph)
944  goto early_exit;
945 
946  {
948 
949  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
951  AVRational tb = enc->time_base;
952  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
953 
954  tb.den <<= extra_bits;
955  float_pts =
956  av_rescale_q(frame->pts, filter_tb, tb) -
958  float_pts /= 1 << extra_bits;
959  // avoid exact midpoints to reduce the chance of rounding differences; this can be removed if the fps code is changed to work with integers
960  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
961 
962  frame->pts =
963  av_rescale_q(frame->pts, filter_tb, enc->time_base) -
965  }
966 
967 early_exit:
968 
969  if (debug_ts) {
970  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
971  frame ? av_ts2str(frame->pts) : "NULL",
972  frame ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL",
973  float_pts,
974  enc ? enc->time_base.num : -1,
975  enc ? enc->time_base.den : -1);
976  }
977 
978  return float_pts;
979 }
980 
981 static int init_output_stream(OutputStream *ost, AVFrame *frame,
982  char *error, int error_len);
983 
984 static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame,
985  unsigned int fatal)
986 {
987  int ret = AVERROR_BUG;
988  char error[1024] = {0};
989 
990  if (ost->initialized)
991  return 0;
992 
993  ret = init_output_stream(ost, frame, error, sizeof(error));
994  if (ret < 0) {
995  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
997 
998  if (fatal)
999  exit_program(1);
1000  }
1001 
1002  return ret;
1003 }
1004 
1005 static void do_audio_out(OutputFile *of, OutputStream *ost,
1006  AVFrame *frame)
1007 {
1008  AVCodecContext *enc = ost->enc_ctx;
1009  AVPacket *pkt = ost->pkt;
1010  int ret;
1011 
1013 
1014  if (!check_recording_time(ost))
1015  return;
1016 
1017  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
1018  frame->pts = ost->sync_opts;
1019  ost->sync_opts = frame->pts + frame->nb_samples;
1020  ost->samples_encoded += frame->nb_samples;
1021  ost->frames_encoded++;
1022 
1024  if (debug_ts) {
1025  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
1026  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1027  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
1028  enc->time_base.num, enc->time_base.den);
1029  }
1030 
1031  ret = avcodec_send_frame(enc, frame);
1032  if (ret < 0)
1033  goto error;
1034 
1035  while (1) {
1036  ret = avcodec_receive_packet(enc, pkt);
1037  if (ret == AVERROR(EAGAIN))
1038  break;
1039  if (ret < 0)
1040  goto error;
1041 
1042  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
1043 
1045 
1046  if (debug_ts) {
1047  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
1048  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1051  }
1052 
1053  output_packet(of, pkt, ost, 0);
1054  }
1055 
1056  return;
1057 error:
1058  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
1059  exit_program(1);
1060 }
1061 
1062 static void do_subtitle_out(OutputFile *of,
1063  OutputStream *ost,
1064  AVSubtitle *sub)
1065 {
1066  int subtitle_out_max_size = 1024 * 1024;
1067  int subtitle_out_size, nb, i;
1068  AVCodecContext *enc;
1069  AVPacket *pkt = ost->pkt;
1070  int64_t pts;
1071 
1072  if (sub->pts == AV_NOPTS_VALUE) {
1073  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
1074  if (exit_on_error)
1075  exit_program(1);
1076  return;
1077  }
1078 
1079  enc = ost->enc_ctx;
1080 
1081  if (!subtitle_out) {
1082  subtitle_out = av_malloc(subtitle_out_max_size);
1083  if (!subtitle_out) {
1084  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
1085  exit_program(1);
1086  }
1087  }
1088 
1089  /* Note: DVB subtitles need one packet to draw them and another
1090  packet to clear them */
1091  /* XXX: signal it in the codec context ? */
1092  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1093  nb = 2;
1094  else
1095  nb = 1;
1096 
1097  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1098  pts = sub->pts;
1101  for (i = 0; i < nb; i++) {
1102  unsigned save_num_rects = sub->num_rects;
1103 
1105  if (!check_recording_time(ost))
1106  return;
1107 
1108  sub->pts = pts;
1109  // start_display_time is required to be 0
1110  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1111  sub->end_display_time -= sub->start_display_time;
1112  sub->start_display_time = 0;
1113  if (i == 1)
1114  sub->num_rects = 0;
1115 
1116  ost->frames_encoded++;
1117 
1118  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1119  subtitle_out_max_size, sub);
1120  if (i == 1)
1121  sub->num_rects = save_num_rects;
1122  if (subtitle_out_size < 0) {
1123  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1124  exit_program(1);
1125  }
1126 
1128  pkt->data = subtitle_out;
1129  pkt->size = subtitle_out_size;
1130  pkt->pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1131  pkt->duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1132  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1133  /* XXX: the pts correction is handled here. Maybe handling
1134  it in the codec would be better */
1135  if (i == 0)
1136  pkt->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1137  else
1138  pkt->pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1139  }
1140  pkt->dts = pkt->pts;
1141  output_packet(of, pkt, ost, 0);
1142  }
1143 }
1144 
1145 static void do_video_out(OutputFile *of,
1146  OutputStream *ost,
1147  AVFrame *next_picture)
1148 {
1149  int ret, format_video_sync;
1150  AVPacket *pkt = ost->pkt;
1151  AVCodecContext *enc = ost->enc_ctx;
1152  AVRational frame_rate;
1153  int nb_frames, nb0_frames, i;
1154  double delta, delta0;
1155  double duration = 0;
1156  double sync_ipts = AV_NOPTS_VALUE;
1157  int frame_size = 0;
1158  InputStream *ist = NULL;
1160 
1161  init_output_stream_wrapper(ost, next_picture, 1);
1162  sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture);
1163 
1164  if (ost->source_index >= 0)
1166 
1167  frame_rate = av_buffersink_get_frame_rate(filter);
1168  if (frame_rate.num > 0 && frame_rate.den > 0)
1169  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
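 /* e.g. a 25/1 frame rate with a 1/12800 encoder time base gives
  * 1 / (25 * 1/12800) = 512 time base units per frame */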
1170 
1171  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1173 
1174  if (!ost->filters_script &&
1175  !ost->filters &&
1176  (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1177  next_picture &&
1178  ist &&
1179  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1180  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1181  }
1182 
1183  if (!next_picture) {
1184  //end, flushing
1185  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1186  ost->last_nb0_frames[1],
1187  ost->last_nb0_frames[2]);
1188  } else {
1189  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1190  delta = delta0 + duration;
1191 
1192  /* by default, we output a single frame */
1193  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1194  nb_frames = 1;
1195 
1196  format_video_sync = video_sync_method;
1197  if (format_video_sync == VSYNC_AUTO) {
1198  if(!strcmp(of->ctx->oformat->name, "avi")) {
1199  format_video_sync = VSYNC_VFR;
1200  } else
1201  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1202  if ( ist
1203  && format_video_sync == VSYNC_CFR
1204  && input_files[ist->file_index]->ctx->nb_streams == 1
1205  && input_files[ist->file_index]->input_ts_offset == 0) {
1206  format_video_sync = VSYNC_VSCFR;
1207  }
1208  if (format_video_sync == VSYNC_CFR && copy_ts) {
1209  format_video_sync = VSYNC_VSCFR;
1210  }
1211  }
1212  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1213 
1214  if (delta0 < 0 &&
1215  delta > 0 &&
1216  format_video_sync != VSYNC_PASSTHROUGH &&
1217  format_video_sync != VSYNC_DROP) {
1218  if (delta0 < -0.6) {
1219  av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1220  } else
1221  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1222  sync_ipts = ost->sync_opts;
1223  duration += delta0;
1224  delta0 = 0;
1225  }
1226 
1227  switch (format_video_sync) {
1228  case VSYNC_VSCFR:
1229  if (ost->frame_number == 0 && delta0 >= 0.5) {
1230  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1231  delta = duration;
1232  delta0 = 0;
1233  ost->sync_opts = llrint(sync_ipts);
1234  }
1235  case VSYNC_CFR:
1236  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1237  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1238  nb_frames = 0;
1239  } else if (delta < -1.1)
1240  nb_frames = 0;
1241  else if (delta > 1.1) {
1242  nb_frames = lrintf(delta);
1243  if (delta0 > 1.1)
1244  nb0_frames = llrintf(delta0 - 0.6);
1245  }
1246  break;
1247  case VSYNC_VFR:
1248  if (delta <= -0.6)
1249  nb_frames = 0;
1250  else if (delta > 0.6)
1251  ost->sync_opts = llrint(sync_ipts);
1252  break;
1253  case VSYNC_DROP:
1254  case VSYNC_PASSTHROUGH:
1255  ost->sync_opts = llrint(sync_ipts);
1256  break;
1257  default:
1258  av_assert0(0);
1259  }
1260  }
1261 
1262  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1263  nb0_frames = FFMIN(nb0_frames, nb_frames);
1264 
1265  memmove(ost->last_nb0_frames + 1,
1267  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1268  ost->last_nb0_frames[0] = nb0_frames;
1269 
1270  if (nb0_frames == 0 && ost->last_dropped) {
1271  nb_frames_drop++;
1273  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1275  }
1276  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1277  if (nb_frames > dts_error_threshold * 30) {
1278  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1279  nb_frames_drop++;
1280  return;
1281  }
1282  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1283  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1284  if (nb_frames_dup > dup_warning) {
1285  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1286  dup_warning *= 10;
1287  }
1288  }
1289  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1290  ost->dropped_keyframe = ost->last_dropped && next_picture && next_picture->key_frame;
1291 
1292  /* duplicates frame if needed */
1293  for (i = 0; i < nb_frames; i++) {
1294  AVFrame *in_picture;
1295  int forced_keyframe = 0;
1296  double pts_time;
1297 
1298  if (i < nb0_frames && ost->last_frame) {
1299  in_picture = ost->last_frame;
1300  } else
1301  in_picture = next_picture;
1302 
1303  if (!in_picture)
1304  return;
1305 
1306  in_picture->pts = ost->sync_opts;
1307 
1308  if (!check_recording_time(ost))
1309  return;
1310 
1311  in_picture->quality = enc->global_quality;
1312  in_picture->pict_type = 0;
1313 
1315  in_picture->pts != AV_NOPTS_VALUE)
1316  ost->forced_kf_ref_pts = in_picture->pts;
1317 
1318  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1319  (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1321  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1322  ost->forced_kf_index++;
1323  forced_keyframe = 1;
1324  } else if (ost->forced_keyframes_pexpr) {
1325  double res;
1329  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1335  res);
1336  if (res) {
1337  forced_keyframe = 1;
1343  }
1344 
1346  } else if ( ost->forced_keyframes
1347  && !strncmp(ost->forced_keyframes, "source", 6)
1348  && in_picture->key_frame==1
1349  && !i) {
1350  forced_keyframe = 1;
1351  } else if ( ost->forced_keyframes
1352  && !strncmp(ost->forced_keyframes, "source_no_drop", 14)
1353  && !i) {
1354  forced_keyframe = (in_picture->key_frame == 1) || ost->dropped_keyframe;
1355  ost->dropped_keyframe = 0;
1356  }
1357 
1358  if (forced_keyframe) {
1359  in_picture->pict_type = AV_PICTURE_TYPE_I;
1360  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1361  }
1362 
1364  if (debug_ts) {
1365  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1366  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1367  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1368  enc->time_base.num, enc->time_base.den);
1369  }
1370 
1371  ost->frames_encoded++;
1372 
1373  ret = avcodec_send_frame(enc, in_picture);
1374  if (ret < 0)
1375  goto error;
1376  // Make sure Closed Captions will not be duplicated
1378 
1379  while (1) {
1380  ret = avcodec_receive_packet(enc, pkt);
1381  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1382  if (ret == AVERROR(EAGAIN))
1383  break;
1384  if (ret < 0)
1385  goto error;
1386 
1387  if (debug_ts) {
1388  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1389  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1392  }
1393 
1395  pkt->pts = ost->sync_opts;
1396 
1398 
1399  if (debug_ts) {
1400  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1401  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1404  }
1405 
1406  frame_size = pkt->size;
1407  output_packet(of, pkt, ost, 0);
1408 
1409  /* if two pass, output log */
1410  if (ost->logfile && enc->stats_out) {
1411  fprintf(ost->logfile, "%s", enc->stats_out);
1412  }
1413  }
1414  ost->sync_opts++;
1415  /*
1416  * For video, number of frames in == number of packets out.
1417  * But there may be reordering, so we can't throw away frames on encoder
1418  * flush, we need to limit them here, before they go into encoder.
1419  */
1420  ost->frame_number++;
1421 
1422  if (vstats_filename && frame_size)
1424  }
1425 
1426  if (!ost->last_frame)
1429  if (next_picture && ost->last_frame)
1430  av_frame_ref(ost->last_frame, next_picture);
1431  else
1433 
1434  return;
1435 error:
1436  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1437  exit_program(1);
1438 }
1439 
1440 static double psnr(double d)
1441 {
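 /* e.g. a normalized squared error of 1e-4 maps to -10 * log10(1e-4) = 40 dB */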
1442  return -10.0 * log10(d);
1443 }
1444 
1445 static void do_video_stats(OutputStream *ost, int frame_size)
1446 {
1447  AVCodecContext *enc;
1448  int frame_number;
1449  double ti1, bitrate, avg_bitrate;
1450 
1451  /* this is executed just the first time do_video_stats is called */
1452  if (!vstats_file) {
1453  vstats_file = fopen(vstats_filename, "w");
1454  if (!vstats_file) {
1455  perror("fopen");
1456  exit_program(1);
1457  }
1458  }
1459 
1460  enc = ost->enc_ctx;
1461  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1462  frame_number = ost->st->nb_frames;
1463  if (vstats_version <= 1) {
1464  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1465  ost->quality / (float)FF_QP2LAMBDA);
1466  } else {
1467  fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1468  ost->quality / (float)FF_QP2LAMBDA);
1469  }
1470 
1471  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1472  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1473 
1474  fprintf(vstats_file,"f_size= %6d ", frame_size);
1475  /* compute pts value */
1477  if (ti1 < 0.01)
1478  ti1 = 0.01;
1479 
1480  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1481  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1482  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1483  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1484  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1485  }
1486 }
1487 
1489 {
1491  int i;
1492 
1494 
1495  if (of->shortest) {
1496  for (i = 0; i < of->ctx->nb_streams; i++)
1498  }
1499 }
1500 
1501 /**
1502  * Get and encode new output from any of the filtergraphs, without causing
1503  * activity.
1504  *
1505  * @return 0 for success, <0 for severe errors
1506  */
1507 static int reap_filters(int flush)
1508 {
1509  AVFrame *filtered_frame = NULL;
1510  int i;
1511 
1512  /* Reap all buffers present in the buffer sinks */
1513  for (i = 0; i < nb_output_streams; i++) {
1517  AVCodecContext *enc = ost->enc_ctx;
1518  int ret = 0;
1519 
1520  if (!ost->filter || !ost->filter->graph->graph)
1521  continue;
1522  filter = ost->filter->filter;
1523 
1524  /*
1525  * Unlike video, with audio the audio frame size matters.
1526  * Currently we are fully reliant on the lavfi filter chain to
1527  * do the buffering deed for us, and thus the frame size parameter
1528  * needs to be set accordingly. Where does one get the required
1529  * frame size? From the initialized AVCodecContext of an audio
1530  * encoder. Thus, if we have gotten to an audio stream, initialize
1531  * the encoder earlier than receiving the first AVFrame.
1532  */
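 /* A minimal sketch of the propagation this enables, assuming the encoder has
  * already been opened (enc->frame_size is then known for most audio codecs):
  *
  *     if (enc->frame_size)
  *         av_buffersink_set_frame_size(filter, enc->frame_size);
  */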
1535 
1536  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1537  return AVERROR(ENOMEM);
1538  }
1539  filtered_frame = ost->filtered_frame;
1540 
1541  while (1) {
1542  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1544  if (ret < 0) {
1545  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1547  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1548  } else if (flush && ret == AVERROR_EOF) {
1550  do_video_out(of, ost, NULL);
1551  }
1552  break;
1553  }
1554  if (ost->finished) {
1555  av_frame_unref(filtered_frame);
1556  continue;
1557  }
1558 
1559  switch (av_buffersink_get_type(filter)) {
1560  case AVMEDIA_TYPE_VIDEO:
1561  if (!ost->frame_aspect_ratio.num)
1562  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1563 
1564  do_video_out(of, ost, filtered_frame);
1565  break;
1566  case AVMEDIA_TYPE_AUDIO:
1567  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1568  enc->channels != filtered_frame->channels) {
1570  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1571  break;
1572  }
1573  do_audio_out(of, ost, filtered_frame);
1574  break;
1575  default:
1576  // TODO support subtitle filters
1577  av_assert0(0);
1578  }
1579 
1580  av_frame_unref(filtered_frame);
1581  }
1582  }
1583 
1584  return 0;
1585 }
1586 
1587 static void print_final_stats(int64_t total_size)
1588 {
1589  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1590  uint64_t subtitle_size = 0;
1591  uint64_t data_size = 0;
1592  float percent = -1.0;
1593  int i, j;
1594  int pass1_used = 1;
1595 
1596  for (i = 0; i < nb_output_streams; i++) {
1598  switch (ost->enc_ctx->codec_type) {
1599  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1600  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1601  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1602  default: other_size += ost->data_size; break;
1603  }
1604  extra_size += ost->enc_ctx->extradata_size;
1605  data_size += ost->data_size;
1608  pass1_used = 0;
1609  }
1610 
1611  if (data_size && total_size>0 && total_size >= data_size)
1612  percent = 100.0 * (total_size - data_size) / data_size;
1613 
1614  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1615  video_size / 1024.0,
1616  audio_size / 1024.0,
1617  subtitle_size / 1024.0,
1618  other_size / 1024.0,
1619  extra_size / 1024.0);
1620  if (percent >= 0.0)
1621  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1622  else
1623  av_log(NULL, AV_LOG_INFO, "unknown");
1624  av_log(NULL, AV_LOG_INFO, "\n");
1625 
1626  /* print verbose per-stream stats */
1627  for (i = 0; i < nb_input_files; i++) {
1628  InputFile *f = input_files[i];
1629  uint64_t total_packets = 0, total_size = 0;
1630 
1631  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1632  i, f->ctx->url);
1633 
1634  for (j = 0; j < f->nb_streams; j++) {
1635  InputStream *ist = input_streams[f->ist_index + j];
1636  enum AVMediaType type = ist->dec_ctx->codec_type;
1637 
1638  total_size += ist->data_size;
1639  total_packets += ist->nb_packets;
1640 
1641  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1642  i, j, media_type_string(type));
1643  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1644  ist->nb_packets, ist->data_size);
1645 
1646  if (ist->decoding_needed) {
1647  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1648  ist->frames_decoded);
1649  if (type == AVMEDIA_TYPE_AUDIO)
1650  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1651  av_log(NULL, AV_LOG_VERBOSE, "; ");
1652  }
1653 
1654  av_log(NULL, AV_LOG_VERBOSE, "\n");
1655  }
1656 
1657  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1658  total_packets, total_size);
1659  }
1660 
1661  for (i = 0; i < nb_output_files; i++) {
1662  OutputFile *of = output_files[i];
1663  uint64_t total_packets = 0, total_size = 0;
1664 
1665  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1666  i, of->ctx->url);
1667 
1668  for (j = 0; j < of->ctx->nb_streams; j++) {
1671 
1672  total_size += ost->data_size;
1673  total_packets += ost->packets_written;
1674 
1675  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1676  i, j, media_type_string(type));
1677  if (ost->encoding_needed) {
1678  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1679  ost->frames_encoded);
1680  if (type == AVMEDIA_TYPE_AUDIO)
1681  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1682  av_log(NULL, AV_LOG_VERBOSE, "; ");
1683  }
1684 
1685  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1687 
1688  av_log(NULL, AV_LOG_VERBOSE, "\n");
1689  }
1690 
1691  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1692  total_packets, total_size);
1693  }
1694  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1695  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1696  if (pass1_used) {
1697  av_log(NULL, AV_LOG_WARNING, "\n");
1698  } else {
1699  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1700  }
1701  }
1702 }
1703 
1704 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1705 {
1706  AVBPrint buf, buf_script;
1707  OutputStream *ost;
1708  AVFormatContext *oc;
1709  int64_t total_size;
1710  AVCodecContext *enc;
1711  int frame_number, vid, i;
1712  double bitrate;
1713  double speed;
1714  int64_t pts = INT64_MIN + 1;
1715  static int64_t last_time = -1;
1716  static int first_report = 1;
1717  static int qp_histogram[52];
1718  int hours, mins, secs, us;
1719  const char *hours_sign;
1720  int ret;
1721  float t;
1722 
1723  if (!print_stats && !is_last_report && !progress_avio)
1724  return;
1725 
1726  if (!is_last_report) {
1727  if (last_time == -1) {
1728  last_time = cur_time;
1729  }
1730  if (((cur_time - last_time) < stats_period && !first_report) ||
1731  (first_report && nb_output_dumped < nb_output_files))
1732  return;
1733  last_time = cur_time;
1734  }
1735 
1736  t = (cur_time-timer_start) / 1000000.0;
1737 
1738 
1739  oc = output_files[0]->ctx;
1740 
1741  total_size = avio_size(oc->pb);
1742  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1743  total_size = avio_tell(oc->pb);
1744 
1745  vid = 0;
1747  av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1748  for (i = 0; i < nb_output_streams; i++) {
1749  float q = -1;
1750  ost = output_streams[i];
1751  enc = ost->enc_ctx;
1752  if (!ost->stream_copy)
1753  q = ost->quality / (float) FF_QP2LAMBDA;
1754 
1755  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1756  av_bprintf(&buf, "q=%2.1f ", q);
1757  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1758  ost->file_index, ost->index, q);
1759  }
1760  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1761  float fps;
1762 
1763  frame_number = ost->frame_number;
1764  fps = t > 1 ? frame_number / t : 0;
1765  av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1766  frame_number, fps < 9.95, fps, q);
1767  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1768  av_bprintf(&buf_script, "fps=%.2f\n", fps);
1769  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1770  ost->file_index, ost->index, q);
1771  if (is_last_report)
1772  av_bprintf(&buf, "L");
1773  if (qp_hist) {
1774  int j;
1775  int qp = lrintf(q);
1776  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1777  qp_histogram[qp]++;
1778  for (j = 0; j < 32; j++)
1779  av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1780  }
1781 
1782  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1783  int j;
1784  double error, error_sum = 0;
1785  double scale, scale_sum = 0;
1786  double p;
1787  char type[3] = { 'Y','U','V' };
1788  av_bprintf(&buf, "PSNR=");
1789  for (j = 0; j < 3; j++) {
1790  if (is_last_report) {
1791  error = enc->error[j];
1792  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1793  } else {
1794  error = ost->error[j];
1795  scale = enc->width * enc->height * 255.0 * 255.0;
1796  }
1797  if (j)
1798  scale /= 4;
1799  error_sum += error;
1800  scale_sum += scale;
1801  p = psnr(error / scale);
1802  av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1803  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1804  ost->file_index, ost->index, type[j] | 32, p);
1805  }
1806  p = psnr(error_sum / scale_sum);
1807  av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1808  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1809  ost->file_index, ost->index, p);
1810  }
1811  vid = 1;
1812  }
1813  /* compute min output value */
1817  if (copy_ts) {
1818  if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1)
1822  }
1823  }
1824 
1825  if (is_last_report)
1827  }
1828 
1829  secs = FFABS(pts) / AV_TIME_BASE;
1830  us = FFABS(pts) % AV_TIME_BASE;
1831  mins = secs / 60;
1832  secs %= 60;
1833  hours = mins / 60;
1834  mins %= 60;
1835  hours_sign = (pts < 0) ? "-" : "";
1836 
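 /* pts is in microseconds: bits / (pts / 1000) is bits per millisecond, i.e.
  * kbit/s; (pts / AV_TIME_BASE) / t is output seconds per wall-clock second */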
1837  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1838  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1839 
1840  if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1841  else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1842  if (pts == AV_NOPTS_VALUE) {
1843  av_bprintf(&buf, "N/A ");
1844  } else {
1845  av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1846  hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1847  }
1848 
1849  if (bitrate < 0) {
1850  av_bprintf(&buf, "bitrate=N/A");
1851  av_bprintf(&buf_script, "bitrate=N/A\n");
1852  }else{
1853  av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1854  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1855  }
1856 
1857  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1858  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1859  if (pts == AV_NOPTS_VALUE) {
1860  av_bprintf(&buf_script, "out_time_us=N/A\n");
1861  av_bprintf(&buf_script, "out_time_ms=N/A\n");
1862  av_bprintf(&buf_script, "out_time=N/A\n");
1863  } else {
1864  av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1865  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1866  av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1867  hours_sign, hours, mins, secs, us);
1868  }
1869 
1871  av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1872  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1873  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1874 
1875  if (speed < 0) {
1876  av_bprintf(&buf, " speed=N/A");
1877  av_bprintf(&buf_script, "speed=N/A\n");
1878  } else {
1879  av_bprintf(&buf, " speed=%4.3gx", speed);
1880  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1881  }
1882 
1883  if (print_stats || is_last_report) {
1884  const char end = is_last_report ? '\n' : '\r';
1885  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1886  fprintf(stderr, "%s %c", buf.str, end);
1887  } else
1888  av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1889 
1890  fflush(stderr);
1891  }
1892  av_bprint_finalize(&buf, NULL);
1893 
1894  if (progress_avio) {
1895  av_bprintf(&buf_script, "progress=%s\n",
1896  is_last_report ? "end" : "continue");
1897  avio_write(progress_avio, buf_script.str,
1898  FFMIN(buf_script.len, buf_script.size - 1));
1900  av_bprint_finalize(&buf_script, NULL);
1901  if (is_last_report) {
1902  if ((ret = avio_closep(&progress_avio)) < 0)
1904  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1905  }
1906  }
1907 
1908  first_report = 0;
1909 
1910  if (is_last_report)
1911  print_final_stats(total_size);
1912 }
1913 
1914 static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
1915 {
1916  // We never got any input. Set a fake format, which will
1917  // come from libavformat.
1918  ifilter->format = par->format;
1919  ifilter->sample_rate = par->sample_rate;
1920  ifilter->channels = par->channels;
1921  ifilter->channel_layout = par->channel_layout;
1922  ifilter->width = par->width;
1923  ifilter->height = par->height;
1924  ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1925 }
1926 
1927 static void flush_encoders(void)
1928 {
1929  int i, ret;
1930 
1931  for (i = 0; i < nb_output_streams; i++) {
1933  AVCodecContext *enc = ost->enc_ctx;
1935 
1936  if (!ost->encoding_needed)
1937  continue;
1938 
1939  // Try to enable encoding with no input frames.
1940  // Maybe we should just let encoding fail instead.
1941  if (!ost->initialized) {
1942  FilterGraph *fg = ost->filter->graph;
1943 
1945  "Finishing stream %d:%d without any data written to it.\n",
1946  ost->file_index, ost->st->index);
1947 
1948  if (ost->filter && !fg->graph) {
1949  int x;
1950  for (x = 0; x < fg->nb_inputs; x++) {
1951  InputFilter *ifilter = fg->inputs[x];
1952  if (ifilter->format < 0)
1953  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1954  }
1955 
1957  continue;
1958 
1959  ret = configure_filtergraph(fg);
1960  if (ret < 0) {
1961  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1962  exit_program(1);
1963  }
1964 
1966  }
1967 
1969  }
1970 
1972  continue;
1973 
1974  for (;;) {
1975  const char *desc = NULL;
1976  AVPacket *pkt = ost->pkt;
1977  int pkt_size;
1978 
1979  switch (enc->codec_type) {
1980  case AVMEDIA_TYPE_AUDIO:
1981  desc = "audio";
1982  break;
1983  case AVMEDIA_TYPE_VIDEO:
1984  desc = "video";
1985  break;
1986  default:
1987  av_assert0(0);
1988  }
1989 
1991 
1992  while ((ret = avcodec_receive_packet(enc, pkt)) == AVERROR(EAGAIN)) {
1993  ret = avcodec_send_frame(enc, NULL);
1994  if (ret < 0) {
1995  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1996  desc,
1997  av_err2str(ret));
1998  exit_program(1);
1999  }
2000  }
2001 
2002  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
2003  if (ret < 0 && ret != AVERROR_EOF) {
2004  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
2005  desc,
2006  av_err2str(ret));
2007  exit_program(1);
2008  }
2009  if (ost->logfile && enc->stats_out) {
2010  fprintf(ost->logfile, "%s", enc->stats_out);
2011  }
2012  if (ret == AVERROR_EOF) {
2013  output_packet(of, pkt, ost, 1);
2014  break;
2015  }
2016  if (ost->finished & MUXER_FINISHED) {
2018  continue;
2019  }
2021  pkt_size = pkt->size;
2022  output_packet(of, pkt, ost, 0);
2024  do_video_stats(ost, pkt_size);
2025  }
2026  }
2027  }
2028 }
2029 
2030 /*
2031  * Check whether a packet from ist should be written into ost at this time
2032  */
2033 static int check_output_constraints(InputStream *ist, OutputStream *ost)
2034 {
2036  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
2037 
2038  if (ost->source_index != ist_index)
2039  return 0;
2040 
2041  if (ost->finished)
2042  return 0;
2043 
2044  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
2045  return 0;
2046 
2047  return 1;
2048 }
2049 
2050 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
2051 {
2053  InputFile *f = input_files [ist->file_index];
2054  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
2055  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
2056  AVPacket *opkt = ost->pkt;
2057 
2058  av_packet_unref(opkt);
2059  // EOF: flush output bitstream filters.
2060  if (!pkt) {
2061  output_packet(of, opkt, ost, 1);
2062  return;
2063  }
2064 
2065  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2067  return;
2068 
2069  if (!ost->frame_number && !ost->copy_prior_start) {
2070  int64_t comp_start = start_time;
2071  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2072  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2073  if (pkt->pts == AV_NOPTS_VALUE ?
2074  ist->pts < comp_start :
2075  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
2076  return;
2077  }
2078 
2079  if (of->recording_time != INT64_MAX &&
2080  ist->pts >= of->recording_time + start_time) {
2082  return;
2083  }
2084 
2085  if (f->recording_time != INT64_MAX) {
2086  start_time = 0;
2087  if (copy_ts) {
2088  start_time += f->start_time != AV_NOPTS_VALUE ? f->start_time : 0;
2089  start_time += start_at_zero ? 0 : f->ctx->start_time;
2090  }
2091  if (ist->pts >= f->recording_time + start_time) {
2093  return;
2094  }
2095  }
2096 
2097  /* force the input stream PTS */
2099  ost->sync_opts++;
2100 
2101  if (av_packet_ref(opkt, pkt) < 0)
2102  exit_program(1);
2103 
2104  if (pkt->pts != AV_NOPTS_VALUE)
2105  opkt->pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2106 
 2107  if (pkt->dts == AV_NOPTS_VALUE) {
 2108  opkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2109  } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
2110  int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2111  if(!duration)
2112  duration = ist->dec_ctx->frame_size;
2113  opkt->dts = av_rescale_delta(ist->st->time_base, pkt->dts,
2114  (AVRational){1, ist->dec_ctx->sample_rate}, duration,
2115  &ist->filter_in_rescale_delta_last, ost->mux_timebase);
 2116  /* opkt->dts also has ost_tb_start_time subtracted below, so it ends up equal to this pts */
2117  opkt->pts = opkt->dts - ost_tb_start_time;
2118  } else
2119  opkt->dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2120  opkt->dts -= ost_tb_start_time;
2121 
2122  opkt->duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2123 
2124  output_packet(of, opkt, ost, 0);
2125 }
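
/*
 * Editor's sketch (not part of ffmpeg.c): the per-field rescaling in
 * do_streamcopy() above is essentially what av_packet_rescale_ts() does in a
 * single call when no start-time offset or audio delta compensation is
 * required. Minimal illustration only; in_tb is the demuxer's stream time
 * base and out_tb the muxer's expected time base.
 */
static void example_rescale_packet(AVPacket *pkt, AVRational in_tb, AVRational out_tb)
{
    /* converts pts, dts and duration from in_tb to out_tb,
     * leaving AV_NOPTS_VALUE fields untouched */
    av_packet_rescale_ts(pkt, in_tb, out_tb);
}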
2126 
2128 {
2129  AVCodecContext *dec = ist->dec_ctx;
2130 
2131  if (!dec->channel_layout) {
2132  char layout_name[256];
2133 
2134  if (dec->channels > ist->guess_layout_max)
2135  return 0;
2136  dec->channel_layout = av_get_default_channel_layout(dec->channels);
2137  if (!dec->channel_layout)
2138  return 0;
2139  av_get_channel_layout_string(layout_name, sizeof(layout_name),
2140  dec->channels, dec->channel_layout);
2141  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2142  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2143  }
2144  return 1;
2145 }
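
/*
 * Editor's sketch (not part of ffmpeg.c): how a default layout is derived
 * from a channel count with the (pre-AVChannelLayout) API used above.
 * Hypothetical helper; returns 0 when no default layout exists for that
 * channel count.
 */
static int64_t example_default_layout(int channels, char *name, int name_size)
{
    int64_t layout = av_get_default_channel_layout(channels); /* e.g. stereo for 2 */
    if (layout && name)
        av_get_channel_layout_string(name, name_size, channels, layout);
    return layout;
}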
2146 
2148 {
2149  if (*got_output || ret<0)
2150  decode_error_stat[ret<0] ++;
2151 
2152  if (ret < 0 && exit_on_error)
2153  exit_program(1);
2154 
2155  if (*got_output && ist) {
2156  if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2158  "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2159  if (exit_on_error)
2160  exit_program(1);
2161  }
2162  }
2163 }
2164 
2165 // Filters can be configured only if the formats of all inputs are known.
2167 {
2168  int i;
2169  for (i = 0; i < fg->nb_inputs; i++) {
2170  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2171  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2172  return 0;
2173  }
2174  return 1;
2175 }
2176 
2178 {
2179  FilterGraph *fg = ifilter->graph;
2180  AVFrameSideData *sd;
2181  int need_reinit, ret, i;
2182 
2183  /* determine if the parameters for this input changed */
2184  need_reinit = ifilter->format != frame->format;
2185 
2186  switch (ifilter->ist->st->codecpar->codec_type) {
2187  case AVMEDIA_TYPE_AUDIO:
2188  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2189  ifilter->channels != frame->channels ||
2190  ifilter->channel_layout != frame->channel_layout;
2191  break;
2192  case AVMEDIA_TYPE_VIDEO:
2193  need_reinit |= ifilter->width != frame->width ||
2194  ifilter->height != frame->height;
2195  break;
2196  }
2197 
2198  if (!ifilter->ist->reinit_filters && fg->graph)
2199  need_reinit = 0;
2200 
2201  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2202  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2203  need_reinit = 1;
2204 
2206  if (!ifilter->displaymatrix || memcmp(sd->data, ifilter->displaymatrix, sizeof(int32_t) * 9))
2207  need_reinit = 1;
2208  } else if (ifilter->displaymatrix)
2209  need_reinit = 1;
2210 
2211  if (need_reinit) {
2213  if (ret < 0)
2214  return ret;
2215  }
2216 
2217  /* (re)init the graph if possible, otherwise buffer the frame and return */
2218  if (need_reinit || !fg->graph) {
2219  for (i = 0; i < fg->nb_inputs; i++) {
2220  if (!ifilter_has_all_input_formats(fg)) {
 2221  AVFrame *tmp = av_frame_clone(frame);
 2222  if (!tmp)
2223  return AVERROR(ENOMEM);
2225 
2226  if (!av_fifo_space(ifilter->frame_queue)) {
2227  ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2228  if (ret < 0) {
2229  av_frame_free(&tmp);
2230  return ret;
2231  }
2232  }
2233  av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
2234  return 0;
2235  }
2236  }
2237 
2238  ret = reap_filters(1);
2239  if (ret < 0 && ret != AVERROR_EOF) {
2240  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2241  return ret;
2242  }
2243 
2244  ret = configure_filtergraph(fg);
2245  if (ret < 0) {
2246  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2247  return ret;
2248  }
2249  }
2250 
2252  if (ret < 0) {
2253  if (ret != AVERROR_EOF)
2254  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2255  return ret;
2256  }
2257 
2258  return 0;
2259 }
2260 
2261 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2262 {
2263  int ret;
2264 
2265  ifilter->eof = 1;
2266 
2267  if (ifilter->filter) {
 2268  ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
 2269  if (ret < 0)
2270  return ret;
2271  } else {
2272  // the filtergraph was never configured
2273  if (ifilter->format < 0)
2274  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2275  if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2276  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2277  return AVERROR_INVALIDDATA;
2278  }
2279  }
2280 
2281  return 0;
2282 }
2283 
 2284 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
 2285 // The difference: if a frame was returned, this function must be called again
 2286 // with pkt=NULL to fetch the remaining output. pkt==NULL is treated differently
 2287 // from pkt->size==0 (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2288 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2289 {
2290  int ret;
2291 
2292  *got_frame = 0;
2293 
2294  if (pkt) {
2295  ret = avcodec_send_packet(avctx, pkt);
2296  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2297  // decoded frames with avcodec_receive_frame() until done.
2298  if (ret < 0 && ret != AVERROR_EOF)
2299  return ret;
2300  }
2301 
2302  ret = avcodec_receive_frame(avctx, frame);
2303  if (ret < 0 && ret != AVERROR(EAGAIN))
2304  return ret;
2305  if (ret >= 0)
2306  *got_frame = 1;
2307 
2308  return 0;
2309 }
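
/*
 * Editor's sketch (not part of ffmpeg.c): the canonical send/receive pattern
 * that the decode() wrapper above adapts to a one-frame-per-call interface.
 * Hypothetical helper; handle_frame() stands in for whatever the caller does
 * with each decoded frame. Because the decoder is drained completely on each
 * call, avcodec_send_packet() is not expected to return EAGAIN here.
 */
static int example_decode_packet(AVCodecContext *avctx, AVFrame *frame,
                                 const AVPacket *pkt,
                                 void (*handle_frame)(AVFrame *))
{
    int ret = avcodec_send_packet(avctx, pkt); /* pkt == NULL starts draining */
    if (ret < 0 && ret != AVERROR_EOF)
        return ret;

    while ((ret = avcodec_receive_frame(avctx, frame)) >= 0) {
        handle_frame(frame);
        av_frame_unref(frame);
    }
    /* EAGAIN: decoder wants more input; EOF: fully drained */
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}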
2310 
2312 {
2313  int i, ret;
2314  AVFrame *f;
2315 
2316  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2317  for (i = 0; i < ist->nb_filters; i++) {
2318  if (i < ist->nb_filters - 1) {
2319  f = ist->filter_frame;
2321  if (ret < 0)
2322  break;
2323  } else
2324  f = decoded_frame;
2325  ret = ifilter_send_frame(ist->filters[i], f);
2326  if (ret == AVERROR_EOF)
2327  ret = 0; /* ignore */
2328  if (ret < 0) {
2330  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2331  break;
2332  }
2333  }
2334  return ret;
2335 }
2336 
2338  int *decode_failed)
2339 {
2341  AVCodecContext *avctx = ist->dec_ctx;
2342  int ret, err = 0;
2343  AVRational decoded_frame_tb;
2344 
2345  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2346  return AVERROR(ENOMEM);
2347  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2348  return AVERROR(ENOMEM);
2349  decoded_frame = ist->decoded_frame;
2350 
2352  ret = decode(avctx, decoded_frame, got_output, pkt);
2353  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2354  if (ret < 0)
2355  *decode_failed = 1;
2356 
2357  if (ret >= 0 && avctx->sample_rate <= 0) {
2358  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2360  }
2361 
2362  if (ret != AVERROR_EOF)
 2363  check_decode_result(ist, got_output, ret);
 2364 
2365  if (!*got_output || ret < 0)
2366  return ret;
2367 
2368  ist->samples_decoded += decoded_frame->nb_samples;
2369  ist->frames_decoded++;
2370 
2371  /* increment next_dts to use for the case where the input stream does not
2372  have timestamps or there are multiple frames in the packet */
2373  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2374  avctx->sample_rate;
2375  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2376  avctx->sample_rate;
2377 
2378  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2379  decoded_frame_tb = ist->st->time_base;
2380  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2381  decoded_frame->pts = pkt->pts;
2382  decoded_frame_tb = ist->st->time_base;
2383  }else {
2384  decoded_frame->pts = ist->dts;
2385  decoded_frame_tb = AV_TIME_BASE_Q;
2386  }
2388  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2389  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2390  (AVRational){1, avctx->sample_rate});
2391  ist->nb_samples = decoded_frame->nb_samples;
2393 
2394  av_frame_unref(ist->filter_frame);
2396  return err < 0 ? err : ret;
2397 }
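
/*
 * Editor's note (illustrative, not part of ffmpeg.c): the next_pts/next_dts
 * increment in decode_audio() above is simply nb_samples converted to
 * AV_TIME_BASE units. For example, 1024 samples at 48000 Hz advance the
 * clock by 1024 * 1000000 / 48000 = 21333 microseconds.
 */
static int64_t example_audio_advance(int nb_samples, int sample_rate)
{
    return ((int64_t)AV_TIME_BASE * nb_samples) / sample_rate;
}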
2398 
2399 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2400  int *decode_failed)
2401 {
2403  int i, ret = 0, err = 0;
2404  int64_t best_effort_timestamp;
2405  int64_t dts = AV_NOPTS_VALUE;
2406 
2407  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2408  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2409  // skip the packet.
2410  if (!eof && pkt && pkt->size == 0)
2411  return 0;
2412 
2413  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2414  return AVERROR(ENOMEM);
2415  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2416  return AVERROR(ENOMEM);
2417  decoded_frame = ist->decoded_frame;
2418  if (ist->dts != AV_NOPTS_VALUE)
2419  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2420  if (pkt) {
2421  pkt->dts = dts; // ffmpeg.c probably shouldn't do this
2422  }
2423 
2424  // The old code used to set dts on the drain packet, which does not work
2425  // with the new API anymore.
2426  if (eof) {
2427  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2428  if (!new)
2429  return AVERROR(ENOMEM);
2430  ist->dts_buffer = new;
2431  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2432  }
2433 
2435  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt);
2436  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2437  if (ret < 0)
2438  *decode_failed = 1;
2439 
 2440  // The following line may be required in some cases where there is no parser
 2441  // or the parser does not set has_b_frames correctly
2442  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2443  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2444  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2445  } else
2446  av_log(ist->dec_ctx, AV_LOG_WARNING,
2447  "video_delay is larger in decoder than demuxer %d > %d.\n"
2448  "If you want to help, upload a sample "
2449  "of this file to https://streams.videolan.org/upload/ "
2450  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2451  ist->dec_ctx->has_b_frames,
2452  ist->st->codecpar->video_delay);
2453  }
2454 
2455  if (ret != AVERROR_EOF)
 2456  check_decode_result(ist, got_output, ret);
 2457 
2458  if (*got_output && ret >= 0) {
2459  if (ist->dec_ctx->width != decoded_frame->width ||
2460  ist->dec_ctx->height != decoded_frame->height ||
2461  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2462  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2466  ist->dec_ctx->width,
2467  ist->dec_ctx->height,
2468  ist->dec_ctx->pix_fmt);
2469  }
2470  }
2471 
2472  if (!*got_output || ret < 0)
2473  return ret;
2474 
2475  if(ist->top_field_first>=0)
2476  decoded_frame->top_field_first = ist->top_field_first;
2477 
2478  ist->frames_decoded++;
2479 
2480  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2481  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2482  if (err < 0)
2483  goto fail;
2484  }
2485  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2486 
2487  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2488  *duration_pts = decoded_frame->pkt_duration;
2489 
2490  if (ist->framerate.num)
2491  best_effort_timestamp = ist->cfr_next_pts++;
2492 
2493  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2494  best_effort_timestamp = ist->dts_buffer[0];
2495 
2496  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2497  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2498  ist->nb_dts_buffer--;
2499  }
2500 
2501  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2502  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2503 
2504  if (ts != AV_NOPTS_VALUE)
2505  ist->next_pts = ist->pts = ts;
2506  }
2507 
2508  if (debug_ts) {
2509  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2510  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2511  ist->st->index, av_ts2str(decoded_frame->pts),
2512  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2513  best_effort_timestamp,
2514  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2516  ist->st->time_base.num, ist->st->time_base.den);
2517  }
2518 
2519  if (ist->st->sample_aspect_ratio.num)
2520  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2521 
2523 
2524 fail:
2525  av_frame_unref(ist->filter_frame);
2527  return err < 0 ? err : ret;
2528 }
2529 
2531  int *decode_failed)
2532 {
2534  int free_sub = 1;
2535  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2536  &subtitle, got_output, pkt);
2537 
 2538  check_decode_result(NULL, got_output, ret);
 2539 
2540  if (ret < 0 || !*got_output) {
2541  *decode_failed = 1;
2542  if (!pkt->size)
 2543  sub2video_flush(ist);
 2544  return ret;
2545  }
2546 
2547  if (ist->fix_sub_duration) {
2548  int end = 1;
2549  if (ist->prev_sub.got_output) {
2550  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2551  1000, AV_TIME_BASE);
2552  if (end < ist->prev_sub.subtitle.end_display_time) {
2553  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2554  "Subtitle duration reduced from %"PRId32" to %d%s\n",
2555  ist->prev_sub.subtitle.end_display_time, end,
2556  end <= 0 ? ", dropping it" : "");
2557  ist->prev_sub.subtitle.end_display_time = end;
2558  }
2559  }
2560  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2561  FFSWAP(int, ret, ist->prev_sub.ret);
2562  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2563  if (end <= 0)
2564  goto out;
2565  }
2566 
2567  if (!*got_output)
2568  return ret;
2569 
2570  if (ist->sub2video.frame) {
2571  sub2video_update(ist, INT64_MIN, &subtitle);
2572  } else if (ist->nb_filters) {
2573  if (!ist->sub2video.sub_queue)
2574  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2575  if (!ist->sub2video.sub_queue)
2576  exit_program(1);
2577  if (!av_fifo_space(ist->sub2video.sub_queue)) {
2578  ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2579  if (ret < 0)
2580  exit_program(1);
2581  }
2582  av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2583  free_sub = 0;
2584  }
2585 
2586  if (!subtitle.num_rects)
2587  goto out;
2588 
2589  ist->frames_decoded++;
2590 
2591  for (i = 0; i < nb_output_streams; i++) {
2593 
 2594  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
 2595  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2596  continue;
2597 
2599  }
2600 
2601 out:
2602  if (free_sub)
 2603  avsubtitle_free(&subtitle);
 2604  return ret;
2605 }
2606 
2608 {
2609  int i, ret;
2610  /* TODO keep pts also in stream time base to avoid converting back */
2611  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2613 
2614  for (i = 0; i < ist->nb_filters; i++) {
2615  ret = ifilter_send_eof(ist->filters[i], pts);
2616  if (ret < 0)
2617  return ret;
2618  }
2619  return 0;
2620 }
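
/*
 * Editor's sketch (not part of ffmpeg.c): signalling EOF on a single,
 * already-configured buffersrc input, which is what ifilter_send_eof() boils
 * down to once the filtergraph exists. `src` is assumed to be the buffersrc
 * AVFilterContext feeding the graph.
 */
static int example_close_buffersrc(AVFilterContext *src, int64_t pts)
{
    /* pts marks the end-of-stream time in the buffersrc time base;
     * AV_NOPTS_VALUE is also accepted */
    return av_buffersrc_close(src, pts, AV_BUFFERSRC_FLAG_PUSH);
}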
2621 
2622 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2623 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2624 {
2625  int ret = 0, i;
2626  int repeating = 0;
2627  int eof_reached = 0;
2628 
2629  AVPacket *avpkt = ist->pkt;
2630 
2631  if (!ist->saw_first_ts) {
2632  ist->first_dts =
2633  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2634  ist->pts = 0;
2635  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2636  ist->first_dts =
2637  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
 2638  ist->pts = ist->dts; //unused, but better to set it to a value that's not totally wrong
2639  }
2640  ist->saw_first_ts = 1;
2641  }
2642 
2643  if (ist->next_dts == AV_NOPTS_VALUE)
2644  ist->next_dts = ist->dts;
2645  if (ist->next_pts == AV_NOPTS_VALUE)
2646  ist->next_pts = ist->pts;
2647 
2648  if (pkt) {
2649  av_packet_unref(avpkt);
2650  ret = av_packet_ref(avpkt, pkt);
2651  if (ret < 0)
2652  return ret;
2653  }
2654 
2655  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2656  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2657  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2658  ist->next_pts = ist->pts = ist->dts;
2659  }
2660 
2661  // while we have more to decode or while the decoder did output something on EOF
2662  while (ist->decoding_needed) {
2663  int64_t duration_dts = 0;
2664  int64_t duration_pts = 0;
2665  int got_output = 0;
2666  int decode_failed = 0;
2667 
2668  ist->pts = ist->next_pts;
2669  ist->dts = ist->next_dts;
2670 
2671  switch (ist->dec_ctx->codec_type) {
2672  case AVMEDIA_TYPE_AUDIO:
2673  ret = decode_audio (ist, repeating ? NULL : avpkt, &got_output,
2674  &decode_failed);
2675  av_packet_unref(avpkt);
2676  break;
2677  case AVMEDIA_TYPE_VIDEO:
2678  ret = decode_video (ist, repeating ? NULL : avpkt, &got_output, &duration_pts, !pkt,
2679  &decode_failed);
2680  if (!repeating || !pkt || got_output) {
2681  if (pkt && pkt->duration) {
2682  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2683  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2684  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2685  duration_dts = ((int64_t)AV_TIME_BASE *
2686  ist->dec_ctx->framerate.den * ticks) /
2687  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2688  }
2689 
2690  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2691  ist->next_dts += duration_dts;
2692  }else
2693  ist->next_dts = AV_NOPTS_VALUE;
2694  }
2695 
2696  if (got_output) {
2697  if (duration_pts > 0) {
2698  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2699  } else {
2700  ist->next_pts += duration_dts;
2701  }
2702  }
2703  av_packet_unref(avpkt);
2704  break;
2705  case AVMEDIA_TYPE_SUBTITLE:
2706  if (repeating)
2707  break;
2708  ret = transcode_subtitles(ist, avpkt, &got_output, &decode_failed);
2709  if (!pkt && ret >= 0)
2710  ret = AVERROR_EOF;
2711  av_packet_unref(avpkt);
2712  break;
2713  default:
2714  return -1;
2715  }
2716 
2717  if (ret == AVERROR_EOF) {
2718  eof_reached = 1;
2719  break;
2720  }
2721 
2722  if (ret < 0) {
2723  if (decode_failed) {
2724  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2725  ist->file_index, ist->st->index, av_err2str(ret));
2726  } else {
2727  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2728  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2729  }
2730  if (!decode_failed || exit_on_error)
2731  exit_program(1);
2732  break;
2733  }
2734 
2735  if (got_output)
2736  ist->got_output = 1;
2737 
2738  if (!got_output)
2739  break;
2740 
2741  // During draining, we might get multiple output frames in this loop.
2742  // ffmpeg.c does not drain the filter chain on configuration changes,
2743  // which means if we send multiple frames at once to the filters, and
2744  // one of those frames changes configuration, the buffered frames will
2745  // be lost. This can upset certain FATE tests.
2746  // Decode only 1 frame per call on EOF to appease these FATE tests.
2747  // The ideal solution would be to rewrite decoding to use the new
2748  // decoding API in a better way.
2749  if (!pkt)
2750  break;
2751 
2752  repeating = 1;
2753  }
2754 
2755  /* after flushing, send an EOF on all the filter inputs attached to the stream */
 2756  /* except when looping, where we need to flush but not send an EOF */
2757  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2758  int ret = send_filter_eof(ist);
2759  if (ret < 0) {
2760  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2761  exit_program(1);
2762  }
2763  }
2764 
2765  /* handle stream copy */
2766  if (!ist->decoding_needed && pkt) {
2767  ist->dts = ist->next_dts;
2768  switch (ist->dec_ctx->codec_type) {
2769  case AVMEDIA_TYPE_AUDIO:
2770  av_assert1(pkt->duration >= 0);
2771  if (ist->dec_ctx->sample_rate) {
2772  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2773  ist->dec_ctx->sample_rate;
2774  } else {
2775  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2776  }
2777  break;
2778  case AVMEDIA_TYPE_VIDEO:
2779  if (ist->framerate.num) {
2780  // TODO: Remove work-around for c99-to-c89 issue 7
2781  AVRational time_base_q = AV_TIME_BASE_Q;
2782  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2783  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2784  } else if (pkt->duration) {
2785  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2786  } else if(ist->dec_ctx->framerate.num != 0) {
2787  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2788  ist->next_dts += ((int64_t)AV_TIME_BASE *
2789  ist->dec_ctx->framerate.den * ticks) /
2790  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2791  }
2792  break;
2793  }
2794  ist->pts = ist->dts;
2795  ist->next_pts = ist->next_dts;
2796  }
2797  for (i = 0; i < nb_output_streams; i++) {
 2798  OutputStream *ost = output_streams[i];
 2799 
 2800  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
 2801  continue;
2802 
2803  do_streamcopy(ist, ost, pkt);
2804  }
2805 
2806  return !eof_reached;
2807 }
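
/*
 * Editor's sketch (not part of ffmpeg.c): the drain sequence that
 * process_input_packet(ist, NULL, 0) effectively performs on a decoder,
 * shown against the raw libavcodec API. Assumes `avctx` is an opened decoder.
 */
static int example_flush_decoder(AVCodecContext *avctx, AVFrame *frame)
{
    int ret = avcodec_send_packet(avctx, NULL); /* enter draining mode */
    if (ret < 0 && ret != AVERROR_EOF)
        return ret;

    while ((ret = avcodec_receive_frame(avctx, frame)) >= 0)
        av_frame_unref(frame); /* a real caller would forward the frame */

    return ret == AVERROR_EOF ? 0 : ret;
}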
2808 
2809 static void print_sdp(void)
2810 {
2811  char sdp[16384];
2812  int i;
2813  int j;
2814  AVIOContext *sdp_pb;
2815  AVFormatContext **avc;
2816 
2817  for (i = 0; i < nb_output_files; i++) {
2818  if (!output_files[i]->header_written)
2819  return;
2820  }
2821 
2822  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2823  if (!avc)
2824  exit_program(1);
2825  for (i = 0, j = 0; i < nb_output_files; i++) {
2826  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2827  avc[j] = output_files[i]->ctx;
2828  j++;
2829  }
2830  }
2831 
2832  if (!j)
2833  goto fail;
2834 
2835  av_sdp_create(avc, j, sdp, sizeof(sdp));
2836 
2837  if (!sdp_filename) {
2838  printf("SDP:\n%s\n", sdp);
2839  fflush(stdout);
2840  } else {
2841  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2842  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2843  } else {
2844  avio_print(sdp_pb, sdp);
2845  avio_closep(&sdp_pb);
2847  }
2848  }
2849 
2850 fail:
2851  av_freep(&avc);
2852 }
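
/*
 * Editor's sketch (not part of ffmpeg.c): generating an SDP description for a
 * single RTP muxer context, the core of print_sdp() without the multi-file
 * bookkeeping. `rtp_ctx` is assumed to be an AVFormatContext opened with the
 * "rtp" output format and with its streams already set up.
 */
static int example_print_sdp(AVFormatContext *rtp_ctx)
{
    char sdp[16384];
    int ret = av_sdp_create(&rtp_ctx, 1, sdp, sizeof(sdp));
    if (ret < 0)
        return ret;
    printf("SDP:\n%s\n", sdp);
    return 0;
}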
2853 
2855 {
2856  InputStream *ist = s->opaque;
2857  const enum AVPixelFormat *p;
2858  int ret;
2859 
2860  for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
 2861  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
 2862  const AVCodecHWConfig *config = NULL;
2863  int i;
2864 
2865  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2866  break;
2867 
2868  if (ist->hwaccel_id == HWACCEL_GENERIC ||
2869  ist->hwaccel_id == HWACCEL_AUTO) {
2870  for (i = 0;; i++) {
2871  config = avcodec_get_hw_config(s->codec, i);
2872  if (!config)
2873  break;
2874  if (!(config->methods &
2876  continue;
2877  if (config->pix_fmt == *p)
2878  break;
2879  }
2880  }
2881  if (config) {
2882  if (config->device_type != ist->hwaccel_device_type) {
2883  // Different hwaccel offered, ignore.
2884  continue;
2885  }
2886 
2888  if (ret < 0) {
2889  if (ist->hwaccel_id == HWACCEL_GENERIC) {
2891  "%s hwaccel requested for input stream #%d:%d, "
2892  "but cannot be initialized.\n",
2893  av_hwdevice_get_type_name(config->device_type),
2894  ist->file_index, ist->st->index);
2895  return AV_PIX_FMT_NONE;
2896  }
2897  continue;
2898  }
2899  } else {
2900  const HWAccel *hwaccel = NULL;
2901  int i;
2902  for (i = 0; hwaccels[i].name; i++) {
2903  if (hwaccels[i].pix_fmt == *p) {
2904  hwaccel = &hwaccels[i];
2905  break;
2906  }
2907  }
2908  if (!hwaccel) {
2909  // No hwaccel supporting this pixfmt.
2910  continue;
2911  }
2912  if (hwaccel->id != ist->hwaccel_id) {
2913  // Does not match requested hwaccel.
2914  continue;
2915  }
2916 
2917  ret = hwaccel->init(s);
2918  if (ret < 0) {
2920  "%s hwaccel requested for input stream #%d:%d, "
2921  "but cannot be initialized.\n", hwaccel->name,
2922  ist->file_index, ist->st->index);
2923  return AV_PIX_FMT_NONE;
2924  }
2925  }
2926 
2927  if (ist->hw_frames_ctx) {
2928  s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2929  if (!s->hw_frames_ctx)
2930  return AV_PIX_FMT_NONE;
2931  }
2932 
2933  ist->hwaccel_pix_fmt = *p;
2934  break;
2935  }
2936 
2937  return *p;
2938 }
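
/*
 * Editor's sketch (not part of ffmpeg.c): the simplest possible get_format
 * callback, i.e. the shape of the function above minus the hwaccel
 * negotiation. `wanted_hw_fmt` is a placeholder the caller would set; a
 * matching hardware format is accepted, otherwise the first software format
 * in the list is returned.
 */
static enum AVPixelFormat example_get_format(AVCodecContext *s,
                                             const enum AVPixelFormat *pix_fmts)
{
    const enum AVPixelFormat wanted_hw_fmt = AV_PIX_FMT_NONE; /* placeholder */
    const enum AVPixelFormat *p;

    for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
        if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
            return *p;                 /* first software format */
        if (*p == wanted_hw_fmt)
            return *p;                 /* hardware format we can handle */
    }
    return AV_PIX_FMT_NONE;
}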
2939 
2941 {
2942  InputStream *ist = s->opaque;
2943 
2944  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2945  return ist->hwaccel_get_buffer(s, frame, flags);
2946 
 2947  return avcodec_default_get_buffer2(s, frame, flags);
 2948 }
2949 
2950 static int init_input_stream(int ist_index, char *error, int error_len)
2951 {
2952  int ret;
2953  InputStream *ist = input_streams[ist_index];
2954 
2955  if (ist->decoding_needed) {
2956  const AVCodec *codec = ist->dec;
2957  if (!codec) {
2958  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2959  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2960  return AVERROR(EINVAL);
2961  }
2962 
2963  ist->dec_ctx->opaque = ist;
2964  ist->dec_ctx->get_format = get_format;
2965  ist->dec_ctx->get_buffer2 = get_buffer;
2966 #if LIBAVCODEC_VERSION_MAJOR < 60
2968  ist->dec_ctx->thread_safe_callbacks = 1;
2970 #endif
2971 
2972  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2973  (ist->decoding_needed & DECODING_FOR_OST)) {
2974  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2975  if (ist->decoding_needed & DECODING_FOR_FILTER)
 2976  av_log(NULL, AV_LOG_WARNING, "Warning: using DVB subtitles for filtering and output at the same time is not fully supported; also see -compute_edt [0|1]\n");
2977  }
2978 
 2979  /* Useful for subtitle retiming by lavf (FIXME), for skipping samples in
 2980  * audio, and for video decoders such as cuvid or mediacodec */
2981  ist->dec_ctx->pkt_timebase = ist->st->time_base;
2982 
2983  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2984  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2985  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2986  if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2987  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2988 
2990  if (ret < 0) {
2991  snprintf(error, error_len, "Device setup failed for "
2992  "decoder on input stream #%d:%d : %s",
2993  ist->file_index, ist->st->index, av_err2str(ret));
2994  return ret;
2995  }
2996 
2997  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2998  if (ret == AVERROR_EXPERIMENTAL)
2999  abort_codec_experimental(codec, 0);
3000 
3001  snprintf(error, error_len,
3002  "Error while opening decoder for input stream "
3003  "#%d:%d : %s",
3004  ist->file_index, ist->st->index, av_err2str(ret));
3005  return ret;
3006  }
3007  assert_avoptions(ist->decoder_opts);
3008  }
3009 
3010  ist->next_pts = AV_NOPTS_VALUE;
3011  ist->next_dts = AV_NOPTS_VALUE;
3012 
3013  return 0;
3014 }
3015 
3017 {
3018  if (ost->source_index >= 0)
3019  return input_streams[ost->source_index];
3020  return NULL;
3021 }
3022 
3023 static int compare_int64(const void *a, const void *b)
3024 {
3025  return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
3026 }
3027 
3028 /* open the muxer when all the streams are initialized */
3030 {
3031  int ret, i;
3032 
3033  for (i = 0; i < of->ctx->nb_streams; i++) {
3035  if (!ost->initialized)
3036  return 0;
3037  }
3038 
3039  of->ctx->interrupt_callback = int_cb;
3040 
3041  ret = avformat_write_header(of->ctx, &of->opts);
3042  if (ret < 0) {
3044  "Could not write header for output file #%d "
3045  "(incorrect codec parameters ?): %s\n",
3047  return ret;
3048  }
3049  //assert_avoptions(of->opts);
3050  of->header_written = 1;
3051 
3052  av_dump_format(of->ctx, file_index, of->ctx->url, 1);
3053  nb_output_dumped++;
3054 
3055  if (sdp_filename || want_sdp)
3056  print_sdp();
3057 
3058  /* flush the muxing queues */
3059  for (i = 0; i < of->ctx->nb_streams; i++) {
3061 
3062  /* try to improve muxing time_base (only possible if nothing has been written yet) */
3063  if (!av_fifo_size(ost->muxing_queue))
3065 
3066  while (av_fifo_size(ost->muxing_queue)) {
3067  AVPacket *pkt;
3070  write_packet(of, pkt, ost, 1);
3071  av_packet_free(&pkt);
3072  }
3073  }
3074 
3075  return 0;
3076 }
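
/*
 * Editor's sketch (not part of ffmpeg.c): the minimal muxer start-up sequence
 * that check_init_output_file() defers until every stream is initialized.
 * Stream creation is assumed to be done by the caller where indicated;
 * error handling is reduced to a single cleanup path.
 */
static int example_open_muxer(AVFormatContext **poc, const char *url)
{
    AVFormatContext *oc = NULL;
    int ret = avformat_alloc_output_context2(&oc, NULL, NULL, url);
    if (ret < 0)
        return ret;
    /* ... add streams with avformat_new_stream() and fill their codecpar ... */
    if (!(oc->oformat->flags & AVFMT_NOFILE) &&
        (ret = avio_open(&oc->pb, url, AVIO_FLAG_WRITE)) < 0)
        goto fail;
    if ((ret = avformat_write_header(oc, NULL)) < 0) /* after this, packets may be muxed */
        goto fail;
    *poc = oc;
    return 0;
fail:
    if (!(oc->oformat->flags & AVFMT_NOFILE))
        avio_closep(&oc->pb);
    avformat_free_context(oc);
    return ret;
}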
3077 
3079 {
3081  int ret;
3082 
3083  if (!ctx)
3084  return 0;
3085 
3086  ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
3087  if (ret < 0)
3088  return ret;
3089 
3090  ctx->time_base_in = ost->st->time_base;
3091 
3092  ret = av_bsf_init(ctx);
3093  if (ret < 0) {
3094  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3095  ctx->filter->name);
3096  return ret;
3097  }
3098 
3099  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3100  if (ret < 0)
3101  return ret;
3102  ost->st->time_base = ctx->time_base_out;
3103 
3104  return 0;
3105 }
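
/*
 * Editor's sketch (not part of ffmpeg.c): setting up and running a single
 * bitstream filter outside the OutputStream machinery above. The filter name
 * "h264_mp4toannexb" and the source codec parameters `par` are assumptions
 * made for the example.
 */
static int example_run_bsf(const AVCodecParameters *par, AVRational tb,
                           AVPacket *pkt)
{
    const AVBitStreamFilter *f = av_bsf_get_by_name("h264_mp4toannexb");
    AVBSFContext *bsf = NULL;
    int ret;

    if (!f)
        return AVERROR_BSF_NOT_FOUND;
    if ((ret = av_bsf_alloc(f, &bsf)) < 0)
        return ret;

    if ((ret = avcodec_parameters_copy(bsf->par_in, par)) < 0)
        goto end;
    bsf->time_base_in = tb;

    if ((ret = av_bsf_init(bsf)) < 0)
        goto end;

    /* push one packet through; a real caller loops until EAGAIN/EOF */
    if ((ret = av_bsf_send_packet(bsf, pkt)) >= 0)
        ret = av_bsf_receive_packet(bsf, pkt);
end:
    av_bsf_free(&bsf);
    return ret;
}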
3106 
3108 {
3111  AVCodecParameters *par_dst = ost->st->codecpar;
3112  AVCodecParameters *par_src = ost->ref_par;
3113  AVRational sar;
3114  int i, ret;
3115  uint32_t codec_tag = par_dst->codec_tag;
3116 
3117  av_assert0(ist && !ost->filter);
3118 
3119  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3120  if (ret >= 0)
3122  if (ret < 0) {
3124  "Error setting up codec context options.\n");
3125  return ret;
3126  }
3127 
3129  if (ret < 0) {
3131  "Error getting reference codec parameters.\n");
3132  return ret;
3133  }
3134 
3135  if (!codec_tag) {
3136  unsigned int codec_tag_tmp;
3137  if (!of->ctx->oformat->codec_tag ||
3138  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3139  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3140  codec_tag = par_src->codec_tag;
3141  }
3142 
3143  ret = avcodec_parameters_copy(par_dst, par_src);
3144  if (ret < 0)
3145  return ret;
3146 
3147  par_dst->codec_tag = codec_tag;
3148 
3149  if (!ost->frame_rate.num)
3150  ost->frame_rate = ist->framerate;
3151 
3152  if (ost->frame_rate.num)
3154  else
3155  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3156 
3158  if (ret < 0)
3159  return ret;
3160 
3161  // copy timebase while removing common factors
3162  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0) {
3163  if (ost->frame_rate.num)
3165  else
3167  }
3168 
3169  // copy estimated duration as a hint to the muxer
3170  if (ost->st->duration <= 0 && ist->st->duration > 0)
3171  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3172 
3173  // copy disposition
3174  ost->st->disposition = ist->st->disposition;
3175 
3176  if (ist->st->nb_side_data) {
3177  for (i = 0; i < ist->st->nb_side_data; i++) {
3178  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3179  uint8_t *dst_data;
3180 
3181  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3182  if (!dst_data)
3183  return AVERROR(ENOMEM);
3184  memcpy(dst_data, sd_src->data, sd_src->size);
3185  }
3186  }
3187 
3188  if (ost->rotate_overridden) {
3190  sizeof(int32_t) * 9);
3191  if (sd)
3193  }
3194 
3195  switch (par_dst->codec_type) {
3196  case AVMEDIA_TYPE_AUDIO:
3197  if (audio_volume != 256) {
3198  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3199  exit_program(1);
3200  }
3201  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3202  par_dst->block_align= 0;
3203  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3204  par_dst->block_align= 0;
3205  break;
3206  case AVMEDIA_TYPE_VIDEO:
3207  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3208  sar =
3210  (AVRational){ par_dst->height, par_dst->width });
3211  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3212  "with stream copy may produce invalid files\n");
3213  }
3214  else if (ist->st->sample_aspect_ratio.num)
3215  sar = ist->st->sample_aspect_ratio;
3216  else
3217  sar = par_src->sample_aspect_ratio;
3218  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3219  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3220  ost->st->r_frame_rate = ist->st->r_frame_rate;
3221  break;
3222  }
3223 
3224  ost->mux_timebase = ist->st->time_base;
3225 
3226  return 0;
3227 }
3228 
3230 {
3231  AVDictionaryEntry *e;
3232 
3233  uint8_t *encoder_string;
3234  int encoder_string_len;
3235  int format_flags = 0;
3236  int codec_flags = ost->enc_ctx->flags;
3237 
3238  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3239  return;
3240 
3241  e = av_dict_get(of->opts, "fflags", NULL, 0);
3242  if (e) {
3243  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3244  if (!o)
3245  return;
3246  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3247  }
3248  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3249  if (e) {
3250  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3251  if (!o)
3252  return;
3254  }
3255 
3256  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3257  encoder_string = av_mallocz(encoder_string_len);
3258  if (!encoder_string)
3259  exit_program(1);
3260 
3261  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3262  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3263  else
3264  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3265  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3266  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3268 }
3269 
3271  AVCodecContext *avctx)
3272 {
3273  char *p;
3274  int n = 1, i, size, index = 0;
3275  int64_t t, *pts;
3276 
3277  for (p = kf; *p; p++)
3278  if (*p == ',')
3279  n++;
3280  size = n;
3281  pts = av_malloc_array(size, sizeof(*pts));
3282  if (!pts) {
3283  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3284  exit_program(1);
3285  }
3286 
3287  p = kf;
3288  for (i = 0; i < n; i++) {
3289  char *next = strchr(p, ',');
3290 
3291  if (next)
3292  *next++ = 0;
3293 
3294  if (!memcmp(p, "chapters", 8)) {
3295 
3297  int j;
3298 
3299  if (avf->nb_chapters > INT_MAX - size ||
3300  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3301  sizeof(*pts)))) {
3303  "Could not allocate forced key frames array.\n");
3304  exit_program(1);
3305  }
3306  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3307  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3308 
3309  for (j = 0; j < avf->nb_chapters; j++) {
3310  AVChapter *c = avf->chapters[j];
3311  av_assert1(index < size);
3312  pts[index++] = av_rescale_q(c->start, c->time_base,
3313  avctx->time_base) + t;
3314  }
3315 
3316  } else {
3317 
3318  t = parse_time_or_die("force_key_frames", p, 1);
3319  av_assert1(index < size);
3320  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3321 
3322  }
3323 
3324  p = next;
3325  }
3326 
3327  av_assert0(index == size);
3328  qsort(pts, size, sizeof(*pts), compare_int64);
3330  ost->forced_kf_pts = pts;
3331 }
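
/*
 * Editor's sketch (not part of ffmpeg.c): converting one textual timestamp of
 * the kind accepted by -force_key_frames (e.g. "00:01:30.5") into the encoder
 * time base, i.e. the conversion applied to each list entry above.
 */
static int example_time_to_enc_tb(const char *str, AVRational enc_tb, int64_t *out)
{
    int64_t t; /* AV_TIME_BASE (microsecond) units */
    int ret = av_parse_time(&t, str, 1);
    if (ret < 0)
        return ret;
    *out = av_rescale_q(t, AV_TIME_BASE_Q, enc_tb);
    return 0;
}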
3332 
3333 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3334 {
3336  AVCodecContext *enc_ctx = ost->enc_ctx;
3337  AVFormatContext *oc;
3338 
3339  if (ost->enc_timebase.num > 0) {
3340  enc_ctx->time_base = ost->enc_timebase;
3341  return;
3342  }
3343 
3344  if (ost->enc_timebase.num < 0) {
3345  if (ist) {
3346  enc_ctx->time_base = ist->st->time_base;
3347  return;
3348  }
3349 
3350  oc = output_files[ost->file_index]->ctx;
3351  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3352  }
3353 
3354  enc_ctx->time_base = default_time_base;
3355 }
3356 
3358 {
3360  AVCodecContext *enc_ctx = ost->enc_ctx;
3363  int j, ret;
3364 
3366 
3367  // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3368  // hand, the legacy API makes demuxers set "rotate" metadata entries,
3369  // which have to be filtered out to prevent leaking them to output files.
3370  av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3371 
3372  if (ist) {
3373  ost->st->disposition = ist->st->disposition;
3374 
3375  dec_ctx = ist->dec_ctx;
3376 
3378  } else {
3379  for (j = 0; j < oc->nb_streams; j++) {
3380  AVStream *st = oc->streams[j];
3381  if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3382  break;
3383  }
3384  if (j == oc->nb_streams)
3388  }
3389 
3390  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3391  if (!ost->frame_rate.num)
3393  if (ist && !ost->frame_rate.num)
3394  ost->frame_rate = ist->framerate;
3395  if (ist && !ost->frame_rate.num)
3396  ost->frame_rate = ist->st->r_frame_rate;
3397  if (ist && !ost->frame_rate.num && !ost->max_frame_rate.num) {
3398  ost->frame_rate = (AVRational){25, 1};
3400  "No information "
3401  "about the input framerate is available. Falling "
3402  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3403  "if you want a different framerate.\n",
3404  ost->file_index, ost->index);
3405  }
3406 
3407  if (ost->max_frame_rate.num &&
3409  !ost->frame_rate.den))
3411 
3412  if (ost->enc->supported_framerates && !ost->force_fps) {
3413  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3414  ost->frame_rate = ost->enc->supported_framerates[idx];
3415  }
3416  // reduce frame rate for mpeg4 to be within the spec limits
3417  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3419  ost->frame_rate.num, ost->frame_rate.den, 65535);
3420  }
3421  }
3422 
3423  switch (enc_ctx->codec_type) {
3424  case AVMEDIA_TYPE_AUDIO:
3426  if (dec_ctx)
3428  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3432 
3434  break;
3435 
3436  case AVMEDIA_TYPE_VIDEO:
3438 
3439  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3441  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3443  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3444  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3445  }
3446 
3447  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3448  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3450  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3451  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3453 
3455  if (dec_ctx)
3457  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3458 
3459  if (frame) {
3460  enc_ctx->color_range = frame->color_range;
3461  enc_ctx->color_primaries = frame->color_primaries;
3462  enc_ctx->color_trc = frame->color_trc;
3463  enc_ctx->colorspace = frame->colorspace;
3464  enc_ctx->chroma_sample_location = frame->chroma_location;
3465  }
3466 
3467  enc_ctx->framerate = ost->frame_rate;
3468 
3470 
3471  if (!dec_ctx ||
3472  enc_ctx->width != dec_ctx->width ||
3473  enc_ctx->height != dec_ctx->height ||
3474  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3476  }
3477 
3478  // Field order: autodetection
3479  if (frame) {
3481  ost->top_field_first >= 0)
3482  frame->top_field_first = !!ost->top_field_first;
3483 
3484  if (frame->interlaced_frame) {
3485  if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG)
3486  enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
3487  else
3488  enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
3489  } else
3490  enc_ctx->field_order = AV_FIELD_PROGRESSIVE;
3491  }
3492 
3493  // Field order: override
3494  if (ost->top_field_first == 0) {
3495  enc_ctx->field_order = AV_FIELD_BB;
3496  } else if (ost->top_field_first == 1) {
3497  enc_ctx->field_order = AV_FIELD_TT;
3498  }
3499 
3500  if (ost->forced_keyframes) {
3501  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3504  if (ret < 0) {
3506  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3507  return ret;
3508  }
3513 
3514  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3515  // parse it only for static kf timings
3516  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3518  }
3519  }
3520  break;
3521  case AVMEDIA_TYPE_SUBTITLE:
3522  enc_ctx->time_base = AV_TIME_BASE_Q;
3523  if (!enc_ctx->width) {
3526  }
3527  break;
3528  case AVMEDIA_TYPE_DATA:
3529  break;
3530  default:
3531  abort();
3532  break;
3533  }
3534 
3535  ost->mux_timebase = enc_ctx->time_base;
3536 
3537  return 0;
3538 }
3539 
3541  char *error, int error_len)
3542 {
3543  int ret = 0;
3544 
3545  if (ost->encoding_needed) {
3546  const AVCodec *codec = ost->enc;
3547  AVCodecContext *dec = NULL;
3548  InputStream *ist;
3549 
3551  if (ret < 0)
3552  return ret;
3553 
3554  if ((ist = get_input_stream(ost)))
3555  dec = ist->dec_ctx;
3556  if (dec && dec->subtitle_header) {
3557  /* ASS code assumes this buffer is null terminated so add extra byte. */
3558  ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3559  if (!ost->enc_ctx->subtitle_header)
3560  return AVERROR(ENOMEM);
3561  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3562  ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3563  }
3564  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3565  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3566 
3568  if (ret < 0) {
3569  snprintf(error, error_len, "Device setup failed for "
3570  "encoder on output stream #%d:%d : %s",
3572  return ret;
3573  }
3574 
3575  if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3576  int input_props = 0, output_props = 0;
3577  AVCodecDescriptor const *input_descriptor =
3578  avcodec_descriptor_get(dec->codec_id);
3579  AVCodecDescriptor const *output_descriptor =
3581  if (input_descriptor)
3582  input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3583  if (output_descriptor)
3584  output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3585  if (input_props && output_props && input_props != output_props) {
3586  snprintf(error, error_len,
3587  "Subtitle encoding currently only possible from text to text "
3588  "or bitmap to bitmap");
3589  return AVERROR_INVALIDDATA;
3590  }
3591  }
3592 
3593  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3594  if (ret == AVERROR_EXPERIMENTAL)
3595  abort_codec_experimental(codec, 1);
3596  snprintf(error, error_len,
3597  "Error while opening encoder for output stream #%d:%d - "
3598  "maybe incorrect parameters such as bit_rate, rate, width or height",
3599  ost->file_index, ost->index);
3600  return ret;
3601  }
3602  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3603  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3605  ost->enc_ctx->frame_size);
3607  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3608  ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3609  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3610  " It takes bits/s as argument, not kbits/s\n");
3611 
3613  if (ret < 0) {
3615  "Error initializing the output stream codec context.\n");
3616  exit_program(1);
3617  }
3618 
3619  if (ost->enc_ctx->nb_coded_side_data) {
3620  int i;
3621 
3622  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3623  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3624  uint8_t *dst_data;
3625 
3626  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3627  if (!dst_data)
3628  return AVERROR(ENOMEM);
3629  memcpy(dst_data, sd_src->data, sd_src->size);
3630  }
3631  }
3632 
3633  /*
3634  * Add global input side data. For now this is naive, and copies it
3635  * from the input stream's global side data. All side data should
3636  * really be funneled over AVFrame and libavfilter, then added back to
3637  * packet side data, and then potentially using the first packet for
3638  * global side data.
3639  */
3640  if (ist) {
3641  int i;
3642  for (i = 0; i < ist->st->nb_side_data; i++) {
3643  AVPacketSideData *sd = &ist->st->side_data[i];
3644  if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
3645  uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3646  if (!dst)
3647  return AVERROR(ENOMEM);
3648  memcpy(dst, sd->data, sd->size);
3649  if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3650  av_display_rotation_set((uint32_t *)dst, 0);
3651  }
3652  }
3653  }
3654 
3655  // copy timebase while removing common factors
3656  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3658 
3659  // copy estimated duration as a hint to the muxer
3660  if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3661  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3662  } else if (ost->stream_copy) {
3664  if (ret < 0)
3665  return ret;
3666  }
3667 
3668  // parse user provided disposition, and update stream values
3669  if (ost->disposition) {
3670  static const AVOption opts[] = {
3671  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3672  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3673  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3674  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3675  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3676  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3677  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3678  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3679  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3680  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3681  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3682  { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3683  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3684  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3685  { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3686  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3687  { NULL },
3688  };
3689  static const AVClass class = {
3690  .class_name = "",
3691  .item_name = av_default_item_name,
3692  .option = opts,
3693  .version = LIBAVUTIL_VERSION_INT,
3694  };
3695  const AVClass *pclass = &class;
3696 
3697  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3698  if (ret < 0)
3699  return ret;
3700  }
3701 
3702  /* initialize bitstream filters for the output stream
3703  * needs to be done here, because the codec id for streamcopy is not
3704  * known until now */
3706  if (ret < 0)
3707  return ret;
3708 
3709  ost->initialized = 1;
3710 
3712  if (ret < 0)
3713  return ret;
3714 
3715  return ret;
3716 }
3717 
3718 static void report_new_stream(int input_index, AVPacket *pkt)
3719 {
3720  InputFile *file = input_files[input_index];
3721  AVStream *st = file->ctx->streams[pkt->stream_index];
3722 
3723  if (pkt->stream_index < file->nb_streams_warn)
3724  return;
3725  av_log(file->ctx, AV_LOG_WARNING,
3726  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3728  input_index, pkt->stream_index,
3729  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3730  file->nb_streams_warn = pkt->stream_index + 1;
3731 }
3732 
3733 static int transcode_init(void)
3734 {
3735  int ret = 0, i, j, k;
3736  AVFormatContext *oc;
3737  OutputStream *ost;
3738  InputStream *ist;
3739  char error[1024] = {0};
3740 
3741  for (i = 0; i < nb_filtergraphs; i++) {
3742  FilterGraph *fg = filtergraphs[i];
3743  for (j = 0; j < fg->nb_outputs; j++) {
3744  OutputFilter *ofilter = fg->outputs[j];
3745  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3746  continue;
3747  if (fg->nb_inputs != 1)
3748  continue;
3749  for (k = nb_input_streams-1; k >= 0 ; k--)
3750  if (fg->inputs[0]->ist == input_streams[k])
3751  break;
3752  ofilter->ost->source_index = k;
3753  }
3754  }
3755 
3756  /* init framerate emulation */
3757  for (i = 0; i < nb_input_files; i++) {
3759  if (ifile->readrate || ifile->rate_emu)
3760  for (j = 0; j < ifile->nb_streams; j++)
3761  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3762  }
3763 
3764  /* init input streams */
3765  for (i = 0; i < nb_input_streams; i++)
3766  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3767  for (i = 0; i < nb_output_streams; i++) {
3768  ost = output_streams[i];
3770  }
3771  goto dump_format;
3772  }
3773 
3774  /*
3775  * initialize stream copy and subtitle/data streams.
 3776  * Streams that encode AVFrames are initialized later:
 3777  * - video: when the first AVFrame is received in do_video_out
 3778  * - audio: just before the first AVFrame is received in either transcode_step
 3779  * or reap_filters, because the filter-chain buffer sink must be
 3780  * configured with the correct audio frame size, which is only
 3781  * known after the encoder is initialized.
3782  */
3783  for (i = 0; i < nb_output_streams; i++) {
3784  if (!output_streams[i]->stream_copy &&
3785  (output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3787  continue;
3788 
3790  if (ret < 0)
3791  goto dump_format;
3792  }
3793 
3794  /* discard unused programs */
3795  for (i = 0; i < nb_input_files; i++) {
3797  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3798  AVProgram *p = ifile->ctx->programs[j];
3799  int discard = AVDISCARD_ALL;
3800 
3801  for (k = 0; k < p->nb_stream_indexes; k++)
3802  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3803  discard = AVDISCARD_DEFAULT;
3804  break;
3805  }
3806  p->discard = discard;
3807  }
3808  }
3809 
3810  /* write headers for files with no streams */
3811  for (i = 0; i < nb_output_files; i++) {
3812  oc = output_files[i]->ctx;
3813  if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3815  if (ret < 0)
3816  goto dump_format;
3817  }
3818  }
3819 
3820  dump_format:
3821  /* dump the stream mapping */
3822  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3823  for (i = 0; i < nb_input_streams; i++) {
3824  ist = input_streams[i];
3825 
3826  for (j = 0; j < ist->nb_filters; j++) {
3827  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3828  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3829  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3830  ist->filters[j]->name);
3831  if (nb_filtergraphs > 1)
3832  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3833  av_log(NULL, AV_LOG_INFO, "\n");
3834  }
3835  }
3836  }
3837 
3838  for (i = 0; i < nb_output_streams; i++) {
3839  ost = output_streams[i];
3840 
3841  if (ost->attachment_filename) {
3842  /* an attached file */
3843  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3845  continue;
3846  }
3847 
3849  /* output from a complex graph */
3850  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3851  if (nb_filtergraphs > 1)
3852  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3853 
3854  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3855  ost->index, ost->enc ? ost->enc->name : "?");
3856  continue;
3857  }
3858 
3859  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3862  ost->file_index,
3863  ost->index);
3865  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3867  ost->sync_ist->st->index);
3868  if (ost->stream_copy)
3869  av_log(NULL, AV_LOG_INFO, " (copy)");
3870  else {
3871  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3872  const AVCodec *out_codec = ost->enc;
3873  const char *decoder_name = "?";
3874  const char *in_codec_name = "?";
3875  const char *encoder_name = "?";
3876  const char *out_codec_name = "?";
3877  const AVCodecDescriptor *desc;
3878 
3879  if (in_codec) {
3880  decoder_name = in_codec->name;
3881  desc = avcodec_descriptor_get(in_codec->id);
3882  if (desc)
3883  in_codec_name = desc->name;
3884  if (!strcmp(decoder_name, in_codec_name))
3885  decoder_name = "native";
3886  }
3887 
3888  if (out_codec) {
3889  encoder_name = out_codec->name;
3890  desc = avcodec_descriptor_get(out_codec->id);
3891  if (desc)
3892  out_codec_name = desc->name;
3893  if (!strcmp(encoder_name, out_codec_name))
3894  encoder_name = "native";
3895  }
3896 
3897  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3898  in_codec_name, decoder_name,
3899  out_codec_name, encoder_name);
3900  }
3901  av_log(NULL, AV_LOG_INFO, "\n");
3902  }
3903 
3904  if (ret) {
3905  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3906  return ret;
3907  }
3908 
3910 
3911  return 0;
3912 }
3913 
3914 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3915 static int need_output(void)
3916 {
3917  int i;
3918 
3919  for (i = 0; i < nb_output_streams; i++) {
3923 
3924  if (ost->finished ||
3925  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3926  continue;
3927  if (ost->frame_number >= ost->max_frames) {
3928  int j;
3929  for (j = 0; j < of->ctx->nb_streams; j++)
3931  continue;
3932  }
3933 
3934  return 1;
3935  }
3936 
3937  return 0;
3938 }
3939 
3940 /**
3941  * Select the output stream to process.
3942  *
3943  * @return selected output stream, or NULL if none available
3944  */
3946 {
3947  int i;
3948  int64_t opts_min = INT64_MAX;
3949  OutputStream *ost_min = NULL;
3950 
3951  for (i = 0; i < nb_output_streams; i++) {
3953  int64_t opts = ost->last_mux_dts == AV_NOPTS_VALUE ? INT64_MIN :
3955  AV_TIME_BASE_Q);
3958  "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3960 
3961  if (!ost->initialized && !ost->inputs_done)
3962  return ost->unavailable ? NULL : ost;
3963 
3964  if (!ost->finished && opts < opts_min) {
3965  opts_min = opts;
3966  ost_min = ost->unavailable ? NULL : ost;
3967  }
3968  }
3969  return ost_min;
3970 }
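
/*
 * Editor's sketch (not part of ffmpeg.c): the "pick the stream that is
 * furthest behind" comparison done in choose_output(), expressed with
 * av_compare_ts() so no explicit conversion to AV_TIME_BASE_Q is needed.
 * dts_a/dts_b are last muxed DTS values in their respective stream time bases.
 */
static int example_stream_is_behind(int64_t dts_a, AVRational tb_a,
                                    int64_t dts_b, AVRational tb_b)
{
    /* returns 1 if stream A is behind stream B and should be processed next */
    return av_compare_ts(dts_a, tb_a, dts_b, tb_b) < 0;
}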
3971 
3972 static void set_tty_echo(int on)
3973 {
3974 #if HAVE_TERMIOS_H
3975  struct termios tty;
3976  if (tcgetattr(0, &tty) == 0) {
3977  if (on) tty.c_lflag |= ECHO;
3978  else tty.c_lflag &= ~ECHO;
3979  tcsetattr(0, TCSANOW, &tty);
3980  }
3981 #endif
3982 }
3983 
3984 static int check_keyboard_interaction(int64_t cur_time)
3985 {
3986  int i, ret, key;
3987  static int64_t last_time;
3988  if (received_nb_signals)
3989  return AVERROR_EXIT;
3990  /* read_key() returns 0 on EOF */
3991  if(cur_time - last_time >= 100000 && !run_as_daemon){
3992  key = read_key();
3993  last_time = cur_time;
3994  }else
3995  key = -1;
3996  if (key == 'q')
3997  return AVERROR_EXIT;
3998  if (key == '+') av_log_set_level(av_log_get_level()+10);
3999  if (key == '-') av_log_set_level(av_log_get_level()-10);
4000  if (key == 's') qp_hist ^= 1;
4001  if (key == 'h'){
4002  if (do_hex_dump){
4003  do_hex_dump = do_pkt_dump = 0;
4004  } else if(do_pkt_dump){
4005  do_hex_dump = 1;
4006  } else
4007  do_pkt_dump = 1;
4009  }
4010  if (key == 'c' || key == 'C'){
4011  char buf[4096], target[64], command[256], arg[256] = {0};
4012  double time;
4013  int k, n = 0;
4014  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
4015  i = 0;
4016  set_tty_echo(1);
4017  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
4018  if (k > 0)
4019  buf[i++] = k;
4020  buf[i] = 0;
4021  set_tty_echo(0);
4022  fprintf(stderr, "\n");
4023  if (k > 0 &&
4024  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
4025  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
4026  target, time, command, arg);
4027  for (i = 0; i < nb_filtergraphs; i++) {
4028  FilterGraph *fg = filtergraphs[i];
4029  if (fg->graph) {
4030  if (time < 0) {
4031  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
4032  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
4033  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
4034  } else if (key == 'c') {
4035  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
4037  } else {
4038  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
4039  if (ret < 0)
4040  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
4041  }
4042  }
4043  }
4044  } else {
4046  "Parse error, at least 3 arguments were expected, "
4047  "only %d given in string '%s'\n", n, buf);
4048  }
4049  }
4050  if (key == 'd' || key == 'D'){
4051  int debug=0;
4052  if(key == 'D') {
4053  debug = input_streams[0]->dec_ctx->debug << 1;
4054  if(!debug) debug = 1;
4055  while (debug & FF_DEBUG_DCT_COEFF) //unsupported, would just crash
4056  debug += debug;
4057  }else{
4058  char buf[32];
4059  int k = 0;
4060  i = 0;
4061  set_tty_echo(1);
4062  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
4063  if (k > 0)
4064  buf[i++] = k;
4065  buf[i] = 0;
4066  set_tty_echo(0);
4067  fprintf(stderr, "\n");
4068  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
4069  fprintf(stderr,"error parsing debug value\n");
4070  }
4071  for(i=0;i<nb_input_streams;i++) {
4072&#