FFmpeg
ffplay.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include "config_components.h"
28 #include <inttypes.h>
29 #include <math.h>
30 #include <limits.h>
31 #include <signal.h>
32 #include <stdint.h>
33 
34 #include "libavutil/avstring.h"
36 #include "libavutil/eval.h"
37 #include "libavutil/mathematics.h"
38 #include "libavutil/pixdesc.h"
39 #include "libavutil/imgutils.h"
40 #include "libavutil/dict.h"
41 #include "libavutil/fifo.h"
42 #include "libavutil/parseutils.h"
43 #include "libavutil/samplefmt.h"
44 #include "libavutil/time.h"
45 #include "libavutil/bprint.h"
46 #include "libavformat/avformat.h"
47 #include "libavdevice/avdevice.h"
48 #include "libswscale/swscale.h"
49 #include "libavutil/opt.h"
50 #include "libavcodec/avfft.h"
52 
53 #if CONFIG_AVFILTER
54 # include "libavfilter/avfilter.h"
55 # include "libavfilter/buffersink.h"
56 # include "libavfilter/buffersrc.h"
57 #endif
58 
59 #include <SDL.h>
60 #include <SDL_thread.h>
61 
62 #include "cmdutils.h"
63 #include "opt_common.h"
64 
65 const char program_name[] = "ffplay";
66 const int program_birth_year = 2003;
67 
68 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
69 #define MIN_FRAMES 25
70 #define EXTERNAL_CLOCK_MIN_FRAMES 2
71 #define EXTERNAL_CLOCK_MAX_FRAMES 10
72 
73 /* Minimum SDL audio buffer size, in samples. */
74 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
75 /* Calculate actual buffer size keeping in mind not cause too frequent audio callbacks */
76 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
77 
78 /* Step size for volume control in dB */
79 #define SDL_VOLUME_STEP (0.75)
80 
81 /* no AV sync correction is done if below the minimum AV sync threshold */
82 #define AV_SYNC_THRESHOLD_MIN 0.04
83 /* AV sync correction is done if above the maximum AV sync threshold */
84 #define AV_SYNC_THRESHOLD_MAX 0.1
85 /* If a frame duration is longer than this, it will not be duplicated to compensate AV sync */
86 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
87 /* no AV correction is done if too big error */
88 #define AV_NOSYNC_THRESHOLD 10.0
89 
90 /* maximum audio speed change to get correct sync */
91 #define SAMPLE_CORRECTION_PERCENT_MAX 10
92 
93 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
94 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
95 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
96 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
97 
98 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
99 #define AUDIO_DIFF_AVG_NB 20
100 
101 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
102 #define REFRESH_RATE 0.01
103 
104 /* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
105 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
106 #define SAMPLE_ARRAY_SIZE (8 * 65536)
107 
108 #define CURSOR_HIDE_DELAY 1000000
109 
110 #define USE_ONEPASS_SUBTITLE_RENDER 1
111 
112 static unsigned sws_flags = SWS_BICUBIC;
113 
114 typedef struct MyAVPacketList {
116  int serial;
118 
119 typedef struct PacketQueue {
122  int size;
123  int64_t duration;
125  int serial;
126  SDL_mutex *mutex;
127  SDL_cond *cond;
128 } PacketQueue;
129 
130 #define VIDEO_PICTURE_QUEUE_SIZE 3
131 #define SUBPICTURE_QUEUE_SIZE 16
132 #define SAMPLE_QUEUE_SIZE 9
133 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
134 
135 typedef struct AudioParams {
136  int freq;
141 } AudioParams;
142 
/* Per-stream playback clock.  The current time is never stored directly:
 * it is reconstructed from pts_drift plus the wall-clock time, so it keeps
 * advancing between explicit updates without needing a timer thread. */
143 typedef struct Clock {
144  double pts; /* clock base */
145  double pts_drift; /* clock base minus time at which we updated the clock */
146  double last_updated;
147  double speed;
148  int serial; /* clock is based on a packet with this serial */
149  int paused;
150  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
151 } Clock;
152 
153 /* Common struct for handling all types of decoded data and allocated render buffers. */
154 typedef struct Frame {
157  int serial;
158  double pts; /* presentation timestamp for the frame */
159  double duration; /* estimated duration of the frame */
160  int64_t pos; /* byte position of the frame in the input file */
161  int width;
162  int height;
163  int format;
165  int uploaded;
166  int flip_v;
167 } Frame;
168 
169 typedef struct FrameQueue {
171  int rindex;
172  int windex;
173  int size;
174  int max_size;
177  SDL_mutex *mutex;
178  SDL_cond *cond;
180 } FrameQueue;
181 
182 enum {
183  AV_SYNC_AUDIO_MASTER, /* default choice */
185  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
186 };
187 
188 typedef struct Decoder {
193  int finished;
195  SDL_cond *empty_queue_cond;
196  int64_t start_pts;
198  int64_t next_pts;
200  SDL_Thread *decoder_tid;
201 } Decoder;
202 
203 typedef struct VideoState {
204  SDL_Thread *read_tid;
208  int paused;
211  int seek_req;
213  int64_t seek_pos;
214  int64_t seek_rel;
217  int realtime;
218 
222 
226 
230 
232 
234 
235  double audio_clock;
237  double audio_diff_cum; /* used for AV difference average computation */
244  uint8_t *audio_buf;
245  uint8_t *audio_buf1;
246  unsigned int audio_buf_size; /* in bytes */
247  unsigned int audio_buf1_size;
248  int audio_buf_index; /* in bytes */
251  int muted;
253 #if CONFIG_AVFILTER
254  struct AudioParams audio_filter_src;
255 #endif
260 
261  enum ShowMode {
263  } show_mode;
270  int xpos;
272  SDL_Texture *vis_texture;
273  SDL_Texture *sub_texture;
274  SDL_Texture *vid_texture;
275 
279 
280  double frame_timer;
286  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
289  int eof;
290 
291  char *filename;
293  int step;
294 
295 #if CONFIG_AVFILTER
296  int vfilter_idx;
297  AVFilterContext *in_video_filter; // the first filter in the video chain
298  AVFilterContext *out_video_filter; // the last filter in the video chain
299  AVFilterContext *in_audio_filter; // the first filter in the audio chain
300  AVFilterContext *out_audio_filter; // the last filter in the audio chain
301  AVFilterGraph *agraph; // audio filter graph
302 #endif
303 
305 
307 } VideoState;
308 
309 /* options specified by the user */
311 static const char *input_filename;
312 static const char *window_title;
313 static int default_width = 640;
314 static int default_height = 480;
315 static int screen_width = 0;
316 static int screen_height = 0;
317 static int screen_left = SDL_WINDOWPOS_CENTERED;
318 static int screen_top = SDL_WINDOWPOS_CENTERED;
319 static int audio_disable;
320 static int video_disable;
321 static int subtitle_disable;
322 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
323 static int seek_by_bytes = -1;
324 static float seek_interval = 10;
325 static int display_disable;
326 static int borderless;
327 static int alwaysontop;
328 static int startup_volume = 100;
329 static int show_status = -1;
331 static int64_t start_time = AV_NOPTS_VALUE;
332 static int64_t duration = AV_NOPTS_VALUE;
333 static int fast = 0;
334 static int genpts = 0;
335 static int lowres = 0;
336 static int decoder_reorder_pts = -1;
337 static int autoexit;
338 static int exit_on_keydown;
339 static int exit_on_mousedown;
340 static int loop = 1;
341 static int framedrop = -1;
342 static int infinite_buffer = -1;
343 static enum ShowMode show_mode = SHOW_MODE_NONE;
344 static const char *audio_codec_name;
345 static const char *subtitle_codec_name;
346 static const char *video_codec_name;
347 double rdftspeed = 0.02;
348 static int64_t cursor_last_shown;
349 static int cursor_hidden = 0;
350 #if CONFIG_AVFILTER
351 static const char **vfilters_list = NULL;
352 static int nb_vfilters = 0;
353 static char *afilters = NULL;
354 #endif
355 static int autorotate = 1;
356 static int find_stream_info = 1;
357 static int filter_nbthreads = 0;
358 
359 /* current context */
360 static int is_full_screen;
361 static int64_t audio_callback_time;
362 
363 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
364 
365 static SDL_Window *window;
366 static SDL_Renderer *renderer;
367 static SDL_RendererInfo renderer_info = {0};
368 static SDL_AudioDeviceID audio_dev;
369 
370 static const struct TextureFormatEntry {
374  { AV_PIX_FMT_RGB8, SDL_PIXELFORMAT_RGB332 },
375  { AV_PIX_FMT_RGB444, SDL_PIXELFORMAT_RGB444 },
376  { AV_PIX_FMT_RGB555, SDL_PIXELFORMAT_RGB555 },
377  { AV_PIX_FMT_BGR555, SDL_PIXELFORMAT_BGR555 },
378  { AV_PIX_FMT_RGB565, SDL_PIXELFORMAT_RGB565 },
379  { AV_PIX_FMT_BGR565, SDL_PIXELFORMAT_BGR565 },
380  { AV_PIX_FMT_RGB24, SDL_PIXELFORMAT_RGB24 },
381  { AV_PIX_FMT_BGR24, SDL_PIXELFORMAT_BGR24 },
382  { AV_PIX_FMT_0RGB32, SDL_PIXELFORMAT_RGB888 },
383  { AV_PIX_FMT_0BGR32, SDL_PIXELFORMAT_BGR888 },
384  { AV_PIX_FMT_NE(RGB0, 0BGR), SDL_PIXELFORMAT_RGBX8888 },
385  { AV_PIX_FMT_NE(BGR0, 0RGB), SDL_PIXELFORMAT_BGRX8888 },
386  { AV_PIX_FMT_RGB32, SDL_PIXELFORMAT_ARGB8888 },
387  { AV_PIX_FMT_RGB32_1, SDL_PIXELFORMAT_RGBA8888 },
388  { AV_PIX_FMT_BGR32, SDL_PIXELFORMAT_ABGR8888 },
389  { AV_PIX_FMT_BGR32_1, SDL_PIXELFORMAT_BGRA8888 },
390  { AV_PIX_FMT_YUV420P, SDL_PIXELFORMAT_IYUV },
391  { AV_PIX_FMT_YUYV422, SDL_PIXELFORMAT_YUY2 },
392  { AV_PIX_FMT_UYVY422, SDL_PIXELFORMAT_UYVY },
393  { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_UNKNOWN },
394 };
395 
396 #if CONFIG_AVFILTER
/* Option handler for -vf: append one video filtergraph description to
 * vfilters_list.  NOTE(review): assumes GROW_ARRAY (cmdutils.h) handles
 * allocation failure internally — confirm against cmdutils. */
397 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
398 {
399  GROW_ARRAY(vfilters_list, nb_vfilters);
400  vfilters_list[nb_vfilters - 1] = arg;
401  return 0;
402 }
403 #endif
404 
405 static inline
406 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
407  enum AVSampleFormat fmt2, int64_t channel_count2)
408 {
409  /* If channel count == 1, planar and non-planar formats are the same */
410  if (channel_count1 == 1 && channel_count2 == 1)
412  else
413  return channel_count1 != channel_count2 || fmt1 != fmt2;
414 }
415 
/* Enqueue one packet (ownership of *pkt transfers to the queue entry).
 * Caller must hold q->mutex — packet_queue_put() below locks around this.
 * Returns -1 if the queue is aborting, a negative error from
 * av_fifo_write() on failure, 0 on success.
 * NOTE(review): the signature line is missing from this listing;
 * presumably static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt). */
417 {
418  MyAVPacketList pkt1;
419  int ret;
420 
421  if (q->abort_request)
422  return -1;
423 
424 
425  pkt1.pkt = pkt;
/* Tag the entry with the queue's current serial so consumers can detect
 * packets that predate a flush/seek. */
426  pkt1.serial = q->serial;
427 
428  ret = av_fifo_write(q->pkt_list, &pkt1, 1);
429  if (ret < 0)
430  return ret;
/* Book-keeping mirrored (subtracted) in packet_queue_get(). */
431  q->nb_packets++;
432  q->size += pkt1.pkt->size + sizeof(pkt1);
433  q->duration += pkt1.pkt->duration;
434  /* XXX: should duplicate packet data in DV case */
435  SDL_CondSignal(q->cond);
436  return 0;
437 }
438 
440 {
441  AVPacket *pkt1;
442  int ret;
443 
444  pkt1 = av_packet_alloc();
445  if (!pkt1) {
447  return -1;
448  }
449  av_packet_move_ref(pkt1, pkt);
450 
451  SDL_LockMutex(q->mutex);
452  ret = packet_queue_put_private(q, pkt1);
453  SDL_UnlockMutex(q->mutex);
454 
455  if (ret < 0)
456  av_packet_free(&pkt1);
457 
458  return ret;
459 }
460 
461 static int packet_queue_put_nullpacket(PacketQueue *q, AVPacket *pkt, int stream_index)
462 {
463  pkt->stream_index = stream_index;
464  return packet_queue_put(q, pkt);
465 }
466 
467 /* packet queue handling */
469 {
470  memset(q, 0, sizeof(PacketQueue));
472  if (!q->pkt_list)
473  return AVERROR(ENOMEM);
474  q->mutex = SDL_CreateMutex();
475  if (!q->mutex) {
476  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
477  return AVERROR(ENOMEM);
478  }
479  q->cond = SDL_CreateCond();
480  if (!q->cond) {
481  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
482  return AVERROR(ENOMEM);
483  }
484  q->abort_request = 1;
485  return 0;
486 }
487 
/* Drop every queued packet and bump the serial so anything decoded from
 * the old packets is recognized as obsolete (used on seek).
 * NOTE(review): signature line missing from this listing; presumably
 * static void packet_queue_flush(PacketQueue *q). */
489 {
490  MyAVPacketList pkt1;
491 
492  SDL_LockMutex(q->mutex);
493  while (av_fifo_read(q->pkt_list, &pkt1, 1) >= 0)
494  av_packet_free(&pkt1.pkt);
495  q->nb_packets = 0;
496  q->size = 0;
497  q->duration = 0;
498  q->serial++;
499  SDL_UnlockMutex(q->mutex);
500 }
501 
503 {
506  SDL_DestroyMutex(q->mutex);
507  SDL_DestroyCond(q->cond);
508 }
509 
/* Request abort: set the flag and wake any consumer blocked in
 * packet_queue_get() so it can return -1.
 * NOTE(review): signature line missing from this listing; presumably
 * static void packet_queue_abort(PacketQueue *q). */
511 {
512  SDL_LockMutex(q->mutex);
513 
514  q->abort_request = 1;
515 
516  SDL_CondSignal(q->cond);
517 
518  SDL_UnlockMutex(q->mutex);
519 }
520 
/* Re-enable a (possibly aborted) queue and advance the serial so stale
 * data from before the restart is treated as obsolete.
 * NOTE(review): signature line missing from this listing; presumably
 * static void packet_queue_start(PacketQueue *q). */
522 {
523  SDL_LockMutex(q->mutex);
524  q->abort_request = 0;
525  q->serial++;
526  SDL_UnlockMutex(q->mutex);
527 }
528 
529 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
530 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
531 {
532  MyAVPacketList pkt1;
533  int ret;
534 
535  SDL_LockMutex(q->mutex);
536 
537  for (;;) {
538  if (q->abort_request) {
539  ret = -1;
540  break;
541  }
542 
543  if (av_fifo_read(q->pkt_list, &pkt1, 1) >= 0) {
544  q->nb_packets--;
545  q->size -= pkt1.pkt->size + sizeof(pkt1);
546  q->duration -= pkt1.pkt->duration;
547  av_packet_move_ref(pkt, pkt1.pkt);
548  if (serial)
549  *serial = pkt1.serial;
550  av_packet_free(&pkt1.pkt);
551  ret = 1;
552  break;
553  } else if (!block) {
554  ret = 0;
555  break;
556  } else {
557  SDL_CondWait(q->cond, q->mutex);
558  }
559  }
560  SDL_UnlockMutex(q->mutex);
561  return ret;
562 }
563 
564 static int decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
565  memset(d, 0, sizeof(Decoder));
566  d->pkt = av_packet_alloc();
567  if (!d->pkt)
568  return AVERROR(ENOMEM);
569  d->avctx = avctx;
570  d->queue = queue;
571  d->empty_queue_cond = empty_queue_cond;
572  d->start_pts = AV_NOPTS_VALUE;
573  d->pkt_serial = -1;
574  return 0;
575 }
576 
/* Core decode loop shared by the audio, video and subtitle threads.
 * NOTE(review): the signature line is missing from this listing;
 * presumably static int decoder_decode_frame(Decoder *d, AVFrame *frame,
 * AVSubtitle *sub) — confirm against the full source.
 * Returns 1 when a frame/subtitle was produced, 0 on end of stream
 * (decoder drained and flushed), -1 when the packet queue was aborted. */
578  int ret = AVERROR(EAGAIN);
579 
580  for (;;) {
/* Phase 1: drain frames the decoder already holds, but only while our
 * packets belong to the current serial (skip pre-seek leftovers). */
581  if (d->queue->serial == d->pkt_serial) {
582  do {
583  if (d->queue->abort_request)
584  return -1;
585 
586  switch (d->avctx->codec_type) {
587  case AVMEDIA_TYPE_VIDEO:
588  ret = avcodec_receive_frame(d->avctx, frame);
589  if (ret >= 0) {
/* Timestamp source per decoder_reorder_pts: -1 = best-effort (default),
 * 0 = raw dts, otherwise keep the decoder's pts untouched. */
590  if (decoder_reorder_pts == -1) {
591  frame->pts = frame->best_effort_timestamp;
592  } else if (!decoder_reorder_pts) {
593  frame->pts = frame->pkt_dts;
594  }
595  }
596  break;
597  case AVMEDIA_TYPE_AUDIO:
598  ret = avcodec_receive_frame(d->avctx, frame);
599  if (ret >= 0) {
600  AVRational tb = (AVRational){1, frame->sample_rate};
/* Rescale the pts into a 1/sample_rate timebase; if the decoder gave
 * none, extrapolate from the previous frame's end (next_pts). */
601  if (frame->pts != AV_NOPTS_VALUE)
602  frame->pts = av_rescale_q(frame->pts, d->avctx->pkt_timebase, tb);
603  else if (d->next_pts != AV_NOPTS_VALUE)
604  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
605  if (frame->pts != AV_NOPTS_VALUE) {
606  d->next_pts = frame->pts + frame->nb_samples;
607  d->next_pts_tb = tb;
608  }
609  }
610  break;
611  }
612  if (ret == AVERROR_EOF) {
/* Fully drained: record which serial finished and reset the decoder. */
613  d->finished = d->pkt_serial;
614  avcodec_flush_buffers(d->avctx);
615  return 0;
616  }
617  if (ret >= 0)
618  return 1;
619  } while (ret != AVERROR(EAGAIN));
620  }
621 
/* Phase 2: fetch the next packet, discarding packets from an obsolete
 * serial; on a serial change (seek), flush the decoder and reset pts
 * extrapolation state. */
622  do {
623  if (d->queue->nb_packets == 0)
/* Queue empty: wake the demuxer thread so it refills. */
624  SDL_CondSignal(d->empty_queue_cond);
625  if (d->packet_pending) {
626  d->packet_pending = 0;
627  } else {
628  int old_serial = d->pkt_serial;
629  if (packet_queue_get(d->queue, d->pkt, 1, &d->pkt_serial) < 0)
630  return -1;
631  if (old_serial != d->pkt_serial) {
632  avcodec_flush_buffers(d->avctx);
633  d->finished = 0;
634  d->next_pts = d->start_pts;
635  d->next_pts_tb = d->start_pts_tb;
636  }
637  }
638  if (d->queue->serial == d->pkt_serial)
639  break;
640  av_packet_unref(d->pkt);
641  } while (1);
642 
/* Phase 3: feed the packet.  Subtitles use the legacy decode API;
 * audio/video use avcodec_send_packet(). */
643  if (d->avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
644  int got_frame = 0;
645  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, d->pkt);
646  if (ret < 0) {
647  ret = AVERROR(EAGAIN);
648  } else {
649  if (got_frame && !d->pkt->data) {
/* A drain (empty) packet may yield several subtitles: resubmit it. */
650  d->packet_pending = 1;
651  }
652  ret = got_frame ? 0 : (d->pkt->data ? AVERROR(EAGAIN) : AVERROR_EOF);
653  }
654  av_packet_unref(d->pkt);
655  } else {
656  if (avcodec_send_packet(d->avctx, d->pkt) == AVERROR(EAGAIN)) {
657  av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
/* Keep the packet and retry after draining more frames. */
658  d->packet_pending = 1;
659  } else {
660  av_packet_unref(d->pkt);
661  }
662  }
663  }
664 }
665 
/* Release a decoder's owned resources: the scratch packet and the codec
 * context (both freed-and-NULLed by the libav helpers). */
666 static void decoder_destroy(Decoder *d) {
667  av_packet_free(&d->pkt);
668  avcodec_free_context(&d->avctx);
669 }
670 
/* Release the references held by one queue slot (frame data and any
 * attached subtitle); the slot itself stays allocated for reuse.
 * NOTE(review): signature line missing from this listing; presumably
 * static void frame_queue_unref_item(Frame *vp). */
672 {
673  av_frame_unref(vp->frame);
674  avsubtitle_free(&vp->sub);
675 }
676 
677 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
678 {
679  int i;
680  memset(f, 0, sizeof(FrameQueue));
681  if (!(f->mutex = SDL_CreateMutex())) {
682  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
683  return AVERROR(ENOMEM);
684  }
685  if (!(f->cond = SDL_CreateCond())) {
686  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
687  return AVERROR(ENOMEM);
688  }
689  f->pktq = pktq;
690  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
691  f->keep_last = !!keep_last;
692  for (i = 0; i < f->max_size; i++)
693  if (!(f->queue[i].frame = av_frame_alloc()))
694  return AVERROR(ENOMEM);
695  return 0;
696 }
697 
699 {
700  int i;
701  for (i = 0; i < f->max_size; i++) {
702  Frame *vp = &f->queue[i];
704  av_frame_free(&vp->frame);
705  }
706  SDL_DestroyMutex(f->mutex);
707  SDL_DestroyCond(f->cond);
708 }
709 
/* Wake any thread blocked waiting on the frame queue (used on abort).
 * NOTE(review): signature line missing from this listing; presumably
 * static void frame_queue_signal(FrameQueue *f). */
711 {
712  SDL_LockMutex(f->mutex);
713  SDL_CondSignal(f->cond);
714  SDL_UnlockMutex(f->mutex);
715 }
716 
/* Next frame to be displayed, without consuming it; rindex_shown skips
 * the kept last-shown frame.  NOTE(review): signature line missing;
 * presumably static Frame *frame_queue_peek(FrameQueue *f). */
718 {
719  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
720 }
721 
/* Frame after the one frame_queue_peek() would return.
 * NOTE(review): signature line missing; presumably
 * static Frame *frame_queue_peek_next(FrameQueue *f). */
723 {
724  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
725 }
726 
/* Most recently shown frame (the slot at rindex, kept when keep_last is
 * set).  NOTE(review): signature line missing; presumably
 * static Frame *frame_queue_peek_last(FrameQueue *f). */
728 {
729  return &f->queue[f->rindex];
730 }
731 
/* Block until a slot is free for writing, then return it; returns NULL
 * if the associated packet queue was aborted while waiting.
 * NOTE(review): signature line missing; presumably
 * static Frame *frame_queue_peek_writable(FrameQueue *f). */
733 {
734  /* wait until we have space to put a new frame */
735  SDL_LockMutex(f->mutex);
736  while (f->size >= f->max_size &&
737  !f->pktq->abort_request) {
738  SDL_CondWait(f->cond, f->mutex);
739  }
740  SDL_UnlockMutex(f->mutex);
741 
742  if (f->pktq->abort_request)
743  return NULL;
744 
745  return &f->queue[f->windex];
746 }
747 
/* Block until an undisplayed frame is available, then return it; returns
 * NULL if the associated packet queue was aborted while waiting.
 * NOTE(review): signature line missing; presumably
 * static Frame *frame_queue_peek_readable(FrameQueue *f). */
749 {
750  /* wait until we have a readable a new frame */
751  SDL_LockMutex(f->mutex);
752  while (f->size - f->rindex_shown <= 0 &&
753  !f->pktq->abort_request) {
754  SDL_CondWait(f->cond, f->mutex);
755  }
756  SDL_UnlockMutex(f->mutex);
757 
758  if (f->pktq->abort_request)
759  return NULL;
760 
761  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
762 }
763 
/* Commit the slot previously obtained from frame_queue_peek_writable():
 * advance the write index (with wraparound) and wake any reader.
 * NOTE(review): signature line missing; presumably
 * static void frame_queue_push(FrameQueue *f). */
765 {
766  if (++f->windex == f->max_size)
767  f->windex = 0;
768  SDL_LockMutex(f->mutex);
769  f->size++;
770  SDL_CondSignal(f->cond);
771  SDL_UnlockMutex(f->mutex);
772 }
773 
/* Advance the read index after a frame was consumed.  With keep_last,
 * the very first call only marks the current frame as shown so it can
 * still be re-rendered (e.g. while paused) instead of being released.
 * NOTE(review): signature line missing; presumably
 * static void frame_queue_next(FrameQueue *f). */
775 {
776  if (f->keep_last && !f->rindex_shown) {
777  f->rindex_shown = 1;
778  return;
779  }
780  frame_queue_unref_item(&f->queue[f->rindex]);
781  if (++f->rindex == f->max_size)
782  f->rindex = 0;
783  SDL_LockMutex(f->mutex);
784  f->size--;
785  SDL_CondSignal(f->cond);
786  SDL_UnlockMutex(f->mutex);
787 }
788 
789 /* return the number of undisplayed frames in the queue */
/* NOTE(review): signature line missing; presumably
 * static int frame_queue_nb_remaining(FrameQueue *f). */
791 {
792  return f->size - f->rindex_shown;
793 }
794 
795 /* return last shown position */
/* Byte position (in the input file) of the last shown frame, or -1 when
 * nothing was shown yet or the frame's serial is obsolete.
 * NOTE(review): signature line missing; presumably
 * static int64_t frame_queue_last_pos(FrameQueue *f). */
797 {
798  Frame *fp = &f->queue[f->rindex];
799  if (f->rindex_shown && fp->serial == f->pktq->serial)
800  return fp->pos;
801  else
802  return -1;
803 }
804 
/* Shut down a decoder thread.  Order matters: abort the packet queue and
 * wake the frame-queue waiter first (so the thread's blocking calls in
 * packet_queue_get()/frame_queue_peek_writable() return), then join the
 * thread, and only then flush the remaining packets. */
805 static void decoder_abort(Decoder *d, FrameQueue *fq)
806 {
807  packet_queue_abort(d->queue);
808  frame_queue_signal(fq);
809  SDL_WaitThread(d->decoder_tid, NULL);
810  d->decoder_tid = NULL;
811  packet_queue_flush(d->queue);
812 }
813 
814 static inline void fill_rectangle(int x, int y, int w, int h)
815 {
816  SDL_Rect rect;
817  rect.x = x;
818  rect.y = y;
819  rect.w = w;
820  rect.h = h;
821  if (w && h)
822  SDL_RenderFillRect(renderer, &rect);
823 }
824 
825 static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
826 {
827  Uint32 format;
828  int access, w, h;
829  if (!*texture || SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
830  void *pixels;
831  int pitch;
832  if (*texture)
833  SDL_DestroyTexture(*texture);
834  if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
835  return -1;
836  if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
837  return -1;
838  if (init_texture) {
839  if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
840  return -1;
841  memset(pixels, 0, pitch * new_height);
842  SDL_UnlockTexture(*texture);
843  }
844  av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n", new_width, new_height, SDL_GetPixelFormatName(new_format));
845  }
846  return 0;
847 }
848 
849 static void calculate_display_rect(SDL_Rect *rect,
850  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
851  int pic_width, int pic_height, AVRational pic_sar)
852 {
853  AVRational aspect_ratio = pic_sar;
854  int64_t width, height, x, y;
855 
856  if (av_cmp_q(aspect_ratio, av_make_q(0, 1)) <= 0)
857  aspect_ratio = av_make_q(1, 1);
858 
859  aspect_ratio = av_mul_q(aspect_ratio, av_make_q(pic_width, pic_height));
860 
861  /* XXX: we suppose the screen has a 1.0 pixel ratio */
862  height = scr_height;
863  width = av_rescale(height, aspect_ratio.num, aspect_ratio.den) & ~1;
864  if (width > scr_width) {
865  width = scr_width;
866  height = av_rescale(width, aspect_ratio.den, aspect_ratio.num) & ~1;
867  }
868  x = (scr_width - width) / 2;
869  y = (scr_height - height) / 2;
870  rect->x = scr_xleft + x;
871  rect->y = scr_ytop + y;
872  rect->w = FFMAX((int)width, 1);
873  rect->h = FFMAX((int)height, 1);
874 }
875 
876 static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
877 {
878  int i;
879  *sdl_blendmode = SDL_BLENDMODE_NONE;
880  *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN;
881  if (format == AV_PIX_FMT_RGB32 ||
885  *sdl_blendmode = SDL_BLENDMODE_BLEND;
886  for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; i++) {
888  *sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt;
889  return;
890  }
891  }
892 }
893 
894 static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext **img_convert_ctx) {
895  int ret = 0;
896  Uint32 sdl_pix_fmt;
897  SDL_BlendMode sdl_blendmode;
898  get_sdl_pix_fmt_and_blendmode(frame->format, &sdl_pix_fmt, &sdl_blendmode);
899  if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
900  return -1;
901  switch (sdl_pix_fmt) {
902  case SDL_PIXELFORMAT_UNKNOWN:
903  /* This should only happen if we are not using avfilter... */
904  *img_convert_ctx = sws_getCachedContext(*img_convert_ctx,
905  frame->width, frame->height, frame->format, frame->width, frame->height,
907  if (*img_convert_ctx != NULL) {
908  uint8_t *pixels[4];
909  int pitch[4];
910  if (!SDL_LockTexture(*tex, NULL, (void **)pixels, pitch)) {
911  sws_scale(*img_convert_ctx, (const uint8_t * const *)frame->data, frame->linesize,
912  0, frame->height, pixels, pitch);
913  SDL_UnlockTexture(*tex);
914  }
915  } else {
916  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
917  ret = -1;
918  }
919  break;
920  case SDL_PIXELFORMAT_IYUV:
921  if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
922  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0],
923  frame->data[1], frame->linesize[1],
924  frame->data[2], frame->linesize[2]);
925  } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
926  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0],
927  frame->data[1] + frame->linesize[1] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[1],
928  frame->data[2] + frame->linesize[2] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[2]);
929  } else {
930  av_log(NULL, AV_LOG_ERROR, "Mixed negative and positive linesizes are not supported.\n");
931  return -1;
932  }
933  break;
934  default:
935  if (frame->linesize[0] < 0) {
936  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
937  } else {
938  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0], frame->linesize[0]);
939  }
940  break;
941  }
942  return ret;
943 }
944 
/* Map the frame's colorspace/range to SDL's YUV conversion mode (SDL
 * >= 2.0.8 only; a no-op otherwise).  Only the packed/planar formats SDL
 * converts itself are considered.
 * NOTE(review): signature line missing from this listing; presumably
 * static void set_sdl_yuv_conversion_mode(AVFrame *frame). */
946 {
947 #if SDL_VERSION_ATLEAST(2,0,8)
948  SDL_YUV_CONVERSION_MODE mode = SDL_YUV_CONVERSION_AUTOMATIC;
949  if (frame && (frame->format == AV_PIX_FMT_YUV420P || frame->format == AV_PIX_FMT_YUYV422 || frame->format == AV_PIX_FMT_UYVY422)) {
950  if (frame->color_range == AVCOL_RANGE_JPEG)
951  mode = SDL_YUV_CONVERSION_JPEG;
952  else if (frame->colorspace == AVCOL_SPC_BT709)
953  mode = SDL_YUV_CONVERSION_BT709;
954  else if (frame->colorspace == AVCOL_SPC_BT470BG || frame->colorspace == AVCOL_SPC_SMPTE170M)
955  mode = SDL_YUV_CONVERSION_BT601;
956  }
957  SDL_SetYUVConversionMode(mode); /* FIXME: no support for linear transfer */
958 #endif
959 }
960 
962 {
963  Frame *vp;
964  Frame *sp = NULL;
965  SDL_Rect rect;
966 
967  vp = frame_queue_peek_last(&is->pictq);
968  if (is->subtitle_st) {
969  if (frame_queue_nb_remaining(&is->subpq) > 0) {
970  sp = frame_queue_peek(&is->subpq);
971 
972  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
973  if (!sp->uploaded) {
974  uint8_t* pixels[4];
975  int pitch[4];
976  int i;
977  if (!sp->width || !sp->height) {
978  sp->width = vp->width;
979  sp->height = vp->height;
980  }
981  if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
982  return;
983 
984  for (i = 0; i < sp->sub.num_rects; i++) {
985  AVSubtitleRect *sub_rect = sp->sub.rects[i];
986 
987  sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
988  sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
989  sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x);
990  sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
991 
992  is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
993  sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8,
994  sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA,
995  0, NULL, NULL, NULL);
996  if (!is->sub_convert_ctx) {
997  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
998  return;
999  }
1000  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) {
1001  sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
1002  0, sub_rect->h, pixels, pitch);
1003  SDL_UnlockTexture(is->sub_texture);
1004  }
1005  }
1006  sp->uploaded = 1;
1007  }
1008  } else
1009  sp = NULL;
1010  }
1011  }
1012 
1013  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
1015 
1016  if (!vp->uploaded) {
1017  if (upload_texture(&is->vid_texture, vp->frame, &is->img_convert_ctx) < 0) {
1019  return;
1020  }
1021  vp->uploaded = 1;
1022  vp->flip_v = vp->frame->linesize[0] < 0;
1023  }
1024 
1025  SDL_RenderCopyEx(renderer, is->vid_texture, NULL, &rect, 0, NULL, vp->flip_v ? SDL_FLIP_VERTICAL : 0);
1027  if (sp) {
1028 #if USE_ONEPASS_SUBTITLE_RENDER
1029  SDL_RenderCopy(renderer, is->sub_texture, NULL, &rect);
1030 #else
1031  int i;
1032  double xratio = (double)rect.w / (double)sp->width;
1033  double yratio = (double)rect.h / (double)sp->height;
1034  for (i = 0; i < sp->sub.num_rects; i++) {
1035  SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
1036  SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
1037  .y = rect.y + sub_rect->y * yratio,
1038  .w = sub_rect->w * xratio,
1039  .h = sub_rect->h * yratio};
1040  SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
1041  }
1042 #endif
1043  }
1044 }
1045 
/* Modulo that maps negative inputs into positive range for ring-buffer
 * indexing.  Note: for a negative exact multiple of b this yields b
 * (not 0), faithfully matching the original expression
 * `a < 0 ? a%b + b : a%b`. */
static inline int compute_mod(int a, int b)
{
    int m = a % b;
    if (a < 0)
        m += b;
    return m;
}
1050 
/* Audio visualization: draw either a per-channel waveform (SHOW_MODE_WAVES)
 * or a scrolling RDFT spectrogram into the window.
 * NOTE(review): the signature line is missing from this listing;
 * presumably static void video_audio_display(VideoState *s). */
1052 {
1053  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1054  int ch, channels, h, h2;
1055  int64_t time_diff;
1056  int rdft_bits, nb_freq;
1057 
/* Smallest power of two >= 2 * height: the RDFT size for the spectrogram. */
1058  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1059  ;
1060  nb_freq = 1 << (rdft_bits - 1);
1061 
1062  /* compute display index : center on currently output samples */
1063  channels = s->audio_tgt.ch_layout.nb_channels;
1064  nb_display_channels = channels;
1065  if (!s->paused) {
1066  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1067  n = 2 * channels;
/* Estimate how many samples are still buffered (i.e. not yet audible),
 * refined by the time elapsed since the last audio callback. */
1068  delay = s->audio_write_buf_size;
1069  delay /= n;
1070 
1071  /* to be more precise, we take into account the time spent since
1072  the last buffer computation */
1073  if (audio_callback_time) {
1074  time_diff = av_gettime_relative() - audio_callback_time;
1075  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1076  }
1077 
1078  delay += 2 * data_used;
1079  if (delay < data_used)
1080  delay = data_used;
1081 
1082  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1083  if (s->show_mode == SHOW_MODE_WAVES) {
/* Search nearby samples for a zero crossing with the best score so the
 * waveform appears phase-stable between refreshes. */
1084  h = INT_MIN;
1085  for (i = 0; i < 1000; i += channels) {
1086  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1087  int a = s->sample_array[idx];
1088  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1089  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1090  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1091  int score = a - d;
1092  if (h < score && (b ^ c) < 0) {
1093  h = score;
1094  i_start = idx;
1095  }
1096  }
1097  }
1098 
1099  s->last_i_start = i_start;
1100  } else {
/* Paused: keep drawing from the last computed start index. */
1101  i_start = s->last_i_start;
1102  }
1103 
1104  if (s->show_mode == SHOW_MODE_WAVES) {
1105  SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
1106 
1107  /* total height for one channel */
1108  h = s->height / nb_display_channels;
1109  /* graph height / 2 */
1110  h2 = (h * 9) / 20;
1111  for (ch = 0; ch < nb_display_channels; ch++) {
1112  i = i_start + ch;
1113  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1114  for (x = 0; x < s->width; x++) {
/* Scale the 16-bit sample into the channel's half-height band and draw
 * a 1-pixel-wide bar from the center line. */
1115  y = (s->sample_array[i] * h2) >> 15;
1116  if (y < 0) {
1117  y = -y;
1118  ys = y1 - y;
1119  } else {
1120  ys = y1;
1121  }
1122  fill_rectangle(s->xleft + x, ys, 1, y);
1123  i += channels;
1124  if (i >= SAMPLE_ARRAY_SIZE)
1125  i -= SAMPLE_ARRAY_SIZE;
1126  }
1127  }
1128 
/* Blue separator lines between channels. */
1129  SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
1130 
1131  for (ch = 1; ch < nb_display_channels; ch++) {
1132  y = s->ytop + ch * h;
1133  fill_rectangle(s->xleft, y, s->width, 1);
1134  }
1135  } else {
/* Spectrogram (RDFT) mode: draw one new pixel column per refresh into a
 * persistent texture, scrolling via xpos. */
1136  if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
1137  return;
1138 
1139  if (s->xpos >= s->width)
1140  s->xpos = 0;
1141  nb_display_channels= FFMIN(nb_display_channels, 2);
/* (Re)allocate the RDFT context/buffers when the window height changed. */
1142  if (rdft_bits != s->rdft_bits) {
1143  av_rdft_end(s->rdft);
1144  av_free(s->rdft_data);
1145  s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
1146  s->rdft_bits = rdft_bits;
1147  s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data));
1148  }
1149  if (!s->rdft || !s->rdft_data){
1150  av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1151  s->show_mode = SHOW_MODE_WAVES;
1152  } else {
1153  FFTSample *data[2];
1154  SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
1155  uint32_t *pixels;
1156  int pitch;
1157  for (ch = 0; ch < nb_display_channels; ch++) {
/* Window the samples (parabolic taper) and run the RDFT per channel. */
1158  data[ch] = s->rdft_data + 2 * nb_freq * ch;
1159  i = i_start + ch;
1160  for (x = 0; x < 2 * nb_freq; x++) {
1161  double w = (x-nb_freq) * (1.0 / nb_freq);
1162  data[ch][x] = s->sample_array[i] * (1.0 - w * w);
1163  i += channels;
1164  if (i >= SAMPLE_ARRAY_SIZE)
1165  i -= SAMPLE_ARRAY_SIZE;
1166  }
1167  av_rdft_calc(s->rdft, data[ch]);
1168  }
1169  /* Least efficient way to do this, we should of course
1170  * directly access it but it is more than fast enough. */
1171  if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
1172  pitch >>= 2;
1173  pixels += pitch * s->height;
/* Write the column bottom-up: channel 0 magnitude in red, channel 1 in
 * green, their average in blue. */
1174  for (y = 0; y < s->height; y++) {
1175  double w = 1 / sqrt(nb_freq);
1176  int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
1177  int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][2 * y + 0], data[1][2 * y + 1]))
1178  : a;
1179  a = FFMIN(a, 255);
1180  b = FFMIN(b, 255);
1181  pixels -= pitch;
1182  *pixels = (a << 16) + (b << 8) + ((a+b) >> 1);
1183  }
1184  SDL_UnlockTexture(s->vis_texture);
1185  }
1186  SDL_RenderCopy(renderer, s->vis_texture, NULL, NULL);
1187  }
1188  if (!s->paused)
1189  s->xpos++;
1190  }
1191 }
1192 
/* Tear down everything owned by one elementary stream (audio, video or
 * subtitle): stop its decoder thread, free codec-side resources, then mark
 * the stream as discarded and clear the per-type bookkeeping in *is.
 * Safe to call with an out-of-range index (it simply returns). */
1193 static void stream_component_close(VideoState *is, int stream_index)
1194 {
1195  AVFormatContext *ic = is->ic;
1196  AVCodecParameters *codecpar;
1197 
1198  if (stream_index < 0 || stream_index >= ic->nb_streams)
1199  return;
1200  codecpar = ic->streams[stream_index]->codecpar;
1201 
/* First switch: stop the decoder and release codec-type-specific buffers.
 * decoder_abort() must run before decoder_destroy() so the worker thread
 * has exited before its state is freed. */
1202  switch (codecpar->codec_type) {
1203  case AVMEDIA_TYPE_AUDIO:
1204  decoder_abort(&is->auddec, &is->sampq);
1205  SDL_CloseAudioDevice(audio_dev);
1206  decoder_destroy(&is->auddec);
1207  swr_free(&is->swr_ctx);
1208  av_freep(&is->audio_buf1);
1209  is->audio_buf1_size = 0;
1210  is->audio_buf = NULL;
1211 
/* Free the RDFT state used by the spectrum visualisation, if any. */
1212  if (is->rdft) {
1213  av_rdft_end(is->rdft);
1214  av_freep(&is->rdft_data);
1215  is->rdft = NULL;
1216  is->rdft_bits = 0;
1217  }
1218  break;
1219  case AVMEDIA_TYPE_VIDEO:
1220  decoder_abort(&is->viddec, &is->pictq);
1221  decoder_destroy(&is->viddec);
1222  break;
1223  case AVMEDIA_TYPE_SUBTITLE:
1224  decoder_abort(&is->subdec, &is->subpq);
1225  decoder_destroy(&is->subdec);
1226  break;
1227  default:
1228  break;
1229  }
1230 
/* Tell the demuxer to drop further packets for this stream, then clear the
 * per-type stream pointer/index so the rest of the player ignores it. */
1231  ic->streams[stream_index]->discard = AVDISCARD_ALL;
1232  switch (codecpar->codec_type) {
1233  case AVMEDIA_TYPE_AUDIO:
1234  is->audio_st = NULL;
1235  is->audio_stream = -1;
1236  break;
1237  case AVMEDIA_TYPE_VIDEO:
1238  is->video_st = NULL;
1239  is->video_stream = -1;
1240  break;
1241  case AVMEDIA_TYPE_SUBTITLE:
1242  is->subtitle_st = NULL;
1243  is->subtitle_stream = -1;
1244  break;
1245  default:
1246  break;
1247  }
1248 }
1249 
/* NOTE(review): the signature line (original line 1250) is not visible in
 * this extract; this is the body of what appears to be
 * stream_close(VideoState *is) — confirm against the full source.
 * Shuts the whole player instance down: stops the read thread, closes every
 * open stream component, destroys queues/textures and frees *is itself. */
1251 {
1252  /* XXX: use a special url_shutdown call to abort parse cleanly */
1253  is->abort_request = 1;
1254  SDL_WaitThread(is->read_tid, NULL);
1255 
1256  /* close each stream */
1257  if (is->audio_stream >= 0)
1258  stream_component_close(is, is->audio_stream)
1259  if (is->video_stream >= 0)
1260  stream_component_close(is, is->video_stream);
1261  if (is->subtitle_stream >= 0)
1262  stream_component_close(is, is->subtitle_stream);
1263 
1264  avformat_close_input(&is->ic);
1265 
1266  packet_queue_destroy(&is->videoq);
1267  packet_queue_destroy(&is->audioq);
1268  packet_queue_destroy(&is->subtitleq);
1269 
1270  /* free all pictures */
/* (the misspelled "destory" is the helper's actual name in this file) */
1271  frame_queue_destory(&is->pictq);
1272  frame_queue_destory(&is->sampq);
1273  frame_queue_destory(&is->subpq);
1274  SDL_DestroyCond(is->continue_read_thread);
1275  sws_freeContext(is->img_convert_ctx);
1276  sws_freeContext(is->sub_convert_ctx);
1277  av_free(is->filename);
1278  if (is->vis_texture)
1279  SDL_DestroyTexture(is->vis_texture);
1280  if (is->vid_texture)
1281  SDL_DestroyTexture(is->vid_texture);
1282  if (is->sub_texture)
1283  SDL_DestroyTexture(is->sub_texture);
1284  av_free(is);
1285 }
1286 
/* Final program teardown: close the player state (if any), destroy the SDL
 * renderer/window, release option storage and exit the process.
 * NOTE(review): one cleanup line (original line 1300) is not visible in this
 * extract — confirm against the full source. */
1287 static void do_exit(VideoState *is)
1288 {
1289  if (is) {
1290  stream_close(is);
1291  }
1292  if (renderer)
1293  SDL_DestroyRenderer(renderer);
1294  if (window)
1295  SDL_DestroyWindow(window);
1296  uninit_opts();
1297 #if CONFIG_AVFILTER
1298  av_freep(&vfilters_list);
1299 #endif
1301  if (show_status)
1302  printf("\n");
1303  SDL_Quit();
/* Silence any further logging before exiting. */
1304  av_log(NULL, AV_LOG_QUIET, "%s", "");
1305  exit(0);
1306 }
1307 
static void sigterm_handler(int sig)
{
    /* Terminate at once; the distinctive status 123 lets callers tell a
     * signal-triggered exit apart from a normal one. */
    exit(123);
}
1312 
/* NOTE(review): the signature line (original line 1313) is not visible in
 * this extract; judging from the default_width/default_height globals this
 * is the body of set_default_window_size(width, height, sar) — confirm.
 * Computes the default window dimensions from the video size, the
 * user-requested screen size and the sample aspect ratio. */
1314 {
1315  SDL_Rect rect;
/* 0 means "unconstrained"; INT_MAX stands in for "no limit". */
1316  int max_width = screen_width ? screen_width : INT_MAX;
1317  int max_height = screen_height ? screen_height : INT_MAX;
1318  if (max_width == INT_MAX && max_height == INT_MAX)
1319  max_height = height;
1320  calculate_display_rect(&rect, 0, 0, max_width, max_height, width, height, sar);
1321  default_width = rect.w;
1322  default_height = rect.h;
1323 }
1324 
/* NOTE(review): the signature (original line 1325) and several interior
 * lines (1329-1330, 1333) are not visible in this extract — presumably
 * video_open(VideoState *is) computing w/h from the default window size;
 * confirm against the full source.
 * Sizes, titles, positions and shows the SDL window, recording the chosen
 * dimensions in *is. */
1326 {
1327  int w,h;
1328 
1331 
1332  if (!window_title)
1334  SDL_SetWindowTitle(window, window_title);
1335 
1336  SDL_SetWindowSize(window, w, h);
1337  SDL_SetWindowPosition(window, screen_left, screen_top);
1338  if (is_full_screen)
1339  SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP);
1340  SDL_ShowWindow(window);
1341 
1342  is->width = w;
1343  is->height = h;
1344 
1345  return 0;
1346 }
1347 
1348 /* display the current picture, if any */
/* NOTE(review): the signature (original line 1349) and the two display
 * calls (1357, 1359 — presumably the audio visualisation and video image
 * renderers) are not visible in this extract — confirm against the full
 * source. Clears the renderer, draws the active content, presents. */
1350 {
/* Lazily open the window on the first display. */
1351  if (!is->width)
1352  video_open(is);
1353 
1354  SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1355  SDL_RenderClear(renderer);
1356  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1358  else if (is->video_st)
1360  SDL_RenderPresent(renderer);
1361 }
1362 
1363 static double get_clock(Clock *c)
1364 {
1365  if (*c->queue_serial != c->serial)
1366  return NAN;
1367  if (c->paused) {
1368  return c->pts;
1369  } else {
1370  double time = av_gettime_relative() / 1000000.0;
1371  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1372  }
1373 }
1374 
1375 static void set_clock_at(Clock *c, double pts, int serial, double time)
1376 {
1377  c->pts = pts;
1378  c->last_updated = time;
1379  c->pts_drift = c->pts - time;
1380  c->serial = serial;
1381 }
1382 
1383 static void set_clock(Clock *c, double pts, int serial)
1384 {
1385  double time = av_gettime_relative() / 1000000.0;
1386  set_clock_at(c, pts, serial, time);
1387 }
1388 
/* Change the playback rate of a clock. The clock is re-anchored at its
 * current value first (while the old speed is still in effect) so the
 * reported time does not jump when the speed changes. */
1389 static void set_clock_speed(Clock *c, double speed)
1390 {
1391  set_clock(c, get_clock(c), c->serial);
1392  c->speed = speed;
1393 }
1394 
1395 static void init_clock(Clock *c, int *queue_serial)
1396 {
1397  c->speed = 1.0;
1398  c->paused = 0;
1399  c->queue_serial = queue_serial;
1400  set_clock(c, NAN, -1);
1401 }
1402 
1403 static void sync_clock_to_slave(Clock *c, Clock *slave)
1404 {
1405  double clock = get_clock(c);
1406  double slave_clock = get_clock(slave);
1407  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1408  set_clock(c, slave_clock, slave->serial);
1409 }
1410 
/* NOTE(review): the signature and opening brace (original line 1411) are
 * not visible in this extract; this is the body of what appears to be
 * get_master_sync_type(VideoState *is) — confirm against the full source.
 * Resolves the configured sync type against the streams actually present:
 * falls back from video to audio, and from audio to the external clock. */
1412  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1413  if (is->video_st)
1414  return AV_SYNC_VIDEO_MASTER;
1415  else
1416  return AV_SYNC_AUDIO_MASTER;
1417  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1418  if (is->audio_st)
1419  return AV_SYNC_AUDIO_MASTER;
1420  else
1421  return AV_SYNC_EXTERNAL_CLOCK;
1422  } else {
1423  return AV_SYNC_EXTERNAL_CLOCK;
1424  }
1425 }
1426 
1427 /* get the current master clock value */
/* NOTE(review): the signature line (original line 1428) is not visible in
 * this extract; presumably get_master_clock(VideoState *is) — confirm.
 * Returns the clock of whichever source is currently master. */
1429 {
1430  double val;
1431 
1432  switch (get_master_sync_type(is)) {
1433  case AV_SYNC_VIDEO_MASTER:
1434  val = get_clock(&is->vidclk);
1435  break;
1436  case AV_SYNC_AUDIO_MASTER:
1437  val = get_clock(&is->audclk);
1438  break;
1439  default:
1440  val = get_clock(&is->extclk);
1441  break;
1442  }
1443  return val;
1444 }
1445 
/* NOTE(review): the signature (original line 1446) and the two
 * set_clock_speed calls in the first branches (lines 1449, 1452) are not
 * visible in this extract — presumably they slow down / speed up the
 * external clock when the queues run low / full; confirm against source.
 * Adjusts the external clock speed so queue fill levels stay in range. */
1447  if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES ||
1448  is->audio_stream >= 0 && is->audioq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES) {
1450  } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) &&
1451  (is->audio_stream < 0 || is->audioq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES)) {
1453  } else {
/* Queues are in a comfortable range: ease the speed back towards 1.0. */
1454  double speed = is->extclk.speed;
1455  if (speed != 1.0)
1456  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1457  }
1458 }
1459 
1460 /* seek in the stream */
1461 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int by_bytes)
1462 {
1463  if (!is->seek_req) {
1464  is->seek_pos = pos;
1465  is->seek_rel = rel;
1466  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1467  if (by_bytes)
1468  is->seek_flags |= AVSEEK_FLAG_BYTE;
1469  is->seek_req = 1;
1470  SDL_CondSignal(is->continue_read_thread);
1471  }
1472 }
1473 
1474 /* pause or resume the video */
/* NOTE(review): the signature line (original line 1475) is not visible in
 * this extract; presumably stream_toggle_pause(VideoState *is) — confirm.
 * Flips the paused state of all clocks; when resuming, the frame timer is
 * advanced by the time spent paused so playback does not jump. */
1476 {
1477  if (is->paused) {
1478  is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1479  if (is->read_pause_return != AVERROR(ENOSYS)) {
1480  is->vidclk.paused = 0;
1481  }
1482  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1483  }
1484  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1485  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1486 }
1487 
/* NOTE(review): the signature (original line 1488) and one call line
 * (1490, presumably stream_toggle_pause(is)) are not visible in this
 * extract — confirm against the full source. Toggles pause and cancels
 * single-frame stepping. */
1489 {
1491  is->step = 0;
1492 }
1493 
/* NOTE(review): the signature line (original line 1494) is not visible in
 * this extract; presumably toggle_mute(VideoState *is) — confirm.
 * Flips the mute flag; the audio callback presumably consults it. */
1495 {
1496  is->muted = !is->muted;
1497 }
1498 
1499 static void update_volume(VideoState *is, int sign, double step)
1500 {
1501  double volume_level = is->audio_volume ? (20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)) : -1000.0;
1502  int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (volume_level + sign * step) / 20.0));
1503  is->audio_volume = av_clip(is->audio_volume == new_volume ? (is->audio_volume + sign) : new_volume, 0, SDL_MIX_MAXVOLUME);
1504 }
1505 
/* NOTE(review): the signature (original line 1506) and one call line
 * (1510, presumably stream_toggle_pause(is)) are not visible in this
 * extract — confirm against the full source. */
1507 {
1508  /* if the stream is paused unpause it, then step */
1509  if (is->paused)
1511  is->step = 1;
1512 }
1513 
/* Compute how long to keep showing the current frame so video stays in
 * sync with the master clock. Returns the (possibly adjusted) delay in
 * seconds.
 * NOTE(review): one condition line (original 1519, presumably
 * "if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {") is not
 * visible in this extract, which is why the braces below look unbalanced —
 * confirm against the full source. */
1514 static double compute_target_delay(double delay, VideoState *is)
1515 {
1516  double sync_threshold, diff = 0;
1517 
1518  /* update delay to follow master synchronisation source */
1520  /* if video is slave, we try to correct big delays by
1521  duplicating or deleting a frame */
1522  diff = get_clock(&is->vidclk) - get_master_clock(is);
1523 
1524  /* skip or repeat frame. We take into account the
1525  delay to compute the threshold. I still don't know
1526  if it is the best guess */
1527  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1528  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1529  if (diff <= -sync_threshold)
1530  delay = FFMAX(0, delay + diff);
1531  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1532  delay = delay + diff;
1533  else if (diff >= sync_threshold)
1534  delay = 2 * delay;
1535  }
1536  }
1537 
1538  av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
1539  delay, -diff);
1540 
1541  return delay;
1542 }
1543 
1544 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1545  if (vp->serial == nextvp->serial) {
1546  double duration = nextvp->pts - vp->pts;
1547  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1548  return vp->duration;
1549  else
1550  return duration;
1551  } else {
1552  return 0.0;
1553  }
1554 }
1555 
/* Push a freshly displayed frame's pts into the video clock, then let the
 * external clock track it. `pos` is currently unused. */
1556 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1557  /* update current video pts */
1558  set_clock(&is->vidclk, pts, serial);
1559  sync_clock_to_slave(&is->extclk, &is->vidclk);
1560 }
1561 
1562 /* called to display each frame */
1563 static void video_refresh(void *opaque, double *remaining_time)
/* Main display callback: refreshes the audio visualisation and/or video
 * picture, drops late frames, retires expired subtitles and prints the
 * periodic status line. On return *remaining_time holds how long the
 * caller may sleep before the next refresh.
 * NOTE(review): a few interior lines are missing from this extract
 * (originals 1571, 1672, 1706, 1709) — presumably the external-clock
 * speed check, a pause toggle after stepping, the AVBPrint init and the
 * master-clock argument of the status printf; confirm against source. */
1564 {
1565  VideoState *is = opaque;
1566  double time;
1567 
1568  Frame *sp, *sp2;
1569 
1570  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1572 
/* Audio-only visualisation refresh, throttled by rdftspeed. */
1573  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1574  time = av_gettime_relative() / 1000000.0;
1575  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1576  video_display(is);
1577  is->last_vis_time = time;
1578  }
1579  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1580  }
1581 
1582  if (is->video_st) {
1583 retry:
1584  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1585  // nothing to do, no picture to display in the queue
1586  } else {
1587  double last_duration, duration, delay;
1588  Frame *vp, *lastvp;
1589 
1590  /* dequeue the picture */
1591  lastvp = frame_queue_peek_last(&is->pictq);
1592  vp = frame_queue_peek(&is->pictq);
1593 
/* Discard frames left over from before a seek (stale serial). */
1594  if (vp->serial != is->videoq.serial) {
1595  frame_queue_next(&is->pictq);
1596  goto retry;
1597  }
1598 
1599  if (lastvp->serial != vp->serial)
1600  is->frame_timer = av_gettime_relative() / 1000000.0;
1601 
1602  if (is->paused)
1603  goto display;
1604 
1605  /* compute nominal last_duration */
1606  last_duration = vp_duration(is, lastvp, vp);
1607  delay = compute_target_delay(last_duration, is);
1608 
1609  time= av_gettime_relative()/1000000.0;
/* Not yet time to show the next frame: report how long to wait. */
1610  if (time < is->frame_timer + delay) {
1611  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1612  goto display;
1613  }
1614 
1615  is->frame_timer += delay;
1616  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1617  is->frame_timer = time;
1618 
1619  SDL_LockMutex(is->pictq.mutex);
1620  if (!isnan(vp->pts))
1621  update_video_pts(is, vp->pts, vp->pos, vp->serial);
1622  SDL_UnlockMutex(is->pictq.mutex);
1623 
/* Late-frame dropping when not stepping and framedrop allows it. */
1624  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1625  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1626  duration = vp_duration(is, vp, nextvp);
1627  if(!is->step && (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1628  is->frame_drops_late++;
1629  frame_queue_next(&is->pictq);
1630  goto retry;
1631  }
1632  }
1633 
/* Retire subtitles whose display window has passed, clearing any pixels
 * they already uploaded to the subtitle texture. */
1634  if (is->subtitle_st) {
1635  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1636  sp = frame_queue_peek(&is->subpq);
1637 
1638  if (frame_queue_nb_remaining(&is->subpq) > 1)
1639  sp2 = frame_queue_peek_next(&is->subpq);
1640  else
1641  sp2 = NULL;
1642 
1643  if (sp->serial != is->subtitleq.serial
1644  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1645  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1646  {
1647  if (sp->uploaded) {
1648  int i;
1649  for (i = 0; i < sp->sub.num_rects; i++) {
1650  AVSubtitleRect *sub_rect = sp->sub.rects[i];
1651  uint8_t *pixels;
1652  int pitch, j;
1653 
1654  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
1655  for (j = 0; j < sub_rect->h; j++, pixels += pitch)
1656  memset(pixels, 0, sub_rect->w << 2);
1657  SDL_UnlockTexture(is->sub_texture);
1658  }
1659  }
1660  }
1661  frame_queue_next(&is->subpq);
1662  } else {
1663  break;
1664  }
1665  }
1666  }
1667 
1668  frame_queue_next(&is->pictq);
1669  is->force_refresh = 1;
1670 
1671  if (is->step && !is->paused)
1673  }
1674 display:
1675  /* display picture */
1676  if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
1677  video_display(is);
1678  }
1679  is->force_refresh = 0;
/* Periodic (every 30 ms at most) status line on stderr/log. */
1680  if (show_status) {
1681  AVBPrint buf;
1682  static int64_t last_time;
1683  int64_t cur_time;
1684  int aqsize, vqsize, sqsize;
1685  double av_diff;
1686 
1687  cur_time = av_gettime_relative();
1688  if (!last_time || (cur_time - last_time) >= 30000) {
1689  aqsize = 0;
1690  vqsize = 0;
1691  sqsize = 0;
1692  if (is->audio_st)
1693  aqsize = is->audioq.size;
1694  if (is->video_st)
1695  vqsize = is->videoq.size;
1696  if (is->subtitle_st)
1697  sqsize = is->subtitleq.size;
1698  av_diff = 0;
1699  if (is->audio_st && is->video_st)
1700  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1701  else if (is->video_st)
1702  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1703  else if (is->audio_st)
1704  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1705 
1707  av_bprintf(&buf,
1708  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1710  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1711  av_diff,
1712  is->frame_drops_early + is->frame_drops_late,
1713  aqsize / 1024,
1714  vqsize / 1024,
1715  sqsize,
1716  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_dts : 0,
1717  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_pts : 0);
1718 
1719  if (show_status == 1 && AV_LOG_INFO > av_log_get_level())
1720  fprintf(stderr, "%s", buf.str);
1721  else
1722  av_log(NULL, AV_LOG_INFO, "%s", buf.str);
1723 
1724  fflush(stderr);
1725  av_bprint_finalize(&buf, NULL);
1726 
1727  last_time = cur_time;
1728  }
1729  }
1730 }
1731 
1732 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1733 {
1734  Frame *vp;
1735 
1736 #if defined(DEBUG_SYNC)
1737  printf("frame_type=%c pts=%0.3f\n",
1738  av_get_picture_type_char(src_frame->pict_type), pts);
1739 #endif
1740 
1741  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1742  return -1;
1743 
1744  vp->sar = src_frame->sample_aspect_ratio;
1745  vp->uploaded = 0;
1746 
1747  vp->width = src_frame->width;
1748  vp->height = src_frame->height;
1749  vp->format = src_frame->format;
1750 
1751  vp->pts = pts;
1752  vp->duration = duration;
1753  vp->pos = pos;
1754  vp->serial = serial;
1755 
1756  set_default_window_size(vp->width, vp->height, vp->sar);
1757 
1758  av_frame_move_ref(vp->frame, src_frame);
1759  frame_queue_push(&is->pictq);
1760  return 0;
1761 }
1762 
/* NOTE(review): the signature (original line 1763, presumably
 * get_video_frame(VideoState *is, AVFrame *frame)) and two interior lines
 * (1778, presumably the framedrop-enabled condition opening this block,
 * and 1786, presumably av_frame_unref(frame)) are not visible in this
 * extract — confirm against the full source.
 * Decodes one video frame; may drop it early when it is already late
 * relative to the master clock. Returns <0 on abort, 0 if no/dropped
 * frame, >0 when a frame was produced. */
1764 {
1765  int got_picture;
1766 
1767  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1768  return -1;
1769 
1770  if (got_picture) {
1771  double dpts = NAN;
1772 
1773  if (frame->pts != AV_NOPTS_VALUE)
1774  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1775 
1776  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1777 
1779  if (frame->pts != AV_NOPTS_VALUE) {
/* Drop the frame if it is late, the lateness exceeds the filter delay,
 * and more packets are queued (so we will not starve the display). */
1780  double diff = dpts - get_master_clock(is);
1781  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1782  diff - is->frame_last_filter_delay < 0 &&
1783  is->viddec.pkt_serial == is->vidclk.serial &&
1784  is->videoq.nb_packets) {
1785  is->frame_drops_early++;
1787  got_picture = 0;
1788  }
1789  }
1790  }
1791  }
1792 
1793  return got_picture;
1794 }
1795 
1796 #if CONFIG_AVFILTER
/* Wire source_ctx -> (optional user filtergraph) -> sink_ctx and configure
 * the graph.
 * NOTE(review): the AVFilterInOut declarations/allocations (original lines
 * 1802, 1805-1806) and the avfilter_inout_free calls at "fail" (1835-1836)
 * are not visible in this extract — confirm against the full source. */
1797 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1798  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1799 {
1800  int ret, i;
/* Remember how many filters existed before parsing the user graph, so the
 * newly added ones can be moved to the front below. */
1801  int nb_filters = graph->nb_filters;
1803 
1804  if (filtergraph) {
1807  if (!outputs || !inputs) {
1808  ret = AVERROR(ENOMEM);
1809  goto fail;
1810  }
1811 
1812  outputs->name = av_strdup("in");
1813  outputs->filter_ctx = source_ctx;
1814  outputs->pad_idx = 0;
1815  outputs->next = NULL;
1816 
1817  inputs->name = av_strdup("out");
1818  inputs->filter_ctx = sink_ctx;
1819  inputs->pad_idx = 0;
1820  inputs->next = NULL;
1821 
1822  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1823  goto fail;
1824  } else {
/* No user graph: connect source directly to sink. */
1825  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1826  goto fail;
1827  }
1828 
1829  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1830  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1831  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1832 
1833  ret = avfilter_graph_config(graph, NULL);
1834 fail:
1837  return ret;
1838 }
1839 
/* Build the video filter chain: buffer source -> autorotate transposes ->
 * optional user filters -> buffersink restricted to SDL-displayable pixel
 * formats.
 * NOTE(review): the pix_fmts array declaration (original line 1842) and
 * the frame side-data lookup line (1920, presumably fetching
 * AV_FRAME_DATA_DISPLAYMATRIX into `sd`) are not visible in this extract —
 * confirm against the full source. */
1840 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1841 {
1843  char sws_flags_str[512] = "";
1844  char buffersrc_args[256];
1845  int ret;
1846  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1847  AVCodecParameters *codecpar = is->video_st->codecpar;
1848  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1849  const AVDictionaryEntry *e = NULL;
1850  int nb_pix_fmts = 0;
1851  int i, j;
1852 
/* Collect the pixel formats the active SDL renderer can display. */
1853  for (i = 0; i < renderer_info.num_texture_formats; i++) {
1854  for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; j++) {
1855  if (renderer_info.texture_formats[i] == sdl_texture_format_map[j].texture_fmt) {
1856  pix_fmts[nb_pix_fmts++] = sdl_texture_format_map[j].format;
1857  break;
1858  }
1859  }
1860  }
1861  pix_fmts[nb_pix_fmts] = AV_PIX_FMT_NONE;
1862 
/* Translate the sws_dict options into a colon-separated flags string. */
1863  while ((e = av_dict_iterate(sws_dict, e))) {
1864  if (!strcmp(e->key, "sws_flags")) {
1865  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1866  } else
1867  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value);
1868  }
1869  if (strlen(sws_flags_str))
1870  sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1871 
1872  graph->scale_sws_opts = av_strdup(sws_flags_str);
1873 
1874  snprintf(buffersrc_args, sizeof(buffersrc_args),
1875  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1876  frame->width, frame->height, frame->format,
1877  is->video_st->time_base.num, is->video_st->time_base.den,
1878  codecpar->sample_aspect_ratio.num, FFMAX(codecpar->sample_aspect_ratio.den, 1));
1879  if (fr.num && fr.den)
1880  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1881 
1882  if ((ret = avfilter_graph_create_filter(&filt_src,
1883  avfilter_get_by_name("buffer"),
1884  "ffplay_buffer", buffersrc_args, NULL,
1885  graph)) < 0)
1886  goto fail;
1887 
1888  ret = avfilter_graph_create_filter(&filt_out,
1889  avfilter_get_by_name("buffersink"),
1890  "ffplay_buffersink", NULL, NULL, graph);
1891  if (ret < 0)
1892  goto fail;
1893 
1894  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1895  goto fail;
1896 
1897  last_filter = filt_out;
1898 
1899 /* Note: this macro adds a filter before the lastly added filter, so the
1900  * processing order of the filters is in reverse */
1901 #define INSERT_FILT(name, arg) do { \
1902  AVFilterContext *filt_ctx; \
1903  \
1904  ret = avfilter_graph_create_filter(&filt_ctx, \
1905  avfilter_get_by_name(name), \
1906  "ffplay_" name, arg, NULL, graph); \
1907  if (ret < 0) \
1908  goto fail; \
1909  \
1910  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1911  if (ret < 0) \
1912  goto fail; \
1913  \
1914  last_filter = filt_ctx; \
1915 } while (0)
1916 
/* Insert transpose/flip/rotate filters matching the stream's display
 * matrix so rotated videos show upright. */
1917  if (autorotate) {
1918  double theta = 0.0;
1919  int32_t *displaymatrix = NULL;
1921  if (sd)
1922  displaymatrix = (int32_t *)sd->data;
1923  if (!displaymatrix)
1924  displaymatrix = (int32_t *)av_stream_get_side_data(is->video_st, AV_PKT_DATA_DISPLAYMATRIX, NULL);
1925  theta = get_rotation(displaymatrix);
1926 
1927  if (fabs(theta - 90) < 1.0) {
1928  INSERT_FILT("transpose", "clock");
1929  } else if (fabs(theta - 180) < 1.0) {
1930  INSERT_FILT("hflip", NULL);
1931  INSERT_FILT("vflip", NULL);
1932  } else if (fabs(theta - 270) < 1.0) {
1933  INSERT_FILT("transpose", "cclock");
1934  } else if (fabs(theta) > 1.0) {
1935  char rotate_buf[64];
1936  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1937  INSERT_FILT("rotate", rotate_buf);
1938  }
1939  }
1940 
1941  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
1942  goto fail;
1943 
1944  is->in_video_filter = filt_src;
1945  is->out_video_filter = filt_out;
1946 
1947 fail:
1948  return ret;
1949 }
1950 
/* (Re)build the audio filter graph: abuffer source -> optional user
 * filters -> abuffersink. When force_output_format is set, the sink is
 * constrained to the already-opened SDL audio device's format.
 * NOTE(review): the sample_fmts array declaration (original line 1953) and
 * what is presumably the av_bprint_init call (1967) are not visible in
 * this extract — confirm against the full source. */
1951 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1952 {
1954  int sample_rates[2] = { 0, -1 };
1955  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1956  char aresample_swr_opts[512] = "";
1957  const AVDictionaryEntry *e = NULL;
1958  AVBPrint bp;
1959  char asrc_args[256];
1960  int ret;
1961 
/* Drop any previous graph before building a new one. */
1962  avfilter_graph_free(&is->agraph);
1963  if (!(is->agraph = avfilter_graph_alloc()))
1964  return AVERROR(ENOMEM);
1965  is->agraph->nb_threads = filter_nbthreads;
1966 
1968 
/* Forward the user's swresample options to any aresample filters. */
1969  while ((e = av_dict_iterate(swr_opts, e)))
1970  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
1971  if (strlen(aresample_swr_opts))
1972  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
1973  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
1974 
1975  av_channel_layout_describe_bprint(&is->audio_filter_src.ch_layout, &bp);
1976 
1977  ret = snprintf(asrc_args, sizeof(asrc_args),
1978  "sample_rate=%d:sample_fmt=%s:time_base=%d/%d:channel_layout=%s",
1979  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
1980  1, is->audio_filter_src.freq, bp.str);
1981 
1982  ret = avfilter_graph_create_filter(&filt_asrc,
1983  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1984  asrc_args, NULL, is->agraph);
1985  if (ret < 0)
1986  goto end;
1987 
1988 
1989  ret = avfilter_graph_create_filter(&filt_asink,
1990  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
1991  NULL, NULL, is->agraph);
1992  if (ret < 0)
1993  goto end;
1994 
1995  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1996  goto end;
1997  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1998  goto end;
1999 
/* Pin the sink to the device's rate/layout so no late conversion occurs. */
2000  if (force_output_format) {
2001  sample_rates [0] = is->audio_tgt.freq;
2002  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
2003  goto end;
2004  if ((ret = av_opt_set(filt_asink, "ch_layouts", bp.str, AV_OPT_SEARCH_CHILDREN)) < 0)
2005  goto end;
2006  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2007  goto end;
2008  }
2009 
2010 
2011  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
2012  goto end;
2013 
2014  is->in_audio_filter = filt_asrc;
2015  is->out_audio_filter = filt_asink;
2016 
2017 end:
2018  if (ret < 0)
2019  avfilter_graph_free(&is->agraph);
2020  av_bprint_finalize(&bp, NULL);
2021 
2022  return ret;
2023 }
2024 #endif /* CONFIG_AVFILTER */
2025 
/* Audio decode thread: pulls decoded frames, (re)configures the audio
 * filter chain when the source format changes, and queues filtered frames
 * for the audio callback.
 * NOTE(review): the AVFrame allocation (original line 2029, presumably
 * "AVFrame *frame = av_frame_alloc();"), the av_log call opening at 2061
 * and what is presumably an av_frame_move_ref/copy at 2091 are not visible
 * in this extract — confirm against the full source. */
2026 static int audio_thread(void *arg)
2027 {
2028  VideoState *is = arg;
2030  Frame *af;
2031 #if CONFIG_AVFILTER
2032  int last_serial = -1;
2033  int reconfigure;
2034 #endif
2035  int got_frame = 0;
2036  AVRational tb;
2037  int ret = 0;
2038 
2039  if (!frame)
2040  return AVERROR(ENOMEM);
2041 
2042  do {
2043  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2044  goto the_end;
2045 
2046  if (got_frame) {
2047  tb = (AVRational){1, frame->sample_rate};
2048 
2049 #if CONFIG_AVFILTER
/* Rebuild the filter graph if format/layout/rate or serial changed. */
2050  reconfigure =
2051  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.ch_layout.nb_channels,
2052  frame->format, frame->ch_layout.nb_channels) ||
2053  av_channel_layout_compare(&is->audio_filter_src.ch_layout, &frame->ch_layout) ||
2054  is->audio_filter_src.freq != frame->sample_rate ||
2055  is->auddec.pkt_serial != last_serial;
2056 
2057  if (reconfigure) {
2058  char buf1[1024], buf2[1024];
2059  av_channel_layout_describe(&is->audio_filter_src.ch_layout, buf1, sizeof(buf1));
2060  av_channel_layout_describe(&frame->ch_layout, buf2, sizeof(buf2));
2062  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2063  is->audio_filter_src.freq, is->audio_filter_src.ch_layout.nb_channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2064  frame->sample_rate, frame->ch_layout.nb_channels, av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2065 
2066  is->audio_filter_src.fmt = frame->format;
2067  ret = av_channel_layout_copy(&is->audio_filter_src.ch_layout, &frame->ch_layout);
2068  if (ret < 0)
2069  goto the_end;
2070  is->audio_filter_src.freq = frame->sample_rate;
2071  last_serial = is->auddec.pkt_serial;
2072 
2073  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2074  goto the_end;
2075  }
2076 
2077  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2078  goto the_end;
2079 
/* Drain every frame the filter chain produces for this input frame. */
2080  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2081  tb = av_buffersink_get_time_base(is->out_audio_filter);
2082 #endif
2083  if (!(af = frame_queue_peek_writable(&is->sampq)))
2084  goto the_end;
2085 
2086  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2087  af->pos = frame->pkt_pos;
2088  af->serial = is->auddec.pkt_serial;
2089  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2090 
2092  frame_queue_push(&is->sampq);
2093 
2094 #if CONFIG_AVFILTER
/* Abort the drain loop after a seek invalidated the serial. */
2095  if (is->audioq.serial != is->auddec.pkt_serial)
2096  break;
2097  }
2098  if (ret == AVERROR_EOF)
2099  is->auddec.finished = is->auddec.pkt_serial;
2100 #endif
2101  }
2102  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2103  the_end:
2104 #if CONFIG_AVFILTER
2105  avfilter_graph_free(&is->agraph);
2106 #endif
2107  av_frame_free(&frame);
2108  return ret;
2109 }
2110 
2111 static int decoder_start(Decoder *d, int (*fn)(void *), const char *thread_name, void* arg)
2112 {
2113  packet_queue_start(d->queue);
2114  d->decoder_tid = SDL_CreateThread(fn, thread_name, arg);
2115  if (!d->decoder_tid) {
2116  av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
2117  return AVERROR(ENOMEM);
2118  }
2119  return 0;
2120 }
2121 
/* Video decode thread: decodes frames, rebuilds the video filter graph
 * when frame properties change, runs frames through the filters and
 * queues them for display.
 * NOTE(review): the AVFrame allocation (original line 2125), the
 * get_video_frame call (2146, presumably "ret = get_video_frame(is,
 * frame);"), the av_log opening at 2158 and what is presumably
 * av_frame_unref(frame) at 2211 are not visible in this extract — confirm
 * against the full source. */
2122 static int video_thread(void *arg)
2123 {
2124  VideoState *is = arg;
2126  double pts;
2127  double duration;
2128  int ret;
2129  AVRational tb = is->video_st->time_base;
2130  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2131 
2132 #if CONFIG_AVFILTER
2133  AVFilterGraph *graph = NULL;
2134  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2135  int last_w = 0;
2136  int last_h = 0;
2137  enum AVPixelFormat last_format = -2;
2138  int last_serial = -1;
2139  int last_vfilter_idx = 0;
2140 #endif
2141 
2142  if (!frame)
2143  return AVERROR(ENOMEM);
2144 
2145  for (;;) {
2147  if (ret < 0)
2148  goto the_end;
2149  if (!ret)
2150  continue;
2151 
2152 #if CONFIG_AVFILTER
/* Rebuild the filter graph whenever the input frame's geometry, format,
 * serial or the selected user filter changes. */
2153  if ( last_w != frame->width
2154  || last_h != frame->height
2155  || last_format != frame->format
2156  || last_serial != is->viddec.pkt_serial
2157  || last_vfilter_idx != is->vfilter_idx) {
2159  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2160  last_w, last_h,
2161  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2162  frame->width, frame->height,
2163  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2164  avfilter_graph_free(&graph);
2165  graph = avfilter_graph_alloc();
2166  if (!graph) {
2167  ret = AVERROR(ENOMEM);
2168  goto the_end;
2169  }
2170  graph->nb_threads = filter_nbthreads;
2171  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
/* Filter setup failed: ask the main loop to quit. */
2172  SDL_Event event;
2173  event.type = FF_QUIT_EVENT;
2174  event.user.data1 = is;
2175  SDL_PushEvent(&event);
2176  goto the_end;
2177  }
2178  filt_in = is->in_video_filter;
2179  filt_out = is->out_video_filter;
2180  last_w = frame->width;
2181  last_h = frame->height;
2182  last_format = frame->format;
2183  last_serial = is->viddec.pkt_serial;
2184  last_vfilter_idx = is->vfilter_idx;
2185  frame_rate = av_buffersink_get_frame_rate(filt_out);
2186  }
2187 
2188  ret = av_buffersrc_add_frame(filt_in, frame);
2189  if (ret < 0)
2190  goto the_end;
2191 
2192  while (ret >= 0) {
2193  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2194 
2195  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2196  if (ret < 0) {
2197  if (ret == AVERROR_EOF)
2198  is->viddec.finished = is->viddec.pkt_serial;
2199  ret = 0;
2200  break;
2201  }
2202 
/* Measure filter latency; discard implausible values. */
2203  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2204  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2205  is->frame_last_filter_delay = 0;
2206  tb = av_buffersink_get_time_base(filt_out);
2207 #endif
2208  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2209  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2210  ret = queue_picture(is, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial);
2212 #if CONFIG_AVFILTER
2213  if (is->videoq.serial != is->viddec.pkt_serial)
2214  break;
2215  }
2216 #endif
2217 
2218  if (ret < 0)
2219  goto the_end;
2220  }
2221  the_end:
2222 #if CONFIG_AVFILTER
2223  avfilter_graph_free(&graph);
2224 #endif
2225  av_frame_free(&frame);
2226  return 0;
2227 }
2228 
2229 static int subtitle_thread(void *arg)
2230 {
2231  VideoState *is = arg;
2232  Frame *sp;
2233  int got_subtitle;
2234  double pts;
2235 
2236  for (;;) {
2237  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2238  return 0;
2239 
2240  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2241  break;
2242 
2243  pts = 0;
2244 
2245  if (got_subtitle && sp->sub.format == 0) {
2246  if (sp->sub.pts != AV_NOPTS_VALUE)
2247  pts = sp->sub.pts / (double)AV_TIME_BASE;
2248  sp->pts = pts;
2249  sp->serial = is->subdec.pkt_serial;
2250  sp->width = is->subdec.avctx->width;
2251  sp->height = is->subdec.avctx->height;
2252  sp->uploaded = 0;
2253 
2254  /* now we can update the picture count */
2255  frame_queue_push(&is->subpq);
2256  } else if (got_subtitle) {
2257  avsubtitle_free(&sp->sub);
2258  }
2259  }
2260  return 0;
2261 }
2262 
2263 /* copy samples for viewing in editor window */
2264 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2265 {
2266  int size, len;
2267 
2268  size = samples_size / sizeof(short);
2269  while (size > 0) {
2270  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2271  if (len > size)
2272  len = size;
2273  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2274  samples += len;
2275  is->sample_array_index += len;
2276  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2277  is->sample_array_index = 0;
2278  size -= len;
2279  }
2280 }
2281 
2282 /* return the wanted number of samples to get better sync if sync_type is video
2283  * or external master clock */
2284 static int synchronize_audio(VideoState *is, int nb_samples)
2285 {
2286  int wanted_nb_samples = nb_samples;
2287 
2288  /* if not master, then we try to remove or add samples to correct the clock */
2290  double diff, avg_diff;
2291  int min_nb_samples, max_nb_samples;
2292 
2293  diff = get_clock(&is->audclk) - get_master_clock(is);
2294 
2295  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2296  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2297  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2298  /* not enough measures to have a correct estimate */
2299  is->audio_diff_avg_count++;
2300  } else {
2301  /* estimate the A-V difference */
2302  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2303 
2304  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2305  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2306  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2307  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2308  wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2309  }
2310  av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2311  diff, avg_diff, wanted_nb_samples - nb_samples,
2312  is->audio_clock, is->audio_diff_threshold);
2313  }
2314  } else {
2315  /* too big difference : may be initial PTS errors, so
2316  reset A-V filter */
2317  is->audio_diff_avg_count = 0;
2318  is->audio_diff_cum = 0;
2319  }
2320  }
2321 
2322  return wanted_nb_samples;
2323 }
2324 
2325 /**
2326  * Decode one audio frame and return its uncompressed size.
2327  *
2328  * The processed audio frame is decoded, converted if required, and
2329  * stored in is->audio_buf, with size in bytes given by the return
2330  * value.
2331  */
2333 {
2334  int data_size, resampled_data_size;
2335  av_unused double audio_clock0;
2336  int wanted_nb_samples;
2337  Frame *af;
2338 
2339  if (is->paused)
2340  return -1;
2341 
2342  do {
2343 #if defined(_WIN32)
2344  while (frame_queue_nb_remaining(&is->sampq) == 0) {
2345  if ((av_gettime_relative() - audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
2346  return -1;
2347  av_usleep (1000);
2348  }
2349 #endif
2350  if (!(af = frame_queue_peek_readable(&is->sampq)))
2351  return -1;
2352  frame_queue_next(&is->sampq);
2353  } while (af->serial != is->audioq.serial);
2354 
2356  af->frame->nb_samples,
2357  af->frame->format, 1);
2358 
2359  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2360 
2361  if (af->frame->format != is->audio_src.fmt ||
2362  av_channel_layout_compare(&af->frame->ch_layout, &is->audio_src.ch_layout) ||
2363  af->frame->sample_rate != is->audio_src.freq ||
2364  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2365  swr_free(&is->swr_ctx);
2366  swr_alloc_set_opts2(&is->swr_ctx,
2367  &is->audio_tgt.ch_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2368  &af->frame->ch_layout, af->frame->format, af->frame->sample_rate,
2369  0, NULL);
2370  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2372  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2374  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.ch_layout.nb_channels);
2375  swr_free(&is->swr_ctx);
2376  return -1;
2377  }
2378  if (av_channel_layout_copy(&is->audio_src.ch_layout, &af->frame->ch_layout) < 0)
2379  return -1;
2380  is->audio_src.freq = af->frame->sample_rate;
2381  is->audio_src.fmt = af->frame->format;
2382  }
2383 
2384  if (is->swr_ctx) {
2385  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2386  uint8_t **out = &is->audio_buf1;
2387  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2388  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.ch_layout.nb_channels, out_count, is->audio_tgt.fmt, 0);
2389  int len2;
2390  if (out_size < 0) {
2391  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2392  return -1;
2393  }
2394  if (wanted_nb_samples != af->frame->nb_samples) {
2395  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2396  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2397  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2398  return -1;
2399  }
2400  }
2401  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2402  if (!is->audio_buf1)
2403  return AVERROR(ENOMEM);
2404  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2405  if (len2 < 0) {
2406  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2407  return -1;
2408  }
2409  if (len2 == out_count) {
2410  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2411  if (swr_init(is->swr_ctx) < 0)
2412  swr_free(&is->swr_ctx);
2413  }
2414  is->audio_buf = is->audio_buf1;
2415  resampled_data_size = len2 * is->audio_tgt.ch_layout.nb_channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2416  } else {
2417  is->audio_buf = af->frame->data[0];
2418  resampled_data_size = data_size;
2419  }
2420 
2421  audio_clock0 = is->audio_clock;
2422  /* update the audio clock with the pts */
2423  if (!isnan(af->pts))
2424  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2425  else
2426  is->audio_clock = NAN;
2427  is->audio_clock_serial = af->serial;
2428 #ifdef DEBUG
2429  {
2430  static double last_clock;
2431  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2432  is->audio_clock - last_clock,
2433  is->audio_clock, audio_clock0);
2434  last_clock = is->audio_clock;
2435  }
2436 #endif
2437  return resampled_data_size;
2438 }
2439 
2440 /* prepare a new audio buffer */
2441 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2442 {
2443  VideoState *is = opaque;
2444  int audio_size, len1;
2445 
2447 
2448  while (len > 0) {
2449  if (is->audio_buf_index >= is->audio_buf_size) {
2450  audio_size = audio_decode_frame(is);
2451  if (audio_size < 0) {
2452  /* if error, just output silence */
2453  is->audio_buf = NULL;
2454  is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2455  } else {
2456  if (is->show_mode != SHOW_MODE_VIDEO)
2457  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2458  is->audio_buf_size = audio_size;
2459  }
2460  is->audio_buf_index = 0;
2461  }
2462  len1 = is->audio_buf_size - is->audio_buf_index;
2463  if (len1 > len)
2464  len1 = len;
2465  if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
2466  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2467  else {
2468  memset(stream, 0, len1);
2469  if (!is->muted && is->audio_buf)
2470  SDL_MixAudioFormat(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, is->audio_volume);
2471  }
2472  len -= len1;
2473  stream += len1;
2474  is->audio_buf_index += len1;
2475  }
2476  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2477  /* Let's assume the audio driver that is used by SDL has two periods. */
2478  if (!isnan(is->audio_clock)) {
2479  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2480  sync_clock_to_slave(&is->extclk, &is->audclk);
2481  }
2482 }
2483 
2484 static int audio_open(void *opaque, AVChannelLayout *wanted_channel_layout, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2485 {
2486  SDL_AudioSpec wanted_spec, spec;
2487  const char *env;
2488  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2489  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2490  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2491  int wanted_nb_channels = wanted_channel_layout->nb_channels;
2492 
2493  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2494  if (env) {
2495  wanted_nb_channels = atoi(env);
2496  av_channel_layout_uninit(wanted_channel_layout);
2497  av_channel_layout_default(wanted_channel_layout, wanted_nb_channels);
2498  }
2499  if (wanted_channel_layout->order != AV_CHANNEL_ORDER_NATIVE) {
2500  av_channel_layout_uninit(wanted_channel_layout);
2501  av_channel_layout_default(wanted_channel_layout, wanted_nb_channels);
2502  }
2503  wanted_nb_channels = wanted_channel_layout->nb_channels;
2504  wanted_spec.channels = wanted_nb_channels;
2505  wanted_spec.freq = wanted_sample_rate;
2506  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2507  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2508  return -1;
2509  }
2510  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2511  next_sample_rate_idx--;
2512  wanted_spec.format = AUDIO_S16SYS;
2513  wanted_spec.silence = 0;
2514  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
2515  wanted_spec.callback = sdl_audio_callback;
2516  wanted_spec.userdata = opaque;
2517  while (!(audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE))) {
2518  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2519  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2520  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2521  if (!wanted_spec.channels) {
2522  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2523  wanted_spec.channels = wanted_nb_channels;
2524  if (!wanted_spec.freq) {
2526  "No more combinations to try, audio open failed\n");
2527  return -1;
2528  }
2529  }
2530  av_channel_layout_default(wanted_channel_layout, wanted_spec.channels);
2531  }
2532  if (spec.format != AUDIO_S16SYS) {
2534  "SDL advised audio format %d is not supported!\n", spec.format);
2535  return -1;
2536  }
2537  if (spec.channels != wanted_spec.channels) {
2538  av_channel_layout_uninit(wanted_channel_layout);
2539  av_channel_layout_default(wanted_channel_layout, spec.channels);
2540  if (wanted_channel_layout->order != AV_CHANNEL_ORDER_NATIVE) {
2542  "SDL advised channel count %d is not supported!\n", spec.channels);
2543  return -1;
2544  }
2545  }
2546 
2547  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2548  audio_hw_params->freq = spec.freq;
2549  if (av_channel_layout_copy(&audio_hw_params->ch_layout, wanted_channel_layout) < 0)
2550  return -1;
2551  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->ch_layout.nb_channels, 1, audio_hw_params->fmt, 1);
2552  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->ch_layout.nb_channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2553  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2554  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2555  return -1;
2556  }
2557  return spec.size;
2558 }
2559 
2560 /* open a given stream. Return 0 if OK */
2561 static int stream_component_open(VideoState *is, int stream_index)
2562 {
2563  AVFormatContext *ic = is->ic;
2564  AVCodecContext *avctx;
2565  const AVCodec *codec;
2566  const char *forced_codec_name = NULL;
2567  AVDictionary *opts = NULL;
2568  const AVDictionaryEntry *t = NULL;
2569  int sample_rate;
2570  AVChannelLayout ch_layout = { 0 };
2571  int ret = 0;
2572  int stream_lowres = lowres;
2573 
2574  if (stream_index < 0 || stream_index >= ic->nb_streams)
2575  return -1;
2576 
2577  avctx = avcodec_alloc_context3(NULL);
2578  if (!avctx)
2579  return AVERROR(ENOMEM);
2580 
2581  ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
2582  if (ret < 0)
2583  goto fail;
2584  avctx->pkt_timebase = ic->streams[stream_index]->time_base;
2585 
2586  codec = avcodec_find_decoder(avctx->codec_id);
2587 
2588  switch(avctx->codec_type){
2589  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2590  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2591  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2592  }
2593  if (forced_codec_name)
2594  codec = avcodec_find_decoder_by_name(forced_codec_name);
2595  if (!codec) {
2596  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2597  "No codec could be found with name '%s'\n", forced_codec_name);
2598  else av_log(NULL, AV_LOG_WARNING,
2599  "No decoder could be found for codec %s\n", avcodec_get_name(avctx->codec_id));
2600  ret = AVERROR(EINVAL);
2601  goto fail;
2602  }
2603 
2604  avctx->codec_id = codec->id;
2605  if (stream_lowres > codec->max_lowres) {
2606  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2607  codec->max_lowres);
2608  stream_lowres = codec->max_lowres;
2609  }
2610  avctx->lowres = stream_lowres;
2611 
2612  if (fast)
2613  avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2614 
2615  opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2616  if (!av_dict_get(opts, "threads", NULL, 0))
2617  av_dict_set(&opts, "threads", "auto", 0);
2618  if (stream_lowres)
2619  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2620  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2621  goto fail;
2622  }
2623  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2624  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2626  goto fail;
2627  }
2628 
2629  is->eof = 0;
2630  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2631  switch (avctx->codec_type) {
2632  case AVMEDIA_TYPE_AUDIO:
2633 #if CONFIG_AVFILTER
2634  {
2635  AVFilterContext *sink;
2636 
2637  is->audio_filter_src.freq = avctx->sample_rate;
2638  ret = av_channel_layout_copy(&is->audio_filter_src.ch_layout, &avctx->ch_layout);
2639  if (ret < 0)
2640  goto fail;
2641  is->audio_filter_src.fmt = avctx->sample_fmt;
2642  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2643  goto fail;
2644  sink = is->out_audio_filter;
2646  ret = av_buffersink_get_ch_layout(sink, &ch_layout);
2647  if (ret < 0)
2648  goto fail;
2649  }
2650 #else
2651  sample_rate = avctx->sample_rate;
2652  ret = av_channel_layout_copy(&ch_layout, &avctx->ch_layout);
2653  if (ret < 0)
2654  goto fail;
2655 #endif
2656 
2657  /* prepare audio output */
2658  if ((ret = audio_open(is, &ch_layout, sample_rate, &is->audio_tgt)) < 0)
2659  goto fail;
2660  is->audio_hw_buf_size = ret;
2661  is->audio_src = is->audio_tgt;
2662  is->audio_buf_size = 0;
2663  is->audio_buf_index = 0;
2664 
2665  /* init averaging filter */
2666  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2667  is->audio_diff_avg_count = 0;
2668  /* since we do not have a precise anough audio FIFO fullness,
2669  we correct audio sync only if larger than this threshold */
2670  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2671 
2672  is->audio_stream = stream_index;
2673  is->audio_st = ic->streams[stream_index];
2674 
2675  if ((ret = decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread)) < 0)
2676  goto fail;
2677  if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
2678  is->auddec.start_pts = is->audio_st->start_time;
2679  is->auddec.start_pts_tb = is->audio_st->time_base;
2680  }
2681  if ((ret = decoder_start(&is->auddec, audio_thread, "audio_decoder", is)) < 0)
2682  goto out;
2683  SDL_PauseAudioDevice(audio_dev, 0);
2684  break;
2685  case AVMEDIA_TYPE_VIDEO:
2686  is->video_stream = stream_index;
2687  is->video_st = ic->streams[stream_index];
2688 
2689  if ((ret = decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread)) < 0)
2690  goto fail;
2691  if ((ret = decoder_start(&is->viddec, video_thread, "video_decoder", is)) < 0)
2692  goto out;
2693  is->queue_attachments_req = 1;
2694  break;
2695  case AVMEDIA_TYPE_SUBTITLE:
2696  is->subtitle_stream = stream_index;
2697  is->subtitle_st = ic->streams[stream_index];
2698 
2699  if ((ret = decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread)) < 0)
2700  goto fail;
2701  if ((ret = decoder_start(&is->subdec, subtitle_thread, "subtitle_decoder", is)) < 0)
2702  goto out;
2703  break;
2704  default:
2705  break;
2706  }
2707  goto out;
2708 
2709 fail:
2710  avcodec_free_context(&avctx);
2711 out:
2712  av_channel_layout_uninit(&ch_layout);
2713  av_dict_free(&opts);
2714 
2715  return ret;
2716 }
2717 
2718 static int decode_interrupt_cb(void *ctx)
2719 {
2720  VideoState *is = ctx;
2721  return is->abort_request;
2722 }
2723 
2724 static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue) {
2725  return stream_id < 0 ||
2726  queue->abort_request ||
2728  queue->nb_packets > MIN_FRAMES && (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0);
2729 }
2730 
2732 {
2733  if( !strcmp(s->iformat->name, "rtp")
2734  || !strcmp(s->iformat->name, "rtsp")
2735  || !strcmp(s->iformat->name, "sdp")
2736  )
2737  return 1;
2738 
2739  if(s->pb && ( !strncmp(s->url, "rtp:", 4)
2740  || !strncmp(s->url, "udp:", 4)
2741  )
2742  )
2743  return 1;
2744  return 0;
2745 }
2746 
2747 /* this thread gets the stream from the disk or the network */
2748 static int read_thread(void *arg)
2749 {
2750  VideoState *is = arg;
2751  AVFormatContext *ic = NULL;
2752  int err, i, ret;
2753  int st_index[AVMEDIA_TYPE_NB];
2754  AVPacket *pkt = NULL;
2755  int64_t stream_start_time;
2756  int pkt_in_play_range = 0;
2757  const AVDictionaryEntry *t;
2758  SDL_mutex *wait_mutex = SDL_CreateMutex();
2759  int scan_all_pmts_set = 0;
2760  int64_t pkt_ts;
2761 
2762  if (!wait_mutex) {
2763  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
2764  ret = AVERROR(ENOMEM);
2765  goto fail;
2766  }
2767 
2768  memset(st_index, -1, sizeof(st_index));
2769  is->eof = 0;
2770 
2771  pkt = av_packet_alloc();
2772  if (!pkt) {
2773  av_log(NULL, AV_LOG_FATAL, "Could not allocate packet.\n");
2774  ret = AVERROR(ENOMEM);
2775  goto fail;
2776  }
2777  ic = avformat_alloc_context();
2778  if (!ic) {
2779  av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2780  ret = AVERROR(ENOMEM);
2781  goto fail;
2782  }
2785  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2786  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2787  scan_all_pmts_set = 1;
2788  }
2789  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2790  if (err < 0) {
2791  print_error(is->filename, err);
2792  ret = -1;
2793  goto fail;
2794  }
2795  if (scan_all_pmts_set)
2796  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2797 
2799  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2801  goto fail;
2802  }
2803  is->ic = ic;
2804 
2805  if (genpts)
2806  ic->flags |= AVFMT_FLAG_GENPTS;
2807 
2809 
2810  if (find_stream_info) {
2812  int orig_nb_streams = ic->nb_streams;
2813 
2814  err = avformat_find_stream_info(ic, opts);
2815 
2816  for (i = 0; i < orig_nb_streams; i++)
2817  av_dict_free(&opts[i]);
2818  av_freep(&opts);
2819 
2820  if (err < 0) {
2822  "%s: could not find codec parameters\n", is->filename);
2823  ret = -1;
2824  goto fail;
2825  }
2826  }
2827 
2828  if (ic->pb)
2829  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2830 
2831  if (seek_by_bytes < 0)
2833  !!(ic->iformat->flags & AVFMT_TS_DISCONT) &&
2834  strcmp("ogg", ic->iformat->name);
2835 
2836  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2837 
2838  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2839  window_title = av_asprintf("%s - %s", t->value, input_filename);
2840 
2841  /* if seeking requested, we execute it */
2842  if (start_time != AV_NOPTS_VALUE) {
2843  int64_t timestamp;
2844 
2845  timestamp = start_time;
2846  /* add the stream start time */
2847  if (ic->start_time != AV_NOPTS_VALUE)
2848  timestamp += ic->start_time;
2849  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2850  if (ret < 0) {
2851  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2852  is->filename, (double)timestamp / AV_TIME_BASE);
2853  }
2854  }
2855 
2856  is->realtime = is_realtime(ic);
2857 
2858  if (show_status)
2859  av_dump_format(ic, 0, is->filename, 0);
2860 
2861  for (i = 0; i < ic->nb_streams; i++) {
2862  AVStream *st = ic->streams[i];
2863  enum AVMediaType type = st->codecpar->codec_type;
2864  st->discard = AVDISCARD_ALL;
2865  if (type >= 0 && wanted_stream_spec[type] && st_index[type] == -1)
2867  st_index[type] = i;
2868  }
2869  for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
2870  if (wanted_stream_spec[i] && st_index[i] == -1) {
2871  av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
2872  st_index[i] = INT_MAX;
2873  }
2874  }
2875 
2876  if (!video_disable)
2877  st_index[AVMEDIA_TYPE_VIDEO] =
2879  st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2880  if (!audio_disable)
2881  st_index[AVMEDIA_TYPE_AUDIO] =
2883  st_index[AVMEDIA_TYPE_AUDIO],
2884  st_index[AVMEDIA_TYPE_VIDEO],
2885  NULL, 0);
2887  st_index[AVMEDIA_TYPE_SUBTITLE] =
2889  st_index[AVMEDIA_TYPE_SUBTITLE],
2890  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2891  st_index[AVMEDIA_TYPE_AUDIO] :
2892  st_index[AVMEDIA_TYPE_VIDEO]),
2893  NULL, 0);
2894 
2895  is->show_mode = show_mode;
2896  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2897  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2898  AVCodecParameters *codecpar = st->codecpar;
2900  if (codecpar->width)
2901  set_default_window_size(codecpar->width, codecpar->height, sar);
2902  }
2903 
2904  /* open the streams */
2905  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2907  }
2908 
2909  ret = -1;
2910  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2912  }
2913  if (is->show_mode == SHOW_MODE_NONE)
2914  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2915 
2916  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2918  }
2919 
2920  if (is->video_stream < 0 && is->audio_stream < 0) {
2921  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
2922  is->filename);
2923  ret = -1;
2924  goto fail;
2925  }
2926 
2927  if (infinite_buffer < 0 && is->realtime)
2928  infinite_buffer = 1;
2929 
2930  for (;;) {
2931  if (is->abort_request)
2932  break;
2933  if (is->paused != is->last_paused) {
2934  is->last_paused = is->paused;
2935  if (is->paused)
2936  is->read_pause_return = av_read_pause(ic);
2937  else
2938  av_read_play(ic);
2939  }
2940 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2941  if (is->paused &&
2942  (!strcmp(ic->iformat->name, "rtsp") ||
2943  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2944  /* wait 10 ms to avoid trying to get another packet */
2945  /* XXX: horrible */
2946  SDL_Delay(10);
2947  continue;
2948  }
2949 #endif
2950  if (is->seek_req) {
2951  int64_t seek_target = is->seek_pos;
2952  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2953  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2954 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2955 // of the seek_pos/seek_rel variables
2956 
2957  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2958  if (ret < 0) {
2960  "%s: error while seeking\n", is->ic->url);
2961  } else {
2962  if (is->audio_stream >= 0)
2963  packet_queue_flush(&is->audioq);
2964  if (is->subtitle_stream >= 0)
2965  packet_queue_flush(&is->subtitleq);
2966  if (is->video_stream >= 0)
2967  packet_queue_flush(&is->videoq);
2968  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
2969  set_clock(&is->extclk, NAN, 0);
2970  } else {
2971  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
2972  }
2973  }
2974  is->seek_req = 0;
2975  is->queue_attachments_req = 1;
2976  is->eof = 0;
2977  if (is->paused)
2979  }
2980  if (is->queue_attachments_req) {
2981  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
2982  if ((ret = av_packet_ref(pkt, &is->video_st->attached_pic)) < 0)
2983  goto fail;
2984  packet_queue_put(&is->videoq, pkt);
2985  packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
2986  }
2987  is->queue_attachments_req = 0;
2988  }
2989 
2990  /* if the queue are full, no need to read more */
2991  if (infinite_buffer<1 &&
2992  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2993  || (stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq) &&
2994  stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq) &&
2995  stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq)))) {
2996  /* wait 10 ms */
2997  SDL_LockMutex(wait_mutex);
2998  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2999  SDL_UnlockMutex(wait_mutex);
3000  continue;
3001  }
3002  if (!is->paused &&
3003  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
3004  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
3005  if (loop != 1 && (!loop || --loop)) {
3007  } else if (autoexit) {
3008  ret = AVERROR_EOF;
3009  goto fail;
3010  }
3011  }
3012  ret = av_read_frame(ic, pkt);
3013  if (ret < 0) {
3014  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
3015  if (is->video_stream >= 0)
3016  packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
3017  if (is->audio_stream >= 0)
3018  packet_queue_put_nullpacket(&is->audioq, pkt, is->audio_stream);
3019  if (is->subtitle_stream >= 0)
3020  packet_queue_put_nullpacket(&is->subtitleq, pkt, is->subtitle_stream);
3021  is->eof = 1;
3022  }
3023  if (ic->pb && ic->pb->error) {
3024  if (autoexit)
3025  goto fail;
3026  else
3027  break;
3028  }
3029  SDL_LockMutex(wait_mutex);
3030  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3031  SDL_UnlockMutex(wait_mutex);
3032  continue;
3033  } else {
3034  is->eof = 0;
3035  }
3036  /* check if packet is in play range specified by user, then queue, otherwise discard */
3037  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3038  pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
3039  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3040  (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3042  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3043  <= ((double)duration / 1000000);
3044  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3045  packet_queue_put(&is->audioq, pkt);
3046  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3047  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3048  packet_queue_put(&is->videoq, pkt);
3049  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3050  packet_queue_put(&is->subtitleq, pkt);
3051  } else {
3053  }
3054  }
3055 
3056  ret = 0;
3057  fail:
3058  if (ic && !is->ic)
3059  avformat_close_input(&ic);
3060 
3061  av_packet_free(&pkt);
3062  if (ret != 0) {
3063  SDL_Event event;
3064 
3065  event.type = FF_QUIT_EVENT;
3066  event.user.data1 = is;
3067  SDL_PushEvent(&event);
3068  }
3069  SDL_DestroyMutex(wait_mutex);
3070  return 0;
3071 }
3072 
/**
 * Allocate and initialize a VideoState for the given input and start the
 * demuxer thread.
 *
 * Sets up the three frame queues (video/subtitle/audio), their backing
 * packet queues, the per-stream clocks and the startup audio volume, then
 * spawns read_thread. On any failure everything allocated so far is torn
 * down via stream_close() and NULL is returned.
 *
 * @param filename input URL/path (duplicated; caller keeps ownership)
 * @param iformat  forced input format, or NULL for auto-detection
 * @return newly allocated VideoState, or NULL on error
 */
static VideoState *stream_open(const char *filename,
                               const AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    /* -1 marks "no stream selected yet" for all three media types */
    is->last_video_stream = is->video_stream = -1;
    is->last_audio_stream = is->audio_stream = -1;
    is->last_subtitle_stream = is->subtitle_stream = -1;
    is->filename = av_strdup(filename);
    if (!is->filename)
        goto fail;
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
        goto fail;
    if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
        goto fail;
    if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
        goto fail;

    if (packet_queue_init(&is->videoq) < 0 ||
        packet_queue_init(&is->audioq) < 0 ||
        packet_queue_init(&is->subtitleq) < 0)
        goto fail;

    if (!(is->continue_read_thread = SDL_CreateCond())) {
        av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
        goto fail;
    }

    /* each clock is slaved to the serial of its packet queue; the external
     * clock is slaved to itself */
    init_clock(&is->vidclk, &is->videoq.serial);
    init_clock(&is->audclk, &is->audioq.serial);
    init_clock(&is->extclk, &is->extclk.serial);
    is->audio_clock_serial = -1;
    if (startup_volume < 0)
        av_log(NULL, AV_LOG_WARNING, "-volume=%d < 0, setting to 0\n", startup_volume);
    if (startup_volume > 100)
        av_log(NULL, AV_LOG_WARNING, "-volume=%d > 100, setting to 100\n", startup_volume);
    /* NOTE(review): a clamp of startup_volume to [0,100] appears elided in
     * this extraction before the scaling below — confirm against upstream */
    /* map the user's 0..100 scale onto SDL's 0..SDL_MIX_MAXVOLUME scale */
    startup_volume = av_clip(SDL_MIX_MAXVOLUME * startup_volume / 100, 0, SDL_MIX_MAXVOLUME);
    is->audio_volume = startup_volume;
    is->muted = 0;
    is->av_sync_type = av_sync_type;
    is->read_tid = SDL_CreateThread(read_thread, "read_thread", is);
    if (!is->read_tid) {
        av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
        /* the earlier gotos jump into this error path inside the if-block */
fail:
        stream_close(is);
        return NULL;
    }
    return is;
}
3131 
/* Cycle the active stream of one media type to the next usable candidate.
 * NOTE(review): the function signature line is elided in this extraction;
 * presumably `static void stream_cycle_channel(VideoState *is, int codec_type)`
 * — confirm against upstream. Searches forward (wrapping) from the last
 * selected stream of that type; when a video stream is active the search is
 * limited to the streams of its program. */
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    int old_index;
    AVStream *st;
    AVProgram *p = NULL;
    int nb_streams = is->ic->nb_streams;

    /* pick the starting point and remember which stream we are replacing */
    if (codec_type == AVMEDIA_TYPE_VIDEO) {
        start_index = is->last_video_stream;
        old_index = is->video_stream;
    } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
        start_index = is->last_audio_stream;
        old_index = is->audio_stream;
    } else {
        start_index = is->last_subtitle_stream;
        old_index = is->subtitle_stream;
    }
    stream_index = start_index;

    /* when cycling audio/subtitles with video active, restrict the search to
     * the program containing the video stream; indices become positions in
     * p->stream_index[] rather than global stream indices */
    if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
        p = av_find_program_from_stream(ic, NULL, is->video_stream);
        if (p) {
            for (start_index = 0; start_index < nb_streams; start_index++)
                if (p->stream_index[start_index] == stream_index)
                    break;
            if (start_index == nb_streams)
                start_index = -1;
            stream_index = start_index;
        }
    }

    for (;;) {
        if (++stream_index >= nb_streams)
        {
            /* NOTE(review): an `if (codec_type == AVMEDIA_TYPE_SUBTITLE)`
             * guard appears elided in this extraction before this block
             * (subtitles may be cycled to "off") — confirm against upstream */
            {
                stream_index = -1;
                is->last_subtitle_stream = -1;
                goto the_end;
            }
            if (start_index == -1)
                return;
            stream_index = 0;  /* wrap around to the first stream */
        }
        if (stream_index == start_index)
            return;  /* full cycle completed without finding a candidate */
        st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
        if (st->codecpar->codec_type == codec_type) {
            /* check that parameters are OK */
            switch (codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                if (st->codecpar->sample_rate != 0 &&
                    st->codecpar->ch_layout.nb_channels != 0)
                    goto the_end;
                break;
            case AVMEDIA_TYPE_VIDEO:
            case AVMEDIA_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    /* map a program-relative index back to the global stream index */
    if (p && stream_index != -1)
        stream_index = p->stream_index[stream_index];
    av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
           /* NOTE(review): the media-type string argument appears elided in
            * this extraction between the format string and old_index */
           old_index,
           stream_index);

    stream_component_close(is, old_index);
    stream_component_open(is, stream_index);
}
3209 
/* Toggle between windowed and (desktop-sized) fullscreen mode.
 * NOTE(review): the signature line and the `is_full_screen` flip appear
 * elided in this extraction; presumably
 * `static void toggle_full_screen(VideoState *is)` with
 * `is_full_screen = !is_full_screen;` before the SDL call — confirm. */
{
    SDL_SetWindowFullscreen(window, is_full_screen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0);
}
3216 
/* Advance the display mode (video / waves / RDFT), skipping modes that
 * cannot be shown for the currently open streams: SHOW_MODE_VIDEO needs a
 * video stream, the visualization modes need an audio stream.
 * NOTE(review): the signature line is elided in this extraction;
 * presumably `static void toggle_audio_display(VideoState *is)` — confirm. */
{
    int next = is->show_mode;
    do {
        next = (next + 1) % SHOW_MODE_NB;
    } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
    if (is->show_mode != next) {
        is->force_refresh = 1;  /* redraw immediately in the new mode */
        is->show_mode = next;
    }
}
3228 
/* Block until an SDL event arrives, refreshing the display in the meantime.
 * video_refresh() tells us (via remaining_time) how long we may sleep before
 * the next frame is due, so the loop alternates short sleeps with refreshes
 * while polling the event queue. Also hides the mouse cursor after a delay. */
static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
    double remaining_time = 0.0;
    SDL_PumpEvents();
    while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
        /* NOTE(review): the cursor-idle-timeout `if (...)` condition appears
         * elided in this extraction before the two lines below — confirm */
        SDL_ShowCursor(0);
        cursor_hidden = 1;
        }
        if (remaining_time > 0.0)
            av_usleep((int64_t)(remaining_time * 1000000.0));
        remaining_time = REFRESH_RATE;
        if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
            video_refresh(is, &remaining_time);
        SDL_PumpEvents();
    }
}
3245 
3246 static void seek_chapter(VideoState *is, int incr)
3247 {
3248  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3249  int i;
3250 
3251  if (!is->ic->nb_chapters)
3252  return;
3253 
3254  /* find the current chapter */
3255  for (i = 0; i < is->ic->nb_chapters; i++) {
3256  AVChapter *ch = is->ic->chapters[i];
3257  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3258  i--;
3259  break;
3260  }
3261  }
3262 
3263  i += incr;
3264  i = FFMAX(i, 0);
3265  if (i >= is->ic->nb_chapters)
3266  return;
3267 
3268  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3269  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3270  AV_TIME_BASE_Q), 0, 0);
3271 }
3272 
/* handle an event sent by the GUI */
/* Main interactive loop: waits for SDL events via refresh_loop_wait_event()
 * and dispatches keyboard, mouse and window events to the player actions
 * (quit, pause, mute, volume, seeking, stream/filter cycling, resize).
 * Only returns via do_exit(). */
static void event_loop(VideoState *cur_stream)
{
    SDL_Event event;
    double incr, pos, frac;

    for (;;) {
        double x;
        refresh_loop_wait_event(cur_stream, &event);
        switch (event.type) {
        case SDL_KEYDOWN:
            if (exit_on_keydown || event.key.keysym.sym == SDLK_ESCAPE || event.key.keysym.sym == SDLK_q) {
                do_exit(cur_stream);
                break;
            }
            // If we don't yet have a window, skip all key events, because read_thread might still be initializing...
            if (!cur_stream->width)
                continue;
            switch (event.key.keysym.sym) {
            case SDLK_f:
                toggle_full_screen(cur_stream);
                cur_stream->force_refresh = 1;
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause(cur_stream);
                break;
            case SDLK_m:
                toggle_mute(cur_stream);
                break;
            case SDLK_KP_MULTIPLY:
            case SDLK_0:
                update_volume(cur_stream, 1, SDL_VOLUME_STEP);
                break;
            case SDLK_KP_DIVIDE:
            case SDLK_9:
                update_volume(cur_stream, -1, SDL_VOLUME_STEP);
                break;
            case SDLK_s: // S: Step to next frame
                step_to_next_frame(cur_stream);
                break;
            case SDLK_a:
                /* NOTE(review): the audio stream-cycling call appears elided
                 * in this extraction — confirm against upstream */
                break;
            case SDLK_v:
                /* NOTE(review): the video stream-cycling call appears elided
                 * in this extraction — confirm against upstream */
                break;
            case SDLK_c:
                /* NOTE(review): the program-cycling calls appear elided in
                 * this extraction — confirm against upstream */
                break;
            case SDLK_t:
                /* NOTE(review): the subtitle stream-cycling call appears
                 * elided in this extraction — confirm against upstream */
                break;
            case SDLK_w:
#if CONFIG_AVFILTER
                /* cycle through -vf filtergraphs while showing video,
                 * otherwise fall back to cycling display modes */
                if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
                    if (++cur_stream->vfilter_idx >= nb_vfilters)
                        cur_stream->vfilter_idx = 0;
                } else {
                    cur_stream->vfilter_idx = 0;
                    toggle_audio_display(cur_stream);
                }
#else
                toggle_audio_display(cur_stream);
#endif
                break;
            case SDLK_PAGEUP:
                if (cur_stream->ic->nb_chapters <= 1) {
                    incr = 600.0;  /* no chapters: page keys seek 10 minutes */
                    goto do_seek;
                }
                seek_chapter(cur_stream, 1);
                break;
            case SDLK_PAGEDOWN:
                if (cur_stream->ic->nb_chapters <= 1) {
                    incr = -600.0;
                    goto do_seek;
                }
                seek_chapter(cur_stream, -1);
                break;
            case SDLK_LEFT:
                incr = seek_interval ? -seek_interval : -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = seek_interval ? seek_interval : 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (seek_by_bytes) {
                    /* estimate a byte position from the last decoded frame,
                     * falling back to the demuxer's current file position */
                    pos = -1;
                    if (pos < 0 && cur_stream->video_stream >= 0)
                        pos = frame_queue_last_pos(&cur_stream->pictq);
                    if (pos < 0 && cur_stream->audio_stream >= 0)
                        pos = frame_queue_last_pos(&cur_stream->sampq);
                    if (pos < 0)
                        pos = avio_tell(cur_stream->ic->pb);
                    /* convert the seconds increment to bytes via the bitrate
                     * (180000 B/s assumed when the bitrate is unknown) */
                    if (cur_stream->ic->bit_rate)
                        incr *= cur_stream->ic->bit_rate / 8.0;
                    else
                        incr *= 180000.0;
                    pos += incr;
                    stream_seek(cur_stream, pos, incr, 1);
                } else {
                    pos = get_master_clock(cur_stream);
                    if (isnan(pos))
                        pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
                    pos += incr;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
                        pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
                    stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (exit_on_mousedown) {
                do_exit(cur_stream);
                break;
            }
            if (event.button.button == SDL_BUTTON_LEFT) {
                static int64_t last_mouse_left_click = 0;
                /* double-click within 0.5 s toggles fullscreen */
                if (av_gettime_relative() - last_mouse_left_click <= 500000) {
                    toggle_full_screen(cur_stream);
                    cur_stream->force_refresh = 1;
                    last_mouse_left_click = 0;
                } else {
                    last_mouse_left_click = av_gettime_relative();
                }
            }
            /* fall through: right-button seeking is shared with mouse motion */
        case SDL_MOUSEMOTION:
            if (cursor_hidden) {
                SDL_ShowCursor(1);
                cursor_hidden = 0;
            }
            if (event.type == SDL_MOUSEBUTTONDOWN) {
                if (event.button.button != SDL_BUTTON_RIGHT)
                    break;
                x = event.button.x;
            } else {
                if (!(event.motion.state & SDL_BUTTON_RMASK))
                    break;
                x = event.motion.x;
            }
            /* horizontal position maps to a fraction of the file */
            if (seek_by_bytes || cur_stream->ic->duration <= 0) {
                uint64_t size = avio_size(cur_stream->ic->pb);
                stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
            } else {
                int64_t ts;
                int ns, hh, mm, ss;
                int tns, thh, tmm, tss;
                tns = cur_stream->ic->duration / 1000000LL;
                thh = tns / 3600;
                tmm = (tns % 3600) / 60;
                tss = (tns % 60);
                frac = x / cur_stream->width;
                ns = frac * tns;
                hh = ns / 3600;
                mm = (ns % 3600) / 60;
                ss = (ns % 60);
                /* NOTE(review): the av_log(NULL, AV_LOG_INFO, ...) call head
                 * appears elided in this extraction before the format string */
                    "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
                    hh, mm, ss, thh, tmm, tss);
                ts = frac * cur_stream->ic->duration;
                if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                    ts += cur_stream->ic->start_time;
                stream_seek(cur_stream, ts, 0, 0);
            }
            break;
        case SDL_WINDOWEVENT:
            switch (event.window.event) {
            case SDL_WINDOWEVENT_SIZE_CHANGED:
                screen_width = cur_stream->width = event.window.data1;
                screen_height = cur_stream->height = event.window.data2;
                /* the visualization texture is sized to the window; drop it
                 * so it gets recreated at the new dimensions */
                if (cur_stream->vis_texture) {
                    SDL_DestroyTexture(cur_stream->vis_texture);
                    cur_stream->vis_texture = NULL;
                }
                /* fall through: a resize also requires a redraw */
            case SDL_WINDOWEVENT_EXPOSED:
                cur_stream->force_refresh = 1;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit(cur_stream);
            break;
        default:
            break;
        }
    }
}
3472 
3473 static int opt_width(void *optctx, const char *opt, const char *arg)
3474 {
3475  screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3476  return 0;
3477 }
3478 
3479 static int opt_height(void *optctx, const char *opt, const char *arg)
3480 {
3481  screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3482  return 0;
3483 }
3484 
/* "-f" option handler: force the input container format by name.
 * NOTE(review): the `file_iformat = av_find_input_format(arg);` line appears
 * elided in this extraction before the NULL check — confirm against upstream. */
static int opt_format(void *optctx, const char *opt, const char *arg)
{
    if (!file_iformat) {
        av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
        return AVERROR(EINVAL);
    }
    return 0;
}
3494 
/* "-sync" option handler: choose the master clock (audio, video or external).
 * NOTE(review): the av_sync_type assignments appear elided in this
 * extraction after each matching strcmp branch — confirm against upstream. */
static int opt_sync(void *optctx, const char *opt, const char *arg)
{
    if (!strcmp(arg, "audio"))
    else if (!strcmp(arg, "video"))
    else if (!strcmp(arg, "ext"))
    else {
        av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
        exit(1);
    }
    return 0;
}
3509 
3510 static int opt_seek(void *optctx, const char *opt, const char *arg)
3511 {
3512  start_time = parse_time_or_die(opt, arg, 1);
3513  return 0;
3514 }
3515 
3516 static int opt_duration(void *optctx, const char *opt, const char *arg)
3517 {
3518  duration = parse_time_or_die(opt, arg, 1);
3519  return 0;
3520 }
3521 
3522 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3523 {
3524  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3525  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3526  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3527  parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3528  return 0;
3529 }
3530 
/* Positional-argument handler: record the single input filename.
 * A second filename is a usage error; "-" is mapped to the "pipe:" protocol.
 * NOTE(review): the `av_log(NULL, AV_LOG_FATAL,` call head appears elided in
 * this extraction before the duplicate-filename message — confirm. */
static void opt_input_file(void *optctx, const char *filename)
{
    if (input_filename) {
            "Argument '%s' provided as input filename, but '%s' was already specified.\n",
            filename, input_filename);
        exit(1);
    }
    if (!strcmp(filename, "-"))
        filename = "pipe:";
    input_filename = filename;
}
3543 
/* "-codec:<spec>" option handler: force a decoder for one media type.
 * The specifier after ':' selects audio ('a'), subtitle ('s') or video ('v').
 * NOTE(review): two `av_log(NULL, AV_LOG_ERROR,` call heads appear elided in
 * this extraction before the message strings below — confirm. */
static int opt_codec(void *optctx, const char *opt, const char *arg)
{
    const char *spec = strchr(opt, ':');
    if (!spec) {
            "No media specifier was specified in '%s' in option '%s'\n",
            arg, opt);
        return AVERROR(EINVAL);
    }
    spec++;  /* step past the ':' to the media specifier character */
    switch (spec[0]) {
    case 'a' : audio_codec_name = arg; break;
    case 's' : subtitle_codec_name = arg; break;
    case 'v' : video_codec_name = arg; break;
    default:
            "Invalid media specifier '%s' in option '%s'\n", spec, opt);
        return AVERROR(EINVAL);
    }
    return 0;
}
3565 
3566 static int dummy;
3567 
/* Command-line option table, consumed by parse_options() from cmdutils.
 * NOTE(review): the CMDUTILS_COMMON_OPTIONS entry appears elided at the top
 * of this table in this extraction — confirm against upstream. */
static const OptionDef options[] = {
    { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
    { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
    { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
    { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
    { "vn", OPT_BOOL, { &video_disable }, "disable video" },
    { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
    { "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
    { "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
    { "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
    { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "seek_interval", OPT_FLOAT | HAS_ARG, { &seek_interval }, "set seek interval for left/right keys, in seconds", "seconds" },
    { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
    { "noborder", OPT_BOOL, { &borderless }, "borderless window" },
    { "alwaysontop", OPT_BOOL, { &alwaysontop }, "window always on top" },
    { "volume", OPT_INT | HAS_ARG, { &startup_volume}, "set startup volume 0=min 100=max", "volume" },
    { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
    { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
    { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
    { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
    { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
    { "left", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_left }, "set the x position for the left of the window", "x pos" },
    { "top", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_top }, "set the y position for the top of the window", "y pos" },
#if CONFIG_AVFILTER
    { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
    { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
    { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
    { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
    { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
    { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
    { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
    { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
    { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
    { "find_stream_info", OPT_BOOL | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
        "read and decode the streams to fill missing information with heuristics" },
    { "filter_threads", HAS_ARG | OPT_INT | OPT_EXPERT, { &filter_nbthreads }, "number of filter threads per graph" },
    { NULL, },
};
3620 
3621 static void show_usage(void)
3622 {
3623  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3624  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3625  av_log(NULL, AV_LOG_INFO, "\n");
3626 }
3627 
/* Implementation of cmdutils' show_help_default(): print usage, the main and
 * advanced option tables and the interactive key bindings for "-h".
 * NOTE(review): several lines appear elided in this extraction — the
 * av_log_set_callback(log_callback_help) call, the show_help_children()
 * calls for the codec/format (and filter) classes, and the printf lines in
 * the CONFIG_AVFILTER conditional — confirm against upstream. */
void show_help_default(const char *opt, const char *arg)
{
    show_usage();
    show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
    show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
    printf("\n");
#if !CONFIG_AVFILTER
#else
#endif
    printf("\nWhile playing:\n"
           "q, ESC quit\n"
           "f toggle full screen\n"
           "p, SPC pause\n"
           "m toggle mute\n"
           "9, 0 decrease and increase volume respectively\n"
           "/, * decrease and increase volume respectively\n"
           "a cycle audio channel in the current program\n"
           "v cycle video channel\n"
           "t cycle subtitle channel in the current program\n"
           "c cycle program\n"
           "w cycle video filters or show modes\n"
           "s activate frame-step mode\n"
           "left/right seek backward/forward 10 seconds or to custom interval if -seek_interval is set\n"
           "down/up seek backward/forward 1 minute\n"
           "page down/page up seek backward/forward 10 minutes\n"
           "right mouse click seek to percentage in file corresponding to fraction of width\n"
           "left double-click toggle full screen\n"
           );
}
3662 
/* Called from the main */
/* Program entry point: parse the command line, initialize SDL (audio, video,
 * timer), create the window/renderer pair, open the input stream and hand
 * control to event_loop() (which never returns; exit happens via do_exit()).
 * NOTE(review): several lines appear elided in this extraction — the av_log
 * flag setup after init_dynload(), the avdevice registration inside
 * CONFIG_AVDEVICE, the network init, one av_log call head in the missing-
 * input error path, and the `is = stream_open(...)` assignment — confirm. */
int main(int argc, char **argv)
{
    int flags;
    VideoState *is;

    init_dynload();

    parse_loglevel(argc, argv, options);

    /* register all codecs, demux and protocols */
#if CONFIG_AVDEVICE
#endif

    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
    signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */

    show_banner(argc, argv, options);

    parse_options(NULL, argc, argv, options, opt_input_file);

    if (!input_filename) {
        show_usage();
        av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
            "Use -h to get full help or, even better, run 'man %s'\n", program_name);
        exit(1);
    }

    /* -nodisp implies no video decoding at all */
    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
    if (audio_disable)
        flags &= ~SDL_INIT_AUDIO;
    else {
        /* Try to work around an occasional ALSA buffer underflow issue when the
         * period size is NPOT due to ALSA resampling by forcing the buffer size. */
        if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
            SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1);
    }
    if (display_disable)
        flags &= ~SDL_INIT_VIDEO;
    if (SDL_Init (flags)) {
        av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
        av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
        exit(1);
    }

    /* these event types are unused; ignoring them keeps the queue small */
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    if (!display_disable) {
        int flags = SDL_WINDOW_HIDDEN;  /* shown later, once sized */
        if (alwaysontop)
#if SDL_VERSION_ATLEAST(2,0,5)
            flags |= SDL_WINDOW_ALWAYS_ON_TOP;
#else
            av_log(NULL, AV_LOG_WARNING, "Your SDL version doesn't support SDL_WINDOW_ALWAYS_ON_TOP. Feature will be inactive.\n");
#endif
        if (borderless)
            flags |= SDL_WINDOW_BORDERLESS;
        else
            flags |= SDL_WINDOW_RESIZABLE;

#ifdef SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR
        SDL_SetHint(SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR, "0");
#endif
        window = SDL_CreateWindow(program_name, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, default_width, default_height, flags);
        SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
        if (window) {
            /* prefer a hardware-accelerated, vsynced renderer; fall back to
             * whatever SDL can provide */
            renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
            if (!renderer) {
                av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
                renderer = SDL_CreateRenderer(window, -1, 0);
            }
            if (renderer) {
                if (!SDL_GetRendererInfo(renderer, &renderer_info))
                    av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", renderer_info.name);
            }
        }
        if (!window || !renderer || !renderer_info.num_texture_formats) {
            av_log(NULL, AV_LOG_FATAL, "Failed to create window or renderer: %s", SDL_GetError());
            do_exit(NULL);
        }
    }

    /* NOTE(review): `is = stream_open(input_filename, file_iformat);`
     * appears elided in this extraction before the NULL check below */
    if (!is) {
        av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
        do_exit(NULL);
    }

    event_loop(is);

    /* never returns */

    return 0;
}
OPT_FLOAT
#define OPT_FLOAT
Definition: cmdutils.h:156
AVSubtitle
Definition: avcodec.h:2308
rect::w
int w
Definition: f_ebur128.c:76
sws_getCachedContext
struct SwsContext * sws_getCachedContext(struct SwsContext *context, int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Check if context can be reused, otherwise reallocate a new one.
Definition: utils.c:2490
do_exit
static void do_exit(VideoState *is)
Definition: ffplay.c:1287
VideoState::seek_rel
int64_t seek_rel
Definition: ffplay.c:214
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:422
AVCodec
AVCodec.
Definition: codec.h:204
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:204
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
VideoState::video_st
AVStream * video_st
Definition: ffplay.c:284
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
av_buffersink_get_ch_layout
int av_buffersink_get_ch_layout(const AVFilterContext *ctx, AVChannelLayout *out)
Definition: buffersink.c:241
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
Frame::width
int width
Definition: ffplay.c:161
AVFMT_NO_BYTE_SEEK
#define AVFMT_NO_BYTE_SEEK
Format does not allow seeking by bytes.
Definition: avformat.h:489
av_clip
#define av_clip
Definition: common.h:95
AudioParams::fmt
enum AVSampleFormat fmt
Definition: ffplay.c:138
av_sync_type
static int av_sync_type
Definition: ffplay.c:330
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
renderer_info
static SDL_RendererInfo renderer_info
Definition: ffplay.c:367
configure_filtergraph
int configure_filtergraph(FilterGraph *fg)
Definition: ffmpeg_filter.c:962
frame_queue_nb_remaining
static int frame_queue_nb_remaining(FrameQueue *f)
Definition: ffplay.c:790
frame_queue_next
static void frame_queue_next(FrameQueue *f)
Definition: ffplay.c:774
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:58
Decoder::finished
int finished
Definition: ffplay.c:193
AVFilterGraph::nb_threads
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:904
av_compare_ts
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
Definition: mathematics.c:147
AVProgram::nb_stream_indexes
unsigned int nb_stream_indexes
Definition: avformat.h:1145
frame_queue_last_pos
static int64_t frame_queue_last_pos(FrameQueue *f)
Definition: ffplay.c:796
out
FILE * out
Definition: movenc.c:54
AVCodecContext::sample_rate
int sample_rate
samples per second
Definition: avcodec.h:1007
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
video_thread
static int video_thread(void *arg)
Definition: ffplay.c:2122
VideoState::av_sync_type
int av_sync_type
Definition: ffplay.c:233
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:691
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
set_default_window_size
static void set_default_window_size(int width, int height, AVRational sar)
Definition: ffplay.c:1313
AV_PIX_FMT_BGR32
#define AV_PIX_FMT_BGR32
Definition: pixfmt.h:434
AV_NOSYNC_THRESHOLD
#define AV_NOSYNC_THRESHOLD
Definition: ffplay.c:88
AVFormatContext::nb_chapters
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1434
AVCodecParameters
This struct describes the properties of an encoded stream.
Definition: codec_par.h:54
AV_LOG_QUIET
#define AV_LOG_QUIET
Print no output.
Definition: log.h:162
FrameQueue::keep_last
int keep_last
Definition: ffplay.c:175
sub
static float sub(float src0, float src1)
Definition: dnn_backend_native_layer_mathbinary.c:31
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:947
VideoState::audio_hw_buf_size
int audio_hw_buf_size
Definition: ffplay.c:243
decoder_decode_frame
static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub)
Definition: ffplay.c:577
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AV_DISPOSITION_ATTACHED_PIC
#define AV_DISPOSITION_ATTACHED_PIC
The stream is stored in the file as an attached picture/"cover art" (e.g.
Definition: avformat.h:879
FrameQueue::cond
SDL_cond * cond
Definition: ffplay.c:178
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:146
AVStream::discard
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:1010
av_find_program_from_stream
AVProgram * av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
Find the programs which belong to a given stream.
Definition: avformat.c:362
display_disable
static int display_disable
Definition: ffplay.c:325
screen_width
static int screen_width
Definition: ffplay.c:315
DEBUG
#define DEBUG
Definition: vf_framerate.c:29
sws_dict
AVDictionary * sws_dict
Definition: cmdutils.c:58
swr_set_compensation
int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance)
Activate resampling compensation ("soft" compensation).
Definition: swresample.c:1001
SAMPLE_ARRAY_SIZE
#define SAMPLE_ARRAY_SIZE
Definition: ffplay.c:106
rect
Definition: f_ebur128.c:76
update_volume
static void update_volume(VideoState *is, int sign, double step)
Definition: ffplay.c:1499
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
VideoState::auddec
Decoder auddec
Definition: ffplay.c:227
screen_left
static int screen_left
Definition: ffplay.c:317
av_opt_set_int_list
#define av_opt_set_int_list(obj, name, val, term, flags)
Set a binary option to an integer list.
Definition: opt.h:721
AudioParams::frame_size
int frame_size
Definition: ffplay.c:139
AVSubtitleRect
Definition: avcodec.h:2280
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:116
Decoder::next_pts
int64_t next_pts
Definition: ffplay.c:198
decoder_start
static int decoder_start(Decoder *d, int(*fn)(void *), const char *thread_name, void *arg)
Definition: ffplay.c:2111
rect::y
int y
Definition: f_ebur128.c:76
FrameQueue::size
int size
Definition: ffplay.c:173
avformat_get_class
const AVClass * avformat_get_class(void)
Get the AVClass for AVFormatContext.
Definition: options.c:197
av_unused
#define av_unused
Definition: attributes.h:131
Frame::sar
AVRational sar
Definition: ffplay.c:164
out_size
int out_size
Definition: movenc.c:55
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:116
upload_texture
static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext **img_convert_ctx)
Definition: ffplay.c:894
VideoState::vis_texture
SDL_Texture * vis_texture
Definition: ffplay.c:272
queue_picture
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
Definition: ffplay.c:1732
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:325
AudioParams
Definition: ffplay.c:135
VideoState::subtitle_st
AVStream * subtitle_st
Definition: ffplay.c:277
pixdesc.h
AVFormatContext::streams
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1284
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
VideoState::frame_last_filter_delay
double frame_last_filter_delay
Definition: ffplay.c:282
AVFrame::width
int width
Definition: frame.h:397
VideoState::xleft
int xleft
Definition: ffplay.c:292
Frame::pts
double pts
Definition: ffplay.c:158
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:661
OPT_INPUT
#define OPT_INPUT
Definition: cmdutils.h:166
avcodec_decode_subtitle2
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:833
frame_queue_init
static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
Definition: ffplay.c:677
subtitle_codec_name
static const char * subtitle_codec_name
Definition: ffplay.c:345
EXTERNAL_CLOCK_MIN_FRAMES
#define EXTERNAL_CLOCK_MIN_FRAMES
Definition: ffplay.c:70
HAS_ARG
#define HAS_ARG
Definition: cmdutils.h:149
b
#define b
Definition: input.c:41
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:159
AVChapter::start
int64_t start
Definition: avformat.h:1178
Clock
Definition: ffplay.c:143
data
const char data[16]
Definition: mxf.c:146
frame_queue_destory
static void frame_queue_destory(FrameQueue *f)
Definition: ffplay.c:698
SAMPLE_QUEUE_SIZE
#define SAMPLE_QUEUE_SIZE
Definition: ffplay.c:132
program_name
const char program_name[]
program name, defined by the program for show_version().
Definition: ffplay.c:65
Decoder::queue
PacketQueue * queue
Definition: ffplay.c:190
format_opts
AVDictionary * format_opts
Definition: cmdutils.c:60
AVSEEK_FLAG_BYTE
#define AVSEEK_FLAG_BYTE
seeking based on position in bytes
Definition: avformat.h:2305
AVIOContext::error
int error
contains the error code or 0 if no error happened
Definition: avio.h:245
AV_DICT_IGNORE_SUFFIX
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key,...
Definition: dict.h:75
AV_PIX_FMT_RGB32_1
#define AV_PIX_FMT_RGB32_1
Definition: pixfmt.h:433
VideoState::audio_diff_avg_coef
double audio_diff_avg_coef
Definition: ffplay.c:238
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
CURSOR_HIDE_DELAY
#define CURSOR_HIDE_DELAY
Definition: ffplay.c:108
SDL_VOLUME_STEP
#define SDL_VOLUME_STEP
Definition: ffplay.c:79
show_help_children
void show_help_children(const AVClass *class, int flags)
Show help for all options with given flags in class and all its children.
Definition: cmdutils.c:169
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
AV_PIX_FMT_BGRA
@ AV_PIX_FMT_BGRA
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:95
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
autorotate
static int autorotate
Definition: ffplay.c:355
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:392
TextureFormatEntry::texture_fmt
int texture_fmt
Definition: ffplay.c:372
video_disable
static int video_disable
Definition: ffplay.c:320
Frame::uploaded
int uploaded
Definition: ffplay.c:165
mathematics.h
sws_scale
int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
Definition: swscale.c:1203
AVDictionary
Definition: dict.c:32
AVChannelLayout::order
enum AVChannelOrder order
Channel order used in this layout.
Definition: channel_layout.h:306
compute_target_delay
static double compute_target_delay(double delay, VideoState *is)
Definition: ffplay.c:1514
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
Frame
Definition: ffplay.c:154
stream_close
static void stream_close(VideoState *is)
Definition: ffplay.c:1250
av_read_frame
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: demux.c:1439
SDL_AUDIO_MAX_CALLBACKS_PER_SEC
#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC
Definition: ffplay.c:76
AVFMT_NOBINSEARCH
#define AVFMT_NOBINSEARCH
Format does not allow to fall back on binary search via read_timestamp.
Definition: avformat.h:487
VideoState::paused
int paused
Definition: ffplay.c:208
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:311
init_clock
static void init_clock(Clock *c, int *queue_serial)
Definition: ffplay.c:1395
codec_type
enum AVMediaType codec_type
Definition: rtp.c:37
AV_OPT_FLAG_FILTERING_PARAM
#define AV_OPT_FLAG_FILTERING_PARAM
a generic parameter which can be set by the user for filtering
Definition: opt.h:297
opt_seek
static int opt_seek(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3510
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:354
av_strlcatf
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:104
get_master_clock
static double get_master_clock(VideoState *is)
Definition: ffplay.c:1428
VideoState::width
int width
Definition: ffplay.c:292
file_iformat
static const AVInputFormat * file_iformat
Definition: ffplay.c:310
sample_rate
sample_rate
Definition: ffmpeg_filter.c:156
dummy
static int dummy
Definition: ffplay.c:3566
update_video_pts
static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial)
Definition: ffplay.c:1556
FF_QUIT_EVENT
#define FF_QUIT_EVENT
Definition: ffplay.c:363
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: avpacket.c:73
PacketQueue
Definition: ffplay.c:119
subtitle_thread
static int subtitle_thread(void *arg)
Definition: ffplay.c:2229
av_channel_layout_describe_bprint
int av_channel_layout_describe_bprint(const AVChannelLayout *channel_layout, AVBPrint *bp)
bprint variant of av_channel_layout_describe().
Definition: channel_layout.c:732
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:346
VideoState::last_subtitle_stream
int last_subtitle_stream
Definition: ffplay.c:304
VideoState::SHOW_MODE_NONE
@ SHOW_MODE_NONE
Definition: ffplay.c:262
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:119
OptionDef
Definition: cmdutils.h:146
audio_decode_frame
static int audio_decode_frame(VideoState *is)
Decode one audio frame and return its uncompressed size.
Definition: ffplay.c:2332
subtitle_disable
static int subtitle_disable
Definition: ffplay.c:321
VideoState::pictq
FrameQueue pictq
Definition: ffplay.c:223
genpts
static int genpts
Definition: ffplay.c:334
VideoState::swr_ctx
struct SwrContext * swr_ctx
Definition: ffplay.c:257
opt_sync
static int opt_sync(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3495
av_find_best_stream
int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type, int wanted_stream_nb, int related_stream, const AVCodec **decoder_ret, int flags)
Find the "best" stream in the file.
Definition: avformat.c:413
step_to_next_frame
static void step_to_next_frame(VideoState *is)
Definition: ffplay.c:1506
VideoState::sampq
FrameQueue sampq
Definition: ffplay.c:225
TextureFormatEntry::format
enum AVPixelFormat format
Definition: ffplay.c:371
FrameQueue::rindex
int rindex
Definition: ffplay.c:171
video_display
static void video_display(VideoState *is)
Definition: ffplay.c:1349
AVCodec::max_lowres
uint8_t max_lowres
maximum value for lowres supported by the decoder
Definition: codec.h:224
avformat_close_input
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: demux.c:369
parse_number_or_die
double parse_number_or_die(const char *context, const char *numstr, int type, double min, double max)
Parse a string and return its corresponding value as a double.
Definition: cmdutils.c:107
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:593
AVFormatContext::interrupt_callback
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1483
SDL_AUDIO_MIN_BUFFER_SIZE
#define SDL_AUDIO_MIN_BUFFER_SIZE
Definition: ffplay.c:74
print_error
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:799
startup_volume
static int startup_volume
Definition: ffplay.c:328
window
static SDL_Window * window
Definition: ffplay.c:365
avfilter_graph_create_filter
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
Definition: avfiltergraph.c:138
fifo.h
toggle_full_screen
static void toggle_full_screen(VideoState *is)
Definition: ffplay.c:3211
Clock::queue_serial
int * queue_serial
Definition: ffplay.c:150
VideoState::extclk
Clock extclk
Definition: ffplay.c:221
VideoState::seek_flags
int seek_flags
Definition: ffplay.c:212
alwaysontop
static int alwaysontop
Definition: ffplay.c:327
VideoState::audio_st
AVStream * audio_st
Definition: ffplay.c:241
packet_queue_init
static int packet_queue_init(PacketQueue *q)
Definition: ffplay.c:468
AUDIO_DIFF_AVG_NB
#define AUDIO_DIFF_AVG_NB
Definition: ffplay.c:99
AVCodecContext::ch_layout
AVChannelLayout ch_layout
Audio channel layout.
Definition: avcodec.h:2059
fail
#define fail()
Definition: checkasm.h:134
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
avfilter_graph_alloc
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:82
opt_duration
static int opt_duration(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3516
fn
#define fn(a)
Definition: afir_template.c:44
FrameQueue
Definition: ffplay.c:169
packet_queue_put
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:439
samplefmt.h
AVSubtitleRect::x
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:2281
VideoState::video_stream
int video_stream
Definition: ffplay.c:283
autoexit
static int autoexit
Definition: ffplay.c:337
avio_tell
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:510
AVERROR_OPTION_NOT_FOUND
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:63
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
AVChapter
Definition: avformat.h:1175
video_image_display
static void video_image_display(VideoState *is)
Definition: ffplay.c:961
val
static double val(void *priv, double ch)
Definition: aeval.c:77
AVFrame::ch_layout
AVChannelLayout ch_layout
Channel layout of the audio data.
Definition: frame.h:709
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
opt_show_mode
static int opt_show_mode(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3522
Decoder::empty_queue_cond
SDL_cond * empty_queue_cond
Definition: ffplay.c:195
pts
static int64_t pts
Definition: transcode_aac.c:654
set_clock_speed
static void set_clock_speed(Clock *c, double speed)
Definition: ffplay.c:1389
VideoState::audio_diff_threshold
double audio_diff_threshold
Definition: ffplay.c:239
sws_get_class
const AVClass * sws_get_class(void)
Get the AVClass for swsContext.
Definition: options.c:97
AV_PKT_DATA_DISPLAYMATRIX
@ AV_PKT_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: packet.h:109
av_opt_set
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:487
VideoState::audio_diff_cum
double audio_diff_cum
Definition: ffplay.c:237
VideoState::last_video_stream
int last_video_stream
Definition: ffplay.c:304
OPT_STRING
#define OPT_STRING
Definition: cmdutils.h:152
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:260
fast
static int fast
Definition: ffplay.c:333
loop
static int loop
Definition: ffplay.c:340
AVRational::num
int num
Numerator.
Definition: rational.h:59
avformat_network_init
int avformat_network_init(void)
Do global initialization of network libraries.
Definition: utils.c:557
VideoState::rdft_bits
int rdft_bits
Definition: ffplay.c:268
opt_height
static int opt_height(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3479
avsubtitle_free
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: avcodec.c:409
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:104
sdl_texture_format_map
static const struct TextureFormatEntry sdl_texture_format_map[]
AVFormatContext::bit_rate
int64_t bit_rate
Total stream bitrate in bit/s, 0 if not available.
Definition: avformat.h:1325
is_full_screen
static int is_full_screen
Definition: ffplay.c:360
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:212
set_sdl_yuv_conversion_mode
static void set_sdl_yuv_conversion_mode(AVFrame *frame)
Definition: ffplay.c:945
lrint
#define lrint
Definition: tablegen.h:53
Frame::flip_v
int flip_v
Definition: ffplay.c:166
av_guess_sample_aspect_ratio
AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
Guess the sample aspect ratio of a frame, based on both the stream and the frame aspect ratio.
Definition: avformat.c:655
AV_LOG_TRACE
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
Definition: log.h:206
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
AVFormatContext::metadata
AVDictionary * metadata
Metadata that applies to the whole file.
Definition: avformat.h:1445
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
AVInputFormat
Definition: avformat.h:656
audio_thread
static int audio_thread(void *arg)
Definition: ffplay.c:2026
set_clock
static void set_clock(Clock *c, double pts, int serial)
Definition: ffplay.c:1383
av_dump_format
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate,...
Definition: dump.c:629
VideoState
Definition: ffplay.c:203
frame_queue_peek_next
static Frame * frame_queue_peek_next(FrameQueue *f)
Definition: ffplay.c:722
sdl_audio_callback
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
Definition: ffplay.c:2441
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
sync_clock_to_slave
static void sync_clock_to_slave(Clock *c, Clock *slave)
Definition: ffplay.c:1403
swr_init
av_cold int swr_init(struct SwrContext *s)
Initialize context after user parameters have been set.
Definition: swresample.c:191
opt_input_file
static void opt_input_file(void *optctx, const char *filename)
Definition: ffplay.c:3531
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
avformat_open_input
int avformat_open_input(AVFormatContext **ps, const char *url, const AVInputFormat *fmt, AVDictionary **options)
Open an input stream and read the header.
Definition: demux.c:221
frame_queue_signal
static void frame_queue_signal(FrameQueue *f)
Definition: ffplay.c:710
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:60
av_channel_layout_describe
int av_channel_layout_describe(const AVChannelLayout *channel_layout, char *buf, size_t buf_size)
Get a human-readable string describing the channel layout properties.
Definition: channel_layout.c:778
OPT_INT
#define OPT_INT
Definition: cmdutils.h:155
VideoState::img_convert_ctx
struct SwsContext * img_convert_ctx
Definition: ffplay.c:287
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:149
width
#define width
VideoState::ShowMode
ShowMode
Definition: ffplay.c:261
Decoder::avctx
AVCodecContext * avctx
Definition: ffplay.c:191
s
#define s(width, name)
Definition: cbs_vp9.c:256
AV_OPT_FLAG_ENCODING_PARAM
#define AV_OPT_FLAG_ENCODING_PARAM
a generic parameter which can be set by the user for muxing or encoding
Definition: opt.h:281
show_help_default
void show_help_default(const char *opt, const char *arg)
Per-fftool specific help handler.
Definition: ffplay.c:3628
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:50
default_height
static int default_height
Definition: ffplay.c:314
AVFormatContext::flags
int flags
Flags modifying the (de)muxer behaviour.
Definition: avformat.h:1334
format
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
AVCodecParameters::sample_aspect_ratio
AVRational sample_aspect_ratio
Video only.
Definition: codec_par.h:138
AVInputFormat::name
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:661
AVFormatContext::iformat
const struct AVInputFormat * iformat
The input container format.
Definition: avformat.h:1228
AV_PIX_FMT_0BGR32
#define AV_PIX_FMT_0BGR32
Definition: pixfmt.h:437
AVCOL_SPC_SMPTE170M
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
Definition: pixfmt.h:594
AVDictionaryEntry::key
char * key
Definition: dict.h:90
Clock::last_updated
double last_updated
Definition: ffplay.c:146
PacketQueue::duration
int64_t duration
Definition: ffplay.c:123
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
AVSubtitleRect::y
int y
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:2282
AVCodecParameters::width
int width
Video only.
Definition: codec_par.h:128
calculate_display_rect
static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, int pic_width, int pic_height, AVRational pic_sar)
Definition: ffplay.c:849
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
screen_height
static int screen_height
Definition: ffplay.c:316
EXTERNAL_CLOCK_SPEED_STEP
#define EXTERNAL_CLOCK_SPEED_STEP
Definition: ffplay.c:96
Decoder::pkt_serial
int pkt_serial
Definition: ffplay.c:192
avcodec_receive_frame
int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder or encoder (when the AV_CODEC_FLAG_RECON_FRAME flag is used...
Definition: avcodec.c:709
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
AVMEDIA_TYPE_NB
@ AVMEDIA_TYPE_NB
Definition: avutil.h:206
outputs
static const AVFilterPad outputs[]
Definition: af_acontrast.c:172
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
av_read_play
int av_read_play(AVFormatContext *s)
Start playing a network-based stream (e.g.
Definition: demux_utils.c:202
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
opt_codec
static int opt_codec(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3544
Clock::pts_drift
double pts_drift
Definition: ffplay.c:145
VideoState::videoq
PacketQueue videoq
Definition: ffplay.c:285
ctx
AVFormatContext * ctx
Definition: movenc.c:48
channels
channels
Definition: aptx.h:31
limits.h
REFRESH_RATE
#define REFRESH_RATE
Definition: ffplay.c:102
FrameQueue::rindex_shown
int rindex_shown
Definition: ffplay.c:176
nb_streams
static int nb_streams
Definition: ffprobe.c:308
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
VideoState::force_refresh
int force_refresh
Definition: ffplay.c:207
get_clock
static double get_clock(Clock *c)
Definition: ffplay.c:1363
screen_top
static int screen_top
Definition: ffplay.c:318
VideoState::audio_diff_avg_count
int audio_diff_avg_count
Definition: ffplay.c:240
EXTERNAL_CLOCK_SPEED_MIN
#define EXTERNAL_CLOCK_SPEED_MIN
Definition: ffplay.c:94
sws_flags
static unsigned sws_flags
Definition: ffplay.c:112
parse_options
void parse_options(void *optctx, int argc, char **argv, const OptionDef *options, void(*parse_arg_function)(void *, const char *))
Definition: cmdutils.c:351
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
av_get_sample_fmt_name
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:51
renderer
static SDL_Renderer * renderer
Definition: ffplay.c:366
av_usleep
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
SwrContext
The libswresample context.
Definition: swresample_internal.h:95
vp_duration
static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp)
Definition: ffplay.c:1544
NAN
#define NAN
Definition: mathematics.h:64
AV_PIX_FMT_BGR32_1
#define AV_PIX_FMT_BGR32_1
Definition: pixfmt.h:435
VideoState::step
int step
Definition: ffplay.c:293
av_rdft_calc
void av_rdft_calc(RDFTContext *s, FFTSample *data)
synchronize_audio
static int synchronize_audio(VideoState *is, int nb_samples)
Definition: ffplay.c:2284
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:408
arg
const char * arg
Definition: jacosubdec.c:67
if
if(ret)
Definition: filter_design.txt:179
window_title
static const char * window_title
Definition: ffplay.c:312
Clock::speed
double speed
Definition: ffplay.c:147
VideoState::SHOW_MODE_VIDEO
@ SHOW_MODE_VIDEO
Definition: ffplay.c:262
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:76
AVFormatContext
Format I/O context.
Definition: avformat.h:1216
av_log_get_level
int av_log_get_level(void)
Get the current log level.
Definition: log.c:437
avfilter_get_by_name
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:601
init_dynload
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:77
opts
AVDictionary * opts
Definition: movenc.c:50
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:1108
AVSubtitleRect::w
int w
width of pict, undefined when pict is not set
Definition: avcodec.h:2283
seek_chapter
static void seek_chapter(VideoState *is, int incr)
Definition: ffplay.c:3246
get_master_sync_type
static int get_master_sync_type(VideoState *is)
Definition: ffplay.c:1411
avcodec_get_class
const AVClass * avcodec_get_class(void)
Get the AVClass for AVCodecContext.
Definition: options.c:183
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
avfilter_graph_config
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
Definition: avfiltergraph.c:1167
AVStream::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avformat.h:978
NULL
#define NULL
Definition: coverity.c:32
avcodec_find_decoder_by_name
const AVCodec * avcodec_find_decoder_by_name(const char *name)
Find a registered decoder with the specified name.
Definition: allcodecs.c:979
FrameQueue::max_size
int max_size
Definition: ffplay.c:174
OPT_EXPERT
#define OPT_EXPERT
Definition: cmdutils.h:151
AV_SYNC_AUDIO_MASTER
@ AV_SYNC_AUDIO_MASTER
Definition: ffplay.c:183
AV_PIX_FMT_YUYV422
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:67
Decoder
Definition: ffplay.c:188
AudioParams::freq
int freq
Definition: ffplay.c:136
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:164
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
isnan
#define isnan(x)
Definition: libm.h:340
AudioParams::ch_layout
AVChannelLayout ch_layout
Definition: ffplay.c:137
audio_open
static int audio_open(void *opaque, AVChannelLayout *wanted_channel_layout, int wanted_sample_rate, struct AudioParams *audio_hw_params)
Definition: ffplay.c:2484
AVFilterGraph::filters
AVFilterContext ** filters
Definition: avfilter.h:880
stream_cycle_channel
static void stream_cycle_channel(VideoState *is, int codec_type)
Definition: ffplay.c:3132
VideoState::frame_drops_late
int frame_drops_late
Definition: ffplay.c:259
AV_CODEC_FLAG2_FAST
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:303
VideoState::rdft
RDFTContext * rdft
Definition: ffplay.c:267
AVFormatContext::pb
AVIOContext * pb
I/O context.
Definition: avformat.h:1258
av_log_set_flags
void av_log_set_flags(int arg)
Definition: log.c:447
parseutils.h
frame_queue_unref_item
static void frame_queue_unref_item(Frame *vp)
Definition: ffplay.c:671
FrameQueue::queue
Frame queue[FRAME_QUEUE_SIZE]
Definition: ffplay.c:170
VideoState::last_i_start
int last_i_start
Definition: ffplay.c:266
Decoder::packet_pending
int packet_pending
Definition: ffplay.c:194
cursor_last_shown
static int64_t cursor_last_shown
Definition: ffplay.c:348
AVProgram::stream_index
unsigned int * stream_index
Definition: avformat.h:1144
frame_queue_peek
static Frame * frame_queue_peek(FrameQueue *f)
Definition: ffplay.c:717
OPT_INT64
#define OPT_INT64
Definition: cmdutils.h:158
avfilter_inout_alloc
AVFilterInOut * avfilter_inout_alloc(void)
Allocate a single AVFilterInOut entry.
Definition: graphparser.c:207
Frame::duration
double duration
Definition: ffplay.c:159
DFT_R2C
@ DFT_R2C
Definition: avfft.h:72
lowres
static int lowres
Definition: ffplay.c:335
double
double
Definition: af_crystalizer.c:132
AV_DICT_DONT_OVERWRITE
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
Definition: dict.h:81
AV_PIX_FMT_RGB8
@ AV_PIX_FMT_RGB8
packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
Definition: pixfmt.h:86
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:115
time.h
Frame::pos
int64_t pos
Definition: ffplay.c:160
FFTSample
float FFTSample
Definition: avfft.h:35
avfft.h
VideoState::frame_last_returned_time
double frame_last_returned_time
Defini