FFmpeg
ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include "config_components.h"
28 #include <inttypes.h>
29 #include <math.h>
30 #include <limits.h>
31 #include <signal.h>
32 #include <stdint.h>
33 
34 #include "libavutil/avstring.h"
35 #include "libavutil/channel_layout.h"
36 #include "libavutil/eval.h"
37 #include "libavutil/mathematics.h"
38 #include "libavutil/pixdesc.h"
39 #include "libavutil/imgutils.h"
40 #include "libavutil/dict.h"
41 #include "libavutil/fifo.h"
42 #include "libavutil/parseutils.h"
43 #include "libavutil/samplefmt.h"
44 #include "libavutil/time.h"
45 #include "libavutil/bprint.h"
46 #include "libavformat/avformat.h"
47 #include "libavdevice/avdevice.h"
48 #include "libswscale/swscale.h"
49 #include "libavutil/opt.h"
50 #include "libavutil/tx.h"
51 #include "libswresample/swresample.h"
52 
53 #include "libavfilter/avfilter.h"
54 #include "libavfilter/buffersink.h"
55 #include "libavfilter/buffersrc.h"
56 
57 #include <SDL.h>
58 #include <SDL_thread.h>
59 
60 #include "cmdutils.h"
61 #include "ffplay_renderer.h"
62 #include "opt_common.h"
63 
64 const char program_name[] = "ffplay";
65 const int program_birth_year = 2003;
66 
67 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
68 #define MIN_FRAMES 25
69 #define EXTERNAL_CLOCK_MIN_FRAMES 2
70 #define EXTERNAL_CLOCK_MAX_FRAMES 10
71 
72 /* Minimum SDL audio buffer size, in samples. */
73 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
74 /* Calculate the actual buffer size, keeping in mind not to cause too frequent audio callbacks */
75 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
76 
77 /* Step size for volume control in dB */
78 #define SDL_VOLUME_STEP (0.75)
79 
80 /* no AV sync correction is done if below the minimum AV sync threshold */
81 #define AV_SYNC_THRESHOLD_MIN 0.04
82 /* AV sync correction is done if above the maximum AV sync threshold */
83 #define AV_SYNC_THRESHOLD_MAX 0.1
84 /* If a frame's duration is longer than this, it will not be duplicated to compensate for AV sync */
85 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
86 /* no AV correction is done if the error is too big */
87 #define AV_NOSYNC_THRESHOLD 10.0
88 
89 /* maximum audio speed change to get correct sync */
90 #define SAMPLE_CORRECTION_PERCENT_MAX 10
91 
92 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
93 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
94 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
95 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
96 
97 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
98 #define AUDIO_DIFF_AVG_NB 20
99 
100 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
101 #define REFRESH_RATE 0.01
102 
103 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
104 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
105 #define SAMPLE_ARRAY_SIZE (8 * 65536)
106 
107 #define CURSOR_HIDE_DELAY 1000000
108 
109 #define USE_ONEPASS_SUBTITLE_RENDER 1
110 
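/* A demuxed packet together with the queue "serial" it was queued under.
 * The serial is bumped by packet_queue_flush()/packet_queue_start() on every
 * seek, so decoders and clocks can detect and drop data that belongs to an
 * obsolete (pre-seek) generation. */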
111 typedef struct MyAVPacketList {
112  AVPacket *pkt;
113  int serial;
114 } MyAVPacketList;
115 
116 typedef struct PacketQueue {
117  AVFifo *pkt_list;
118  int nb_packets;
119  int size;
120  int64_t duration;
121  int abort_request;
122  int serial;
123  SDL_mutex *mutex;
124  SDL_cond *cond;
125 } PacketQueue;
126 
127 #define VIDEO_PICTURE_QUEUE_SIZE 3
128 #define SUBPICTURE_QUEUE_SIZE 16
129 #define SAMPLE_QUEUE_SIZE 9
130 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
131 
132 typedef struct AudioParams {
133  int freq;
134  AVChannelLayout ch_layout;
135  enum AVSampleFormat fmt;
136  int frame_size;
137  int bytes_per_sec;
138 } AudioParams;
139 
140 typedef struct Clock {
141  double pts; /* clock base */
142  double pts_drift; /* clock base minus time at which we updated the clock */
143  double last_updated;
144  double speed;
145  int serial; /* clock is based on a packet with this serial */
146  int paused;
147  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
148 } Clock;
149 
150 typedef struct FrameData {
151  int64_t pkt_pos;
152 } FrameData;
153 
154 /* Common struct for handling all types of decoded data and allocated render buffers. */
155 typedef struct Frame {
156  AVFrame *frame;
157  AVSubtitle sub;
158  int serial;
159  double pts; /* presentation timestamp for the frame */
160  double duration; /* estimated duration of the frame */
161  int64_t pos; /* byte position of the frame in the input file */
162  int width;
163  int height;
164  int format;
165  AVRational sar;
166  int uploaded;
167  int flip_v;
168 } Frame;
169 
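/* Ring buffer of decoded frames, filled by the decoder threads and drained by
 * the display and audio code: windex is the next write slot, rindex the next
 * read slot, and rindex_shown marks whether the frame at rindex has already
 * been displayed (when keep_last is set it is kept around so it can be
 * re-rendered, e.g. while paused). */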
170 typedef struct FrameQueue {
171  Frame queue[FRAME_QUEUE_SIZE];
172  int rindex;
173  int windex;
174  int size;
175  int max_size;
176  int keep_last;
177  int rindex_shown;
178  SDL_mutex *mutex;
179  SDL_cond *cond;
180  PacketQueue *pktq;
181 } FrameQueue;
182 
183 enum {
184  AV_SYNC_AUDIO_MASTER, /* default choice */
185  AV_SYNC_VIDEO_MASTER,
186  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
187 };
188 
189 typedef struct Decoder {
190  AVPacket *pkt;
191  PacketQueue *queue;
192  AVCodecContext *avctx;
193  int pkt_serial;
194  int finished;
195  int packet_pending;
196  SDL_cond *empty_queue_cond;
197  int64_t start_pts;
198  AVRational start_pts_tb;
199  int64_t next_pts;
200  AVRational next_pts_tb;
201  SDL_Thread *decoder_tid;
202 } Decoder;
203 
204 typedef struct VideoState {
205  SDL_Thread *read_tid;
209  int paused;
212  int seek_req;
214  int64_t seek_pos;
215  int64_t seek_rel;
218  int realtime;
219 
223 
227 
231 
233 
235 
236  double audio_clock;
238  double audio_diff_cum; /* used for AV difference average computation */
245  uint8_t *audio_buf;
246  uint8_t *audio_buf1;
247  unsigned int audio_buf_size; /* in bytes */
248  unsigned int audio_buf1_size;
249  int audio_buf_index; /* in bytes */
252  int muted;
259 
260  enum ShowMode {
262  } show_mode;
269  float *real_data;
271  int xpos;
273  SDL_Texture *vis_texture;
274  SDL_Texture *sub_texture;
275  SDL_Texture *vid_texture;
276 
280 
281  double frame_timer;
287  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
289  int eof;
290 
291  char *filename;
293  int step;
294 
296  AVFilterContext *in_video_filter; // the first filter in the video chain
297  AVFilterContext *out_video_filter; // the last filter in the video chain
298  AVFilterContext *in_audio_filter; // the first filter in the audio chain
299  AVFilterContext *out_audio_filter; // the last filter in the audio chain
300  AVFilterGraph *agraph; // audio filter graph
301 
303 
305 } VideoState;
306 
307 /* options specified by the user */
309 static const char *input_filename;
310 static const char *window_title;
311 static int default_width = 640;
312 static int default_height = 480;
313 static int screen_width = 0;
314 static int screen_height = 0;
315 static int screen_left = SDL_WINDOWPOS_CENTERED;
316 static int screen_top = SDL_WINDOWPOS_CENTERED;
317 static int audio_disable;
318 static int video_disable;
319 static int subtitle_disable;
320 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
321 static int seek_by_bytes = -1;
322 static float seek_interval = 10;
323 static int display_disable;
324 static int borderless;
325 static int alwaysontop;
326 static int startup_volume = 100;
327 static int show_status = -1;
329 static int64_t start_time = AV_NOPTS_VALUE;
330 static int64_t duration = AV_NOPTS_VALUE;
331 static int fast = 0;
332 static int genpts = 0;
333 static int lowres = 0;
334 static int decoder_reorder_pts = -1;
335 static int autoexit;
336 static int exit_on_keydown;
337 static int exit_on_mousedown;
338 static int loop = 1;
339 static int framedrop = -1;
340 static int infinite_buffer = -1;
341 static enum ShowMode show_mode = SHOW_MODE_NONE;
342 static const char *audio_codec_name;
343 static const char *subtitle_codec_name;
344 static const char *video_codec_name;
345 double rdftspeed = 0.02;
346 static int64_t cursor_last_shown;
347 static int cursor_hidden = 0;
348 static const char **vfilters_list = NULL;
349 static int nb_vfilters = 0;
350 static char *afilters = NULL;
351 static int autorotate = 1;
352 static int find_stream_info = 1;
353 static int filter_nbthreads = 0;
354 static int enable_vulkan = 0;
355 static char *vulkan_params = NULL;
356 static const char *hwaccel = NULL;
357 
358 /* current context */
359 static int is_full_screen;
360 static int64_t audio_callback_time;
361 
362 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
363 
364 static SDL_Window *window;
365 static SDL_Renderer *renderer;
366 static SDL_RendererInfo renderer_info = {0};
367 static SDL_AudioDeviceID audio_dev;
368 
368 
369 static VkRenderer *vk_renderer;
370 
371 static const struct TextureFormatEntry {
372  enum AVPixelFormat format;
373  int texture_fmt;
374 } sdl_texture_format_map[] = {
375  { AV_PIX_FMT_RGB8, SDL_PIXELFORMAT_RGB332 },
376  { AV_PIX_FMT_RGB444, SDL_PIXELFORMAT_RGB444 },
377  { AV_PIX_FMT_RGB555, SDL_PIXELFORMAT_RGB555 },
378  { AV_PIX_FMT_BGR555, SDL_PIXELFORMAT_BGR555 },
379  { AV_PIX_FMT_RGB565, SDL_PIXELFORMAT_RGB565 },
380  { AV_PIX_FMT_BGR565, SDL_PIXELFORMAT_BGR565 },
381  { AV_PIX_FMT_RGB24, SDL_PIXELFORMAT_RGB24 },
382  { AV_PIX_FMT_BGR24, SDL_PIXELFORMAT_BGR24 },
383  { AV_PIX_FMT_0RGB32, SDL_PIXELFORMAT_RGB888 },
384  { AV_PIX_FMT_0BGR32, SDL_PIXELFORMAT_BGR888 },
385  { AV_PIX_FMT_NE(RGB0, 0BGR), SDL_PIXELFORMAT_RGBX8888 },
386  { AV_PIX_FMT_NE(BGR0, 0RGB), SDL_PIXELFORMAT_BGRX8888 },
387  { AV_PIX_FMT_RGB32, SDL_PIXELFORMAT_ARGB8888 },
388  { AV_PIX_FMT_RGB32_1, SDL_PIXELFORMAT_RGBA8888 },
389  { AV_PIX_FMT_BGR32, SDL_PIXELFORMAT_ABGR8888 },
390  { AV_PIX_FMT_BGR32_1, SDL_PIXELFORMAT_BGRA8888 },
391  { AV_PIX_FMT_YUV420P, SDL_PIXELFORMAT_IYUV },
392  { AV_PIX_FMT_YUYV422, SDL_PIXELFORMAT_YUY2 },
393  { AV_PIX_FMT_UYVY422, SDL_PIXELFORMAT_UYVY },
394  { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_UNKNOWN },
395 };
396 
397 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
398 {
399  int ret = GROW_ARRAY(vfilters_list, nb_vfilters);
400  if (ret < 0)
401  return ret;
402 
403  vfilters_list[nb_vfilters - 1] = av_strdup(arg);
404  if (!vfilters_list[nb_vfilters - 1])
405  return AVERROR(ENOMEM);
406 
407  return 0;
408 }
409 
410 static inline
411 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
412  enum AVSampleFormat fmt2, int64_t channel_count2)
413 {
414  /* If channel count == 1, planar and non-planar formats are the same */
415  if (channel_count1 == 1 && channel_count2 == 1)
416  return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
417  else
418  return channel_count1 != channel_count2 || fmt1 != fmt2;
419 }
420 
421 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
422 {
423  MyAVPacketList pkt1;
424  int ret;
425 
426  if (q->abort_request)
427  return -1;
428 
429 
430  pkt1.pkt = pkt;
431  pkt1.serial = q->serial;
432 
433  ret = av_fifo_write(q->pkt_list, &pkt1, 1);
434  if (ret < 0)
435  return ret;
436  q->nb_packets++;
437  q->size += pkt1.pkt->size + sizeof(pkt1);
438  q->duration += pkt1.pkt->duration;
439  /* XXX: should duplicate packet data in DV case */
440  SDL_CondSignal(q->cond);
441  return 0;
442 }
443 
444 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
445 {
446  AVPacket *pkt1;
447  int ret;
448 
449  pkt1 = av_packet_alloc();
450  if (!pkt1) {
451  av_packet_unref(pkt);
452  return -1;
453  }
454  av_packet_move_ref(pkt1, pkt);
455 
456  SDL_LockMutex(q->mutex);
457  ret = packet_queue_put_private(q, pkt1);
458  SDL_UnlockMutex(q->mutex);
459 
460  if (ret < 0)
461  av_packet_free(&pkt1);
462 
463  return ret;
464 }
465 
466 static int packet_queue_put_nullpacket(PacketQueue *q, AVPacket *pkt, int stream_index)
467 {
468  pkt->stream_index = stream_index;
469  return packet_queue_put(q, pkt);
470 }
471 
472 /* packet queue handling */
473 static int packet_queue_init(PacketQueue *q)
474 {
475  memset(q, 0, sizeof(PacketQueue));
476  q->pkt_list = av_fifo_alloc2(1, sizeof(MyAVPacketList), AV_FIFO_FLAG_AUTO_GROW);
477  if (!q->pkt_list)
478  return AVERROR(ENOMEM);
479  q->mutex = SDL_CreateMutex();
480  if (!q->mutex) {
481  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
482  return AVERROR(ENOMEM);
483  }
484  q->cond = SDL_CreateCond();
485  if (!q->cond) {
486  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
487  return AVERROR(ENOMEM);
488  }
489  q->abort_request = 1;
490  return 0;
491 }
492 
493 static void packet_queue_flush(PacketQueue *q)
494 {
495  MyAVPacketList pkt1;
496 
497  SDL_LockMutex(q->mutex);
498  while (av_fifo_read(q->pkt_list, &pkt1, 1) >= 0)
499  av_packet_free(&pkt1.pkt);
500  q->nb_packets = 0;
501  q->size = 0;
502  q->duration = 0;
503  q->serial++;
504  SDL_UnlockMutex(q->mutex);
505 }
506 
507 static void packet_queue_destroy(PacketQueue *q)
508 {
509  packet_queue_flush(q);
510  av_fifo_freep2(&q->pkt_list);
511  SDL_DestroyMutex(q->mutex);
512  SDL_DestroyCond(q->cond);
513 }
514 
515 static void packet_queue_abort(PacketQueue *q)
516 {
517  SDL_LockMutex(q->mutex);
518 
519  q->abort_request = 1;
520 
521  SDL_CondSignal(q->cond);
522 
523  SDL_UnlockMutex(q->mutex);
524 }
525 
526 static void packet_queue_start(PacketQueue *q)
527 {
528  SDL_LockMutex(q->mutex);
529  q->abort_request = 0;
530  q->serial++;
531  SDL_UnlockMutex(q->mutex);
532 }
533 
534 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
535 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
536 {
537  MyAVPacketList pkt1;
538  int ret;
539 
540  SDL_LockMutex(q->mutex);
541 
542  for (;;) {
543  if (q->abort_request) {
544  ret = -1;
545  break;
546  }
547 
548  if (av_fifo_read(q->pkt_list, &pkt1, 1) >= 0) {
549  q->nb_packets--;
550  q->size -= pkt1.pkt->size + sizeof(pkt1);
551  q->duration -= pkt1.pkt->duration;
552  av_packet_move_ref(pkt, pkt1.pkt);
553  if (serial)
554  *serial = pkt1.serial;
555  av_packet_free(&pkt1.pkt);
556  ret = 1;
557  break;
558  } else if (!block) {
559  ret = 0;
560  break;
561  } else {
562  SDL_CondWait(q->cond, q->mutex);
563  }
564  }
565  SDL_UnlockMutex(q->mutex);
566  return ret;
567 }
568 
569 static int decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
570  memset(d, 0, sizeof(Decoder));
571  d->pkt = av_packet_alloc();
572  if (!d->pkt)
573  return AVERROR(ENOMEM);
574  d->avctx = avctx;
575  d->queue = queue;
576  d->empty_queue_cond = empty_queue_cond;
577  d->start_pts = AV_NOPTS_VALUE;
578  d->pkt_serial = -1;
579  return 0;
580 }
581 
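/* Generic decoding loop shared by the audio, video and subtitle decoders:
 * first drain every frame the codec has buffered (avcodec_receive_frame),
 * then fetch the next packet from the PacketQueue and feed it with
 * avcodec_send_packet. A packet whose serial differs from the queue serial
 * (i.e. queued before a seek) triggers a flush instead of being decoded.
 * Returns 1 when a frame/subtitle was produced, 0 on EOF, -1 when the queue
 * was aborted. */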
582 static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
583  int ret = AVERROR(EAGAIN);
584 
585  for (;;) {
586  if (d->queue->serial == d->pkt_serial) {
587  do {
588  if (d->queue->abort_request)
589  return -1;
590 
591  switch (d->avctx->codec_type) {
592  case AVMEDIA_TYPE_VIDEO:
593  ret = avcodec_receive_frame(d->avctx, frame);
594  if (ret >= 0) {
595  if (decoder_reorder_pts == -1) {
596  frame->pts = frame->best_effort_timestamp;
597  } else if (!decoder_reorder_pts) {
598  frame->pts = frame->pkt_dts;
599  }
600  }
601  break;
602  case AVMEDIA_TYPE_AUDIO:
603  ret = avcodec_receive_frame(d->avctx, frame);
604  if (ret >= 0) {
605  AVRational tb = (AVRational){1, frame->sample_rate};
606  if (frame->pts != AV_NOPTS_VALUE)
607  frame->pts = av_rescale_q(frame->pts, d->avctx->pkt_timebase, tb);
608  else if (d->next_pts != AV_NOPTS_VALUE)
609  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
610  if (frame->pts != AV_NOPTS_VALUE) {
611  d->next_pts = frame->pts + frame->nb_samples;
612  d->next_pts_tb = tb;
613  }
614  }
615  break;
616  }
617  if (ret == AVERROR_EOF) {
618  d->finished = d->pkt_serial;
619  avcodec_flush_buffers(d->avctx);
620  return 0;
621  }
622  if (ret >= 0)
623  return 1;
624  } while (ret != AVERROR(EAGAIN));
625  }
626 
627  do {
628  if (d->queue->nb_packets == 0)
629  SDL_CondSignal(d->empty_queue_cond);
630  if (d->packet_pending) {
631  d->packet_pending = 0;
632  } else {
633  int old_serial = d->pkt_serial;
634  if (packet_queue_get(d->queue, d->pkt, 1, &d->pkt_serial) < 0)
635  return -1;
636  if (old_serial != d->pkt_serial) {
637  avcodec_flush_buffers(d->avctx);
638  d->finished = 0;
639  d->next_pts = d->start_pts;
640  d->next_pts_tb = d->start_pts_tb;
641  }
642  }
643  if (d->queue->serial == d->pkt_serial)
644  break;
645  av_packet_unref(d->pkt);
646  } while (1);
647 
648  if (d->avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
649  int got_frame = 0;
650  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, d->pkt);
651  if (ret < 0) {
652  ret = AVERROR(EAGAIN);
653  } else {
654  if (got_frame && !d->pkt->data) {
655  d->packet_pending = 1;
656  }
657  ret = got_frame ? 0 : (d->pkt->data ? AVERROR(EAGAIN) : AVERROR_EOF);
658  }
659  av_packet_unref(d->pkt);
660  } else {
661  if (d->pkt->buf && !d->pkt->opaque_ref) {
662  FrameData *fd;
663 
664  d->pkt->opaque_ref = av_buffer_allocz(sizeof(*fd));
665  if (!d->pkt->opaque_ref)
666  return AVERROR(ENOMEM);
667  fd = (FrameData*)d->pkt->opaque_ref->data;
668  fd->pkt_pos = d->pkt->pos;
669  }
670 
671  if (avcodec_send_packet(d->avctx, d->pkt) == AVERROR(EAGAIN)) {
672  av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
673  d->packet_pending = 1;
674  } else {
675  av_packet_unref(d->pkt);
676  }
677  }
678  }
679 }
680 
681 static void decoder_destroy(Decoder *d) {
682  av_packet_free(&d->pkt);
683  avcodec_free_context(&d->avctx);
684 }
685 
686 static void frame_queue_unref_item(Frame *vp)
687 {
688  av_frame_unref(vp->frame);
689  avsubtitle_free(&vp->sub);
690 }
691 
692 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
693 {
694  int i;
695  memset(f, 0, sizeof(FrameQueue));
696  if (!(f->mutex = SDL_CreateMutex())) {
697  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
698  return AVERROR(ENOMEM);
699  }
700  if (!(f->cond = SDL_CreateCond())) {
701  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
702  return AVERROR(ENOMEM);
703  }
704  f->pktq = pktq;
705  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
706  f->keep_last = !!keep_last;
707  for (i = 0; i < f->max_size; i++)
708  if (!(f->queue[i].frame = av_frame_alloc()))
709  return AVERROR(ENOMEM);
710  return 0;
711 }
712 
713 static void frame_queue_destroy(FrameQueue *f)
714 {
715  int i;
716  for (i = 0; i < f->max_size; i++) {
717  Frame *vp = &f->queue[i];
718  frame_queue_unref_item(vp);
719  av_frame_free(&vp->frame);
720  }
721  SDL_DestroyMutex(f->mutex);
722  SDL_DestroyCond(f->cond);
723 }
724 
725 static void frame_queue_signal(FrameQueue *f)
726 {
727  SDL_LockMutex(f->mutex);
728  SDL_CondSignal(f->cond);
729  SDL_UnlockMutex(f->mutex);
730 }
731 
732 static Frame *frame_queue_peek(FrameQueue *f)
733 {
734  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
735 }
736 
737 static Frame *frame_queue_peek_next(FrameQueue *f)
738 {
739  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
740 }
741 
742 static Frame *frame_queue_peek_last(FrameQueue *f)
743 {
744  return &f->queue[f->rindex];
745 }
746 
747 static Frame *frame_queue_peek_writable(FrameQueue *f)
748 {
749  /* wait until we have space to put a new frame */
750  SDL_LockMutex(f->mutex);
751  while (f->size >= f->max_size &&
752  !f->pktq->abort_request) {
753  SDL_CondWait(f->cond, f->mutex);
754  }
755  SDL_UnlockMutex(f->mutex);
756 
757  if (f->pktq->abort_request)
758  return NULL;
759 
760  return &f->queue[f->windex];
761 }
762 
763 static Frame *frame_queue_peek_readable(FrameQueue *f)
764 {
765  /* wait until we have a readable new frame */
766  SDL_LockMutex(f->mutex);
767  while (f->size - f->rindex_shown <= 0 &&
768  !f->pktq->abort_request) {
769  SDL_CondWait(f->cond, f->mutex);
770  }
771  SDL_UnlockMutex(f->mutex);
772 
773  if (f->pktq->abort_request)
774  return NULL;
775 
776  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
777 }
778 
779 static void frame_queue_push(FrameQueue *f)
780 {
781  if (++f->windex == f->max_size)
782  f->windex = 0;
783  SDL_LockMutex(f->mutex);
784  f->size++;
785  SDL_CondSignal(f->cond);
786  SDL_UnlockMutex(f->mutex);
787 }
788 
789 static void frame_queue_next(FrameQueue *f)
790 {
791  if (f->keep_last && !f->rindex_shown) {
792  f->rindex_shown = 1;
793  return;
794  }
795  frame_queue_unref_item(&f->queue[f->rindex]);
796  if (++f->rindex == f->max_size)
797  f->rindex = 0;
798  SDL_LockMutex(f->mutex);
799  f->size--;
800  SDL_CondSignal(f->cond);
801  SDL_UnlockMutex(f->mutex);
802 }
803 
804 /* return the number of undisplayed frames in the queue */
805 static int frame_queue_nb_remaining(FrameQueue *f)
806 {
807  return f->size - f->rindex_shown;
808 }
809 
810 /* return last shown position */
811 static int64_t frame_queue_last_pos(FrameQueue *f)
812 {
813  Frame *fp = &f->queue[f->rindex];
814  if (f->rindex_shown && fp->serial == f->pktq->serial)
815  return fp->pos;
816  else
817  return -1;
818 }
819 
820 static void decoder_abort(Decoder *d, FrameQueue *fq)
821 {
822  packet_queue_abort(d->queue);
823  frame_queue_signal(fq);
824  SDL_WaitThread(d->decoder_tid, NULL);
825  d->decoder_tid = NULL;
826  packet_queue_flush(d->queue);
827 }
828 
829 static inline void fill_rectangle(int x, int y, int w, int h)
830 {
831  SDL_Rect rect;
832  rect.x = x;
833  rect.y = y;
834  rect.w = w;
835  rect.h = h;
836  if (w && h)
837  SDL_RenderFillRect(renderer, &rect);
838 }
839 
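/* (Re)create the SDL texture only when the requested dimensions or pixel
 * format differ from the current ones; optionally zero-initialize it so stale
 * pixels are never blended in. Returns 0 on success, -1 on SDL failure. */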
840 static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
841 {
842  Uint32 format;
843  int access, w, h;
844  if (!*texture || SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
845  void *pixels;
846  int pitch;
847  if (*texture)
848  SDL_DestroyTexture(*texture);
849  if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
850  return -1;
851  if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
852  return -1;
853  if (init_texture) {
854  if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
855  return -1;
856  memset(pixels, 0, pitch * new_height);
857  SDL_UnlockTexture(*texture);
858  }
859  av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n", new_width, new_height, SDL_GetPixelFormatName(new_format));
860  }
861  return 0;
862 }
863 
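/* Illustrative example (numbers assumed, not from the source): a 640x480
 * picture with a 1:1 sample aspect ratio shown in a 1280x720 area yields
 * aspect_ratio = 4/3, so width = 720 * 4 / 3 = 960 (already even), the
 * picture is not wider than the screen, and the resulting rectangle is
 * 960x720 centered at x = (1280 - 960) / 2 = 160, y = 0. */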
864 static void calculate_display_rect(SDL_Rect *rect,
865  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
866  int pic_width, int pic_height, AVRational pic_sar)
867 {
868  AVRational aspect_ratio = pic_sar;
869  int64_t width, height, x, y;
870 
871  if (av_cmp_q(aspect_ratio, av_make_q(0, 1)) <= 0)
872  aspect_ratio = av_make_q(1, 1);
873 
874  aspect_ratio = av_mul_q(aspect_ratio, av_make_q(pic_width, pic_height));
875 
876  /* XXX: we suppose the screen has a 1.0 pixel ratio */
877  height = scr_height;
878  width = av_rescale(height, aspect_ratio.num, aspect_ratio.den) & ~1;
879  if (width > scr_width) {
880  width = scr_width;
881  height = av_rescale(width, aspect_ratio.den, aspect_ratio.num) & ~1;
882  }
883  x = (scr_width - width) / 2;
884  y = (scr_height - height) / 2;
885  rect->x = scr_xleft + x;
886  rect->y = scr_ytop + y;
887  rect->w = FFMAX((int)width, 1);
888  rect->h = FFMAX((int)height, 1);
889 }
890 
891 static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
892 {
893  int i;
894  *sdl_blendmode = SDL_BLENDMODE_NONE;
895  *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN;
896  if (format == AV_PIX_FMT_RGB32 ||
897  format == AV_PIX_FMT_RGB32_1 ||
898  format == AV_PIX_FMT_BGR32 ||
899  format == AV_PIX_FMT_BGR32_1)
900  *sdl_blendmode = SDL_BLENDMODE_BLEND;
901  for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; i++) {
902  if (format == sdl_texture_format_map[i].format) {
903  *sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt;
904  return;
905  }
906  }
907 }
908 
909 static int upload_texture(SDL_Texture **tex, AVFrame *frame)
910 {
911  int ret = 0;
912  Uint32 sdl_pix_fmt;
913  SDL_BlendMode sdl_blendmode;
914  get_sdl_pix_fmt_and_blendmode(frame->format, &sdl_pix_fmt, &sdl_blendmode);
915  if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
916  return -1;
917  switch (sdl_pix_fmt) {
918  case SDL_PIXELFORMAT_IYUV:
919  if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
920  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0],
921  frame->data[1], frame->linesize[1],
922  frame->data[2], frame->linesize[2]);
923  } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
924  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0],
925  frame->data[1] + frame->linesize[1] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[1],
926  frame->data[2] + frame->linesize[2] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[2]);
927  } else {
928  av_log(NULL, AV_LOG_ERROR, "Mixed negative and positive linesizes are not supported.\n");
929  return -1;
930  }
931  break;
932  default:
933  if (frame->linesize[0] < 0) {
934  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
935  } else {
936  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0], frame->linesize[0]);
937  }
938  break;
939  }
940  return ret;
941 }
942 
948 };
949 
951 {
952 #if SDL_VERSION_ATLEAST(2,0,8)
953  SDL_YUV_CONVERSION_MODE mode = SDL_YUV_CONVERSION_AUTOMATIC;
956  mode = SDL_YUV_CONVERSION_JPEG;
957  else if (frame->colorspace == AVCOL_SPC_BT709)
958  mode = SDL_YUV_CONVERSION_BT709;
960  mode = SDL_YUV_CONVERSION_BT601;
961  }
962  SDL_SetYUVConversionMode(mode); /* FIXME: no support for linear transfer */
963 #endif
964 }
965 
966 static void video_image_display(VideoState *is)
967 {
968  Frame *vp;
969  Frame *sp = NULL;
970  SDL_Rect rect;
971 
972  vp = frame_queue_peek_last(&is->pictq);
973  if (vk_renderer) {
975  return;
976  }
977 
978  if (is->subtitle_st) {
979  if (frame_queue_nb_remaining(&is->subpq) > 0) {
980  sp = frame_queue_peek(&is->subpq);
981 
982  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
983  if (!sp->uploaded) {
984  uint8_t* pixels[4];
985  int pitch[4];
986  int i;
987  if (!sp->width || !sp->height) {
988  sp->width = vp->width;
989  sp->height = vp->height;
990  }
991  if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
992  return;
993 
994  for (i = 0; i < sp->sub.num_rects; i++) {
995  AVSubtitleRect *sub_rect = sp->sub.rects[i];
996 
997  sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
998  sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
999  sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x);
1000  sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
1001 
1002  is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
1003  sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8,
1004  sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA,
1005  0, NULL, NULL, NULL);
1006  if (!is->sub_convert_ctx) {
1007  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1008  return;
1009  }
1010  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) {
1011  sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
1012  0, sub_rect->h, pixels, pitch);
1013  SDL_UnlockTexture(is->sub_texture);
1014  }
1015  }
1016  sp->uploaded = 1;
1017  }
1018  } else
1019  sp = NULL;
1020  }
1021  }
1022 
1023  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
1025 
1026  if (!vp->uploaded) {
1027  if (upload_texture(&is->vid_texture, vp->frame) < 0) {
1029  return;
1030  }
1031  vp->uploaded = 1;
1032  vp->flip_v = vp->frame->linesize[0] < 0;
1033  }
1034 
1035  SDL_RenderCopyEx(renderer, is->vid_texture, NULL, &rect, 0, NULL, vp->flip_v ? SDL_FLIP_VERTICAL : 0);
1037  if (sp) {
1038 #if USE_ONEPASS_SUBTITLE_RENDER
1039  SDL_RenderCopy(renderer, is->sub_texture, NULL, &rect);
1040 #else
1041  int i;
1042  double xratio = (double)rect.w / (double)sp->width;
1043  double yratio = (double)rect.h / (double)sp->height;
1044  for (i = 0; i < sp->sub.num_rects; i++) {
1045  SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
1046  SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
1047  .y = rect.y + sub_rect->y * yratio,
1048  .w = sub_rect->w * xratio,
1049  .h = sub_rect->h * yratio};
1050  SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
1051  }
1052 #endif
1053  }
1054 }
1055 
1056 static inline int compute_mod(int a, int b)
1057 {
1058  return a < 0 ? a%b + b : a%b;
1059 }
1060 
1061 static void video_audio_display(VideoState *s)
1062 {
1063  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1064  int ch, channels, h, h2;
1065  int64_t time_diff;
1066  int rdft_bits, nb_freq;
1067 
1068  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1069  ;
1070  nb_freq = 1 << (rdft_bits - 1);
1071 
1072  /* compute display index : center on currently output samples */
1073  channels = s->audio_tgt.ch_layout.nb_channels;
1074  nb_display_channels = channels;
1075  if (!s->paused) {
1076  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1077  n = 2 * channels;
1078  delay = s->audio_write_buf_size;
1079  delay /= n;
1080 
1081  /* to be more precise, we take into account the time spent since
1082  the last buffer computation */
1083  if (audio_callback_time) {
1084  time_diff = av_gettime_relative() - audio_callback_time;
1085  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1086  }
1087 
1088  delay += 2 * data_used;
1089  if (delay < data_used)
1090  delay = data_used;
1091 
1092  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1093  if (s->show_mode == SHOW_MODE_WAVES) {
1094  h = INT_MIN;
1095  for (i = 0; i < 1000; i += channels) {
1096  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1097  int a = s->sample_array[idx];
1098  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1099  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1100  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1101  int score = a - d;
1102  if (h < score && (b ^ c) < 0) {
1103  h = score;
1104  i_start = idx;
1105  }
1106  }
1107  }
1108 
1109  s->last_i_start = i_start;
1110  } else {
1111  i_start = s->last_i_start;
1112  }
1113 
1114  if (s->show_mode == SHOW_MODE_WAVES) {
1115  SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
1116 
1117  /* total height for one channel */
1118  h = s->height / nb_display_channels;
1119  /* graph height / 2 */
1120  h2 = (h * 9) / 20;
1121  for (ch = 0; ch < nb_display_channels; ch++) {
1122  i = i_start + ch;
1123  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1124  for (x = 0; x < s->width; x++) {
1125  y = (s->sample_array[i] * h2) >> 15;
1126  if (y < 0) {
1127  y = -y;
1128  ys = y1 - y;
1129  } else {
1130  ys = y1;
1131  }
1132  fill_rectangle(s->xleft + x, ys, 1, y);
1133  i += channels;
1134  if (i >= SAMPLE_ARRAY_SIZE)
1135  i -= SAMPLE_ARRAY_SIZE;
1136  }
1137  }
1138 
1139  SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
1140 
1141  for (ch = 1; ch < nb_display_channels; ch++) {
1142  y = s->ytop + ch * h;
1143  fill_rectangle(s->xleft, y, s->width, 1);
1144  }
1145  } else {
1146  int err = 0;
1147  if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
1148  return;
1149 
1150  if (s->xpos >= s->width)
1151  s->xpos = 0;
1152  nb_display_channels= FFMIN(nb_display_channels, 2);
1153  if (rdft_bits != s->rdft_bits) {
1154  const float rdft_scale = 1.0;
1155  av_tx_uninit(&s->rdft);
1156  av_freep(&s->real_data);
1157  av_freep(&s->rdft_data);
1158  s->rdft_bits = rdft_bits;
1159  s->real_data = av_malloc_array(nb_freq, 4 *sizeof(*s->real_data));
1160  s->rdft_data = av_malloc_array(nb_freq + 1, 2 *sizeof(*s->rdft_data));
1161  err = av_tx_init(&s->rdft, &s->rdft_fn, AV_TX_FLOAT_RDFT,
1162  0, 1 << rdft_bits, &rdft_scale, 0);
1163  }
1164  if (err < 0 || !s->rdft_data) {
1165  av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1166  s->show_mode = SHOW_MODE_WAVES;
1167  } else {
1168  float *data_in[2];
1169  AVComplexFloat *data[2];
1170  SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
1171  uint32_t *pixels;
1172  int pitch;
1173  for (ch = 0; ch < nb_display_channels; ch++) {
1174  data_in[ch] = s->real_data + 2 * nb_freq * ch;
1175  data[ch] = s->rdft_data + nb_freq * ch;
1176  i = i_start + ch;
1177  for (x = 0; x < 2 * nb_freq; x++) {
1178  double w = (x-nb_freq) * (1.0 / nb_freq);
1179  data_in[ch][x] = s->sample_array[i] * (1.0 - w * w);
1180  i += channels;
1181  if (i >= SAMPLE_ARRAY_SIZE)
1182  i -= SAMPLE_ARRAY_SIZE;
1183  }
1184  s->rdft_fn(s->rdft, data[ch], data_in[ch], sizeof(float));
1185  data[ch][0].im = data[ch][nb_freq].re;
1186  data[ch][nb_freq].re = 0;
1187  }
1188  /* Least efficient way to do this; we should of course
1189  * access it directly, but it is more than fast enough. */
1190  if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
1191  pitch >>= 2;
1192  pixels += pitch * s->height;
1193  for (y = 0; y < s->height; y++) {
1194  double w = 1 / sqrt(nb_freq);
1195  int a = sqrt(w * sqrt(data[0][y].re * data[0][y].re + data[0][y].im * data[0][y].im));
1196  int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][y].re, data[1][y].im))
1197  : a;
1198  a = FFMIN(a, 255);
1199  b = FFMIN(b, 255);
1200  pixels -= pitch;
1201  *pixels = (a << 16) + (b << 8) + ((a+b) >> 1);
1202  }
1203  SDL_UnlockTexture(s->vis_texture);
1204  }
1205  SDL_RenderCopy(renderer, s->vis_texture, NULL, NULL);
1206  }
1207  if (!s->paused)
1208  s->xpos++;
1209  }
1210 }
1211 
1212 static void stream_component_close(VideoState *is, int stream_index)
1213 {
1214  AVFormatContext *ic = is->ic;
1215  AVCodecParameters *codecpar;
1216 
1217  if (stream_index < 0 || stream_index >= ic->nb_streams)
1218  return;
1219  codecpar = ic->streams[stream_index]->codecpar;
1220 
1221  switch (codecpar->codec_type) {
1222  case AVMEDIA_TYPE_AUDIO:
1223  decoder_abort(&is->auddec, &is->sampq);
1224  SDL_CloseAudioDevice(audio_dev);
1225  decoder_destroy(&is->auddec);
1226  swr_free(&is->swr_ctx);
1227  av_freep(&is->audio_buf1);
1228  is->audio_buf1_size = 0;
1229  is->audio_buf = NULL;
1230 
1231  if (is->rdft) {
1232  av_tx_uninit(&is->rdft);
1233  av_freep(&is->real_data);
1234  av_freep(&is->rdft_data);
1235  is->rdft = NULL;
1236  is->rdft_bits = 0;
1237  }
1238  break;
1239  case AVMEDIA_TYPE_VIDEO:
1240  decoder_abort(&is->viddec, &is->pictq);
1241  decoder_destroy(&is->viddec);
1242  break;
1243  case AVMEDIA_TYPE_SUBTITLE:
1244  decoder_abort(&is->subdec, &is->subpq);
1245  decoder_destroy(&is->subdec);
1246  break;
1247  default:
1248  break;
1249  }
1250 
1251  ic->streams[stream_index]->discard = AVDISCARD_ALL;
1252  switch (codecpar->codec_type) {
1253  case AVMEDIA_TYPE_AUDIO:
1254  is->audio_st = NULL;
1255  is->audio_stream = -1;
1256  break;
1257  case AVMEDIA_TYPE_VIDEO:
1258  is->video_st = NULL;
1259  is->video_stream = -1;
1260  break;
1261  case AVMEDIA_TYPE_SUBTITLE:
1262  is->subtitle_st = NULL;
1263  is->subtitle_stream = -1;
1264  break;
1265  default:
1266  break;
1267  }
1268 }
1269 
1270 static void stream_close(VideoState *is)
1271 {
1272  /* XXX: use a special url_shutdown call to abort parse cleanly */
1273  is->abort_request = 1;
1274  SDL_WaitThread(is->read_tid, NULL);
1275 
1276  /* close each stream */
1277  if (is->audio_stream >= 0)
1278  stream_component_close(is, is->audio_stream);
1279  if (is->video_stream >= 0)
1280  stream_component_close(is, is->video_stream);
1281  if (is->subtitle_stream >= 0)
1282  stream_component_close(is, is->subtitle_stream);
1283 
1284  avformat_close_input(&is->ic);
1285 
1286  packet_queue_destroy(&is->videoq);
1287  packet_queue_destroy(&is->audioq);
1288  packet_queue_destroy(&is->subtitleq);
1289 
1290  /* free all pictures */
1291  frame_queue_destroy(&is->pictq);
1292  frame_queue_destroy(&is->sampq);
1293  frame_queue_destroy(&is->subpq);
1294  SDL_DestroyCond(is->continue_read_thread);
1295  sws_freeContext(is->sub_convert_ctx);
1296  av_free(is->filename);
1297  if (is->vis_texture)
1298  SDL_DestroyTexture(is->vis_texture);
1299  if (is->vid_texture)
1300  SDL_DestroyTexture(is->vid_texture);
1301  if (is->sub_texture)
1302  SDL_DestroyTexture(is->sub_texture);
1303  av_free(is);
1304 }
1305 
1306 static void do_exit(VideoState *is)
1307 {
1308  if (is) {
1309  stream_close(is);
1310  }
1311  if (renderer)
1312  SDL_DestroyRenderer(renderer);
1313  if (vk_renderer)
1315  if (window)
1316  SDL_DestroyWindow(window);
1317  uninit_opts();
1318  for (int i = 0; i < nb_vfilters; i++)
1326  if (show_status)
1327  printf("\n");
1328  SDL_Quit();
1329  av_log(NULL, AV_LOG_QUIET, "%s", "");
1330  exit(0);
1331 }
1332 
1333 static void sigterm_handler(int sig)
1334 {
1335  exit(123);
1336 }
1337 
1338 static void set_default_window_size(int width, int height, AVRational sar)
1339 {
1340  SDL_Rect rect;
1341  int max_width = screen_width ? screen_width : INT_MAX;
1342  int max_height = screen_height ? screen_height : INT_MAX;
1343  if (max_width == INT_MAX && max_height == INT_MAX)
1344  max_height = height;
1345  calculate_display_rect(&rect, 0, 0, max_width, max_height, width, height, sar);
1346  default_width = rect.w;
1347  default_height = rect.h;
1348 }
1349 
1350 static int video_open(VideoState *is)
1351 {
1352  int w,h;
1353 
1354  w = screen_width ? screen_width : default_width;
1355  h = screen_height ? screen_height : default_height;
1356 
1357  if (!window_title)
1358  window_title = input_filename;
1359  SDL_SetWindowTitle(window, window_title);
1360 
1361  SDL_SetWindowSize(window, w, h);
1362  SDL_SetWindowPosition(window, screen_left, screen_top);
1363  if (is_full_screen)
1364  SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP);
1365  SDL_ShowWindow(window);
1366 
1367  is->width = w;
1368  is->height = h;
1369 
1370  return 0;
1371 }
1372 
1373 /* display the current picture, if any */
1374 static void video_display(VideoState *is)
1375 {
1376  if (!is->width)
1377  video_open(is);
1378 
1379  SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1380  SDL_RenderClear(renderer);
1381  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1382  video_audio_display(is);
1383  else if (is->video_st)
1384  video_image_display(is);
1385  SDL_RenderPresent(renderer);
1386 }
1387 
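/* The Clock keeps pts_drift = pts - last_updated so its current value can be
 * derived from the system time alone: get_clock() returns
 * pts_drift + now - (now - last_updated) * (1 - speed), i.e. the last pts plus
 * the elapsed time scaled by the clock speed. A serial that no longer matches
 * the owning packet queue marks the clock as obsolete and yields NAN. */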
1388 static double get_clock(Clock *c)
1389 {
1390  if (*c->queue_serial != c->serial)
1391  return NAN;
1392  if (c->paused) {
1393  return c->pts;
1394  } else {
1395  double time = av_gettime_relative() / 1000000.0;
1396  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1397  }
1398 }
1399 
1400 static void set_clock_at(Clock *c, double pts, int serial, double time)
1401 {
1402  c->pts = pts;
1403  c->last_updated = time;
1404  c->pts_drift = c->pts - time;
1405  c->serial = serial;
1406 }
1407 
1408 static void set_clock(Clock *c, double pts, int serial)
1409 {
1410  double time = av_gettime_relative() / 1000000.0;
1411  set_clock_at(c, pts, serial, time);
1412 }
1413 
1414 static void set_clock_speed(Clock *c, double speed)
1415 {
1416  set_clock(c, get_clock(c), c->serial);
1417  c->speed = speed;
1418 }
1419 
1420 static void init_clock(Clock *c, int *queue_serial)
1421 {
1422  c->speed = 1.0;
1423  c->paused = 0;
1424  c->queue_serial = queue_serial;
1425  set_clock(c, NAN, -1);
1426 }
1427 
1428 static void sync_clock_to_slave(Clock *c, Clock *slave)
1429 {
1430  double clock = get_clock(c);
1431  double slave_clock = get_clock(slave);
1432  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1433  set_clock(c, slave_clock, slave->serial);
1434 }
1435 
1436 static int get_master_sync_type(VideoState *is) {
1437  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1438  if (is->video_st)
1439  return AV_SYNC_VIDEO_MASTER;
1440  else
1441  return AV_SYNC_AUDIO_MASTER;
1442  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1443  if (is->audio_st)
1444  return AV_SYNC_AUDIO_MASTER;
1445  else
1446  return AV_SYNC_EXTERNAL_CLOCK;
1447  } else {
1448  return AV_SYNC_EXTERNAL_CLOCK;
1449  }
1450 }
1451 
1452 /* get the current master clock value */
1453 static double get_master_clock(VideoState *is)
1454 {
1455  double val;
1456 
1457  switch (get_master_sync_type(is)) {
1458  case AV_SYNC_VIDEO_MASTER:
1459  val = get_clock(&is->vidclk);
1460  break;
1461  case AV_SYNC_AUDIO_MASTER:
1462  val = get_clock(&is->audclk);
1463  break;
1464  default:
1465  val = get_clock(&is->extclk);
1466  break;
1467  }
1468  return val;
1469 }
1470 
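/* For realtime sources slaved to the external clock, nudge the clock speed
 * depending on how full the packet queues are: slow down towards
 * EXTERNAL_CLOCK_SPEED_MIN when fewer than EXTERNAL_CLOCK_MIN_FRAMES packets
 * are buffered, speed up towards EXTERNAL_CLOCK_SPEED_MAX above
 * EXTERNAL_CLOCK_MAX_FRAMES, and otherwise let the speed drift back to 1.0. */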
1471 static void check_external_clock_speed(VideoState *is) {
1472  if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES ||
1473  is->audio_stream >= 0 && is->audioq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES) {
1474  set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
1475  } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) &&
1476  (is->audio_stream < 0 || is->audioq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES)) {
1477  set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
1478  } else {
1479  double speed = is->extclk.speed;
1480  if (speed != 1.0)
1481  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1482  }
1483 }
1484 
1485 /* seek in the stream */
1486 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int by_bytes)
1487 {
1488  if (!is->seek_req) {
1489  is->seek_pos = pos;
1490  is->seek_rel = rel;
1491  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1492  if (by_bytes)
1493  is->seek_flags |= AVSEEK_FLAG_BYTE;
1494  is->seek_req = 1;
1495  SDL_CondSignal(is->continue_read_thread);
1496  }
1497 }
1498 
1499 /* pause or resume the video */
1500 static void stream_toggle_pause(VideoState *is)
1501 {
1502  if (is->paused) {
1503  is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1504  if (is->read_pause_return != AVERROR(ENOSYS)) {
1505  is->vidclk.paused = 0;
1506  }
1507  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1508  }
1509  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1510  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1511 }
1512 
1513 static void toggle_pause(VideoState *is)
1514 {
1515  stream_toggle_pause(is);
1516  is->step = 0;
1517 }
1518 
1519 static void toggle_mute(VideoState *is)
1520 {
1521  is->muted = !is->muted;
1522 }
1523 
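/* Volume steps are applied in decibels: the current SDL volume is converted to
 * dB, sign * step is added (SDL_VOLUME_STEP = 0.75 dB), and the result is
 * converted back and clipped to [0, SDL_MIX_MAXVOLUME].
 * Illustrative example (numbers assumed): at full volume (128), one step down
 * gives 128 * 10^(-0.75 / 20), which rounds to 117. */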
1524 static void update_volume(VideoState *is, int sign, double step)
1525 {
1526  double volume_level = is->audio_volume ? (20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)) : -1000.0;
1527  int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (volume_level + sign * step) / 20.0));
1528  is->audio_volume = av_clip(is->audio_volume == new_volume ? (is->audio_volume + sign) : new_volume, 0, SDL_MIX_MAXVOLUME);
1529 }
1530 
1531 static void step_to_next_frame(VideoState *is)
1532 {
1533  /* if the stream is paused, unpause it, then step */
1534  if (is->paused)
1535  stream_toggle_pause(is);
1536  is->step = 1;
1537 }
1538 
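/* Illustrative example (numbers assumed, not from the source): with a nominal
 * frame delay of 0.040s and the video clock 0.100s behind the master clock
 * (diff = -0.100), sync_threshold clamps to 0.040; since diff <= -sync_threshold
 * the returned delay becomes FFMAX(0, 0.040 - 0.100) = 0, so the next frame is
 * shown as soon as possible to catch up. */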
1539 static double compute_target_delay(double delay, VideoState *is)
1540 {
1541  double sync_threshold, diff = 0;
1542 
1543  /* update delay to follow master synchronisation source */
1544  if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1545  /* if video is slave, we try to correct big delays by
1546  duplicating or deleting a frame */
1547  diff = get_clock(&is->vidclk) - get_master_clock(is);
1548 
1549  /* skip or repeat frame. We take into account the
1550  delay to compute the threshold. I still don't know
1551  if it is the best guess */
1552  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1553  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1554  if (diff <= -sync_threshold)
1555  delay = FFMAX(0, delay + diff);
1556  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1557  delay = delay + diff;
1558  else if (diff >= sync_threshold)
1559  delay = 2 * delay;
1560  }
1561  }
1562 
1563  av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
1564  delay, -diff);
1565 
1566  return delay;
1567 }
1568 
1569 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1570  if (vp->serial == nextvp->serial) {
1571  double duration = nextvp->pts - vp->pts;
1572  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1573  return vp->duration;
1574  else
1575  return duration;
1576  } else {
1577  return 0.0;
1578  }
1579 }
1580 
1581 static void update_video_pts(VideoState *is, double pts, int serial)
1582 {
1583  /* update current video pts */
1584  set_clock(&is->vidclk, pts, serial);
1585  sync_clock_to_slave(&is->extclk, &is->vidclk);
1586 }
1587 
1588 /* called to display each frame */
1589 static void video_refresh(void *opaque, double *remaining_time)
1590 {
1591  VideoState *is = opaque;
1592  double time;
1593 
1594  Frame *sp, *sp2;
1595 
1596  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1597  check_external_clock_speed(is);
1598 
1599  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1600  time = av_gettime_relative() / 1000000.0;
1601  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1602  video_display(is);
1603  is->last_vis_time = time;
1604  }
1605  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1606  }
1607 
1608  if (is->video_st) {
1609 retry:
1610  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1611  // nothing to do, no picture to display in the queue
1612  } else {
1613  double last_duration, duration, delay;
1614  Frame *vp, *lastvp;
1615 
1616  /* dequeue the picture */
1617  lastvp = frame_queue_peek_last(&is->pictq);
1618  vp = frame_queue_peek(&is->pictq);
1619 
1620  if (vp->serial != is->videoq.serial) {
1621  frame_queue_next(&is->pictq);
1622  goto retry;
1623  }
1624 
1625  if (lastvp->serial != vp->serial)
1626  is->frame_timer = av_gettime_relative() / 1000000.0;
1627 
1628  if (is->paused)
1629  goto display;
1630 
1631  /* compute nominal last_duration */
1632  last_duration = vp_duration(is, lastvp, vp);
1633  delay = compute_target_delay(last_duration, is);
1634 
1635  time= av_gettime_relative()/1000000.0;
1636  if (time < is->frame_timer + delay) {
1637  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1638  goto display;
1639  }
1640 
1641  is->frame_timer += delay;
1642  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1643  is->frame_timer = time;
1644 
1645  SDL_LockMutex(is->pictq.mutex);
1646  if (!isnan(vp->pts))
1647  update_video_pts(is, vp->pts, vp->serial);
1648  SDL_UnlockMutex(is->pictq.mutex);
1649 
1650  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1651  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1652  duration = vp_duration(is, vp, nextvp);
1653  if(!is->step && (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1654  is->frame_drops_late++;
1655  frame_queue_next(&is->pictq);
1656  goto retry;
1657  }
1658  }
1659 
1660  if (is->subtitle_st) {
1661  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1662  sp = frame_queue_peek(&is->subpq);
1663 
1664  if (frame_queue_nb_remaining(&is->subpq) > 1)
1665  sp2 = frame_queue_peek_next(&is->subpq);
1666  else
1667  sp2 = NULL;
1668 
1669  if (sp->serial != is->subtitleq.serial
1670  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1671  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1672  {
1673  if (sp->uploaded) {
1674  int i;
1675  for (i = 0; i < sp->sub.num_rects; i++) {
1676  AVSubtitleRect *sub_rect = sp->sub.rects[i];
1677  uint8_t *pixels;
1678  int pitch, j;
1679 
1680  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
1681  for (j = 0; j < sub_rect->h; j++, pixels += pitch)
1682  memset(pixels, 0, sub_rect->w << 2);
1683  SDL_UnlockTexture(is->sub_texture);
1684  }
1685  }
1686  }
1687  frame_queue_next(&is->subpq);
1688  } else {
1689  break;
1690  }
1691  }
1692  }
1693 
1694  frame_queue_next(&is->pictq);
1695  is->force_refresh = 1;
1696 
1697  if (is->step && !is->paused)
1698  stream_toggle_pause(is);
1699  }
1700 display:
1701  /* display picture */
1702  if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
1703  video_display(is);
1704  }
1705  is->force_refresh = 0;
1706  if (show_status) {
1707  AVBPrint buf;
1708  static int64_t last_time;
1709  int64_t cur_time;
1710  int aqsize, vqsize, sqsize;
1711  double av_diff;
1712 
1713  cur_time = av_gettime_relative();
1714  if (!last_time || (cur_time - last_time) >= 30000) {
1715  aqsize = 0;
1716  vqsize = 0;
1717  sqsize = 0;
1718  if (is->audio_st)
1719  aqsize = is->audioq.size;
1720  if (is->video_st)
1721  vqsize = is->videoq.size;
1722  if (is->subtitle_st)
1723  sqsize = is->subtitleq.size;
1724  av_diff = 0;
1725  if (is->audio_st && is->video_st)
1726  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1727  else if (is->video_st)
1728  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1729  else if (is->audio_st)
1730  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1731 
1732  av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
1733  av_bprintf(&buf,
1734  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1735  get_master_clock(is),
1736  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1737  av_diff,
1738  is->frame_drops_early + is->frame_drops_late,
1739  aqsize / 1024,
1740  vqsize / 1024,
1741  sqsize,
1742  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_dts : 0,
1743  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_pts : 0);
1744 
1745  if (show_status == 1 && AV_LOG_INFO > av_log_get_level())
1746  fprintf(stderr, "%s", buf.str);
1747  else
1748  av_log(NULL, AV_LOG_INFO, "%s", buf.str);
1749 
1750  fflush(stderr);
1751  av_bprint_finalize(&buf, NULL);
1752 
1753  last_time = cur_time;
1754  }
1755  }
1756 }
1757 
1758 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1759 {
1760  Frame *vp;
1761 
1762 #if defined(DEBUG_SYNC)
1763  printf("frame_type=%c pts=%0.3f\n",
1764  av_get_picture_type_char(src_frame->pict_type), pts);
1765 #endif
1766 
1767  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1768  return -1;
1769 
1770  vp->sar = src_frame->sample_aspect_ratio;
1771  vp->uploaded = 0;
1772 
1773  vp->width = src_frame->width;
1774  vp->height = src_frame->height;
1775  vp->format = src_frame->format;
1776 
1777  vp->pts = pts;
1778  vp->duration = duration;
1779  vp->pos = pos;
1780  vp->serial = serial;
1781 
1782  set_default_window_size(vp->width, vp->height, vp->sar);
1783 
1784  av_frame_move_ref(vp->frame, src_frame);
1785  frame_queue_push(&is->pictq);
1786  return 0;
1787 }
1788 
1789 static int get_video_frame(VideoState *is, AVFrame *frame)
1790 {
1791  int got_picture;
1792 
1793  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1794  return -1;
1795 
1796  if (got_picture) {
1797  double dpts = NAN;
1798 
1799  if (frame->pts != AV_NOPTS_VALUE)
1800  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1801 
1803 
1804  if (framedrop > 0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1805  if (frame->pts != AV_NOPTS_VALUE) {
1806  double diff = dpts - get_master_clock(is);
1807  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1808  diff - is->frame_last_filter_delay < 0 &&
1809  is->viddec.pkt_serial == is->vidclk.serial &&
1810  is->videoq.nb_packets) {
1811  is->frame_drops_early++;
1813  got_picture = 0;
1814  }
1815  }
1816  }
1817  }
1818 
1819  return got_picture;
1820 }
1821 
1822 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1823  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1824 {
1825  int ret, i;
1826  int nb_filters = graph->nb_filters;
1827  AVFilterInOut *outputs = NULL, *inputs = NULL;
1828 
1829  if (filtergraph) {
1830  outputs = avfilter_inout_alloc();
1831  inputs = avfilter_inout_alloc();
1832  if (!outputs || !inputs) {
1833  ret = AVERROR(ENOMEM);
1834  goto fail;
1835  }
1836 
1837  outputs->name = av_strdup("in");
1838  outputs->filter_ctx = source_ctx;
1839  outputs->pad_idx = 0;
1840  outputs->next = NULL;
1841 
1842  inputs->name = av_strdup("out");
1843  inputs->filter_ctx = sink_ctx;
1844  inputs->pad_idx = 0;
1845  inputs->next = NULL;
1846 
1847  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1848  goto fail;
1849  } else {
1850  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1851  goto fail;
1852  }
1853 
1854  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1855  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1856  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1857 
1858  ret = avfilter_graph_config(graph, NULL);
1859 fail:
1860  avfilter_inout_free(&outputs);
1861  avfilter_inout_free(&inputs);
1862  return ret;
1863 }
1864 
1865 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1866 {
1867  enum AVPixelFormat pix_fmts[FF_ARRAY_ELEMS(sdl_texture_format_map)];
1868  char sws_flags_str[512] = "";
1869  char buffersrc_args[256];
1870  int ret;
1871  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1872  AVCodecParameters *codecpar = is->video_st->codecpar;
1873  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1874  const AVDictionaryEntry *e = NULL;
1875  int nb_pix_fmts = 0;
1876  int i, j;
1877  AVBufferSrcParameters *par = av_buffersrc_parameters_alloc();
1878 
1879  if (!par)
1880  return AVERROR(ENOMEM);
1881 
1882  for (i = 0; i < renderer_info.num_texture_formats; i++) {
1883  for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; j++) {
1884  if (renderer_info.texture_formats[i] == sdl_texture_format_map[j].texture_fmt) {
1885  pix_fmts[nb_pix_fmts++] = sdl_texture_format_map[j].format;
1886  break;
1887  }
1888  }
1889  }
1890  pix_fmts[nb_pix_fmts] = AV_PIX_FMT_NONE;
1891 
1892  while ((e = av_dict_iterate(sws_dict, e))) {
1893  if (!strcmp(e->key, "sws_flags")) {
1894  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1895  } else
1896  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value);
1897  }
1898  if (strlen(sws_flags_str))
1899  sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1900 
1901  graph->scale_sws_opts = av_strdup(sws_flags_str);
1902 
1903  snprintf(buffersrc_args, sizeof(buffersrc_args),
1904  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d:"
1905  "colorspace=%d:range=%d",
1907  is->video_st->time_base.num, is->video_st->time_base.den,
1908  codecpar->sample_aspect_ratio.num, FFMAX(codecpar->sample_aspect_ratio.den, 1),
1910  if (fr.num && fr.den)
1911  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1912 
1913  if ((ret = avfilter_graph_create_filter(&filt_src,
1914  avfilter_get_by_name("buffer"),
1915  "ffplay_buffer", buffersrc_args, NULL,
1916  graph)) < 0)
1917  goto fail;
1918  par->hw_frames_ctx = frame->hw_frames_ctx;
1919  ret = av_buffersrc_parameters_set(filt_src, par);
1920  if (ret < 0)
1921  goto fail;
1922 
1923  ret = avfilter_graph_create_filter(&filt_out,
1924  avfilter_get_by_name("buffersink"),
1925  "ffplay_buffersink", NULL, NULL, graph);
1926  if (ret < 0)
1927  goto fail;
1928 
1929  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1930  goto fail;
1932  goto fail;
1933 
1934  last_filter = filt_out;
1935 
1936 /* Note: this macro adds a filter before the most recently added filter, so the
1937  * processing order of the filters is in reverse */
1938 #define INSERT_FILT(name, arg) do { \
1939  AVFilterContext *filt_ctx; \
1940  \
1941  ret = avfilter_graph_create_filter(&filt_ctx, \
1942  avfilter_get_by_name(name), \
1943  "ffplay_" name, arg, NULL, graph); \
1944  if (ret < 0) \
1945  goto fail; \
1946  \
1947  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1948  if (ret < 0) \
1949  goto fail; \
1950  \
1951  last_filter = filt_ctx; \
1952 } while (0)
1953 
1954  if (autorotate) {
1955  double theta = 0.0;
1956  int32_t *displaymatrix = NULL;
1957  AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX);
1958  if (sd)
1959  displaymatrix = (int32_t *)sd->data;
1960  if (!displaymatrix) {
1961  const AVPacketSideData *psd = av_packet_side_data_get(is->video_st->codecpar->coded_side_data,
1962  is->video_st->codecpar->nb_coded_side_data,
1963  AV_PKT_DATA_DISPLAYMATRIX);
1964  if (psd)
1965  displaymatrix = (int32_t *)psd->data;
1966  }
1967  theta = get_rotation(displaymatrix);
1968 
1969  if (fabs(theta - 90) < 1.0) {
1970  INSERT_FILT("transpose", "clock");
1971  } else if (fabs(theta - 180) < 1.0) {
1972  INSERT_FILT("hflip", NULL);
1973  INSERT_FILT("vflip", NULL);
1974  } else if (fabs(theta - 270) < 1.0) {
1975  INSERT_FILT("transpose", "cclock");
1976  } else if (fabs(theta) > 1.0) {
1977  char rotate_buf[64];
1978  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1979  INSERT_FILT("rotate", rotate_buf);
1980  }
1981  }
1982 
1983  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
1984  goto fail;
1985 
1986  is->in_video_filter = filt_src;
1987  is->out_video_filter = filt_out;
1988 
1989 fail:
1990  av_freep(&par);
1991  return ret;
1992 }
1993 
1994 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1995 {
1997  int sample_rates[2] = { 0, -1 };
1998  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1999  char aresample_swr_opts[512] = "";
2000  const AVDictionaryEntry *e = NULL;
2001  AVBPrint bp;
2002  char asrc_args[256];
2003  int ret;
2004 
2005  avfilter_graph_free(&is->agraph);
2006  if (!(is->agraph = avfilter_graph_alloc()))
2007  return AVERROR(ENOMEM);
2008  is->agraph->nb_threads = filter_nbthreads;
2009 
2010  av_bprint_init(&bp, 0, AV_BPRINT_SIZE_AUTOMATIC);
2011 
2012  while ((e = av_dict_iterate(swr_opts, e)))
2013  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
2014  if (strlen(aresample_swr_opts))
2015  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
2016  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
2017 
2018  av_channel_layout_describe_bprint(&is->audio_filter_src.ch_layout, &bp);
2019 
2020  ret = snprintf(asrc_args, sizeof(asrc_args),
2021  "sample_rate=%d:sample_fmt=%s:time_base=%d/%d:channel_layout=%s",
2022  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
2023  1, is->audio_filter_src.freq, bp.str);
2024 
2025  ret = avfilter_graph_create_filter(&filt_asrc,
2026  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
2027  asrc_args, NULL, is->agraph);
2028  if (ret < 0)
2029  goto end;
2030 
2031 
2032  ret = avfilter_graph_create_filter(&filt_asink,
2033  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
2034  NULL, NULL, is->agraph);
2035  if (ret < 0)
2036  goto end;
2037 
2038  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
2039  goto end;
2040  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
2041  goto end;
2042 
2043  if (force_output_format) {
2044  sample_rates [0] = is->audio_tgt.freq;
2045  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
2046  goto end;
2047  if ((ret = av_opt_set(filt_asink, "ch_layouts", bp.str, AV_OPT_SEARCH_CHILDREN)) < 0)
2048  goto end;
2049  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2050  goto end;
2051  }
2052 
2053 
2054  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
2055  goto end;
2056 
2057  is->in_audio_filter = filt_asrc;
2058  is->out_audio_filter = filt_asink;
2059 
2060 end:
2061  if (ret < 0)
2062  avfilter_graph_free(&is->agraph);
2063  av_bprint_finalize(&bp, NULL);
2064 
2065  return ret;
2066 }
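/* Worked example (hypothetical input, not part of the original source): for a
 * 44100 Hz stereo fltp source the snprintf() above produces an abuffer argument
 * string along the lines of
 *     "sample_rate=44100:sample_fmt=fltp:time_base=1/44100:channel_layout=stereo"
 * and, when force_output_format is set, sample_rates[0] = audio_tgt.freq pins the
 * abuffersink to the rate that was negotiated with SDL in audio_open(). */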
2067 
2068 static int audio_thread(void *arg)
2069 {
2070  VideoState *is = arg;
2071  AVFrame *frame = av_frame_alloc();
2072  Frame *af;
2073  int last_serial = -1;
2074  int reconfigure;
2075  int got_frame = 0;
2076  AVRational tb;
2077  int ret = 0;
2078 
2079  if (!frame)
2080  return AVERROR(ENOMEM);
2081 
2082  do {
2083  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2084  goto the_end;
2085 
2086  if (got_frame) {
2087  tb = (AVRational){1, frame->sample_rate};
2088 
2089  reconfigure =
2090  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.ch_layout.nb_channels,
2091  frame->format, frame->ch_layout.nb_channels) ||
2092  av_channel_layout_compare(&is->audio_filter_src.ch_layout, &frame->ch_layout) ||
2093  is->audio_filter_src.freq != frame->sample_rate ||
2094  is->auddec.pkt_serial != last_serial;
2095 
2096  if (reconfigure) {
2097  char buf1[1024], buf2[1024];
2098  av_channel_layout_describe(&is->audio_filter_src.ch_layout, buf1, sizeof(buf1));
2099  av_channel_layout_describe(&frame->ch_layout, buf2, sizeof(buf2));
2100  av_log(NULL, AV_LOG_DEBUG,
2101  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2102  is->audio_filter_src.freq, is->audio_filter_src.ch_layout.nb_channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2103  frame->sample_rate, frame->ch_layout.nb_channels, av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2104 
2105  is->audio_filter_src.fmt = frame->format;
2106  ret = av_channel_layout_copy(&is->audio_filter_src.ch_layout, &frame->ch_layout);
2107  if (ret < 0)
2108  goto the_end;
2109  is->audio_filter_src.freq = frame->sample_rate;
2110  last_serial = is->auddec.pkt_serial;
2111 
2112  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2113  goto the_end;
2114  }
2115 
2116  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2117  goto the_end;
2118 
2119  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2120  FrameData *fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL;
2121  tb = av_buffersink_get_time_base(is->out_audio_filter);
2122  if (!(af = frame_queue_peek_writable(&is->sampq)))
2123  goto the_end;
2124 
2125  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2126  af->pos = fd ? fd->pkt_pos : -1;
2127  af->serial = is->auddec.pkt_serial;
2128  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2129 
2130  av_frame_move_ref(af->frame, frame);
2131  frame_queue_push(&is->sampq);
2132 
2133  if (is->audioq.serial != is->auddec.pkt_serial)
2134  break;
2135  }
2136  if (ret == AVERROR_EOF)
2137  is->auddec.finished = is->auddec.pkt_serial;
2138  }
2139  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2140  the_end:
2141  avfilter_graph_free(&is->agraph);
2142  av_frame_free(&frame);
2143  return ret;
2144 }
2145 
2146 static int decoder_start(Decoder *d, int (*fn)(void *), const char *thread_name, void* arg)
2147 {
2148  packet_queue_start(d->queue);
2149  d->decoder_tid = SDL_CreateThread(fn, thread_name, arg);
2150  if (!d->decoder_tid) {
2151  av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
2152  return AVERROR(ENOMEM);
2153  }
2154  return 0;
2155 }
2156 
2157 static int video_thread(void *arg)
2158 {
2159  VideoState *is = arg;
2160  AVFrame *frame = av_frame_alloc();
2161  double pts;
2162  double duration;
2163  int ret;
2164  AVRational tb = is->video_st->time_base;
2165  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2166 
2167  AVFilterGraph *graph = NULL;
2168  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2169  int last_w = 0;
2170  int last_h = 0;
2171  enum AVPixelFormat last_format = -2;
2172  int last_serial = -1;
2173  int last_vfilter_idx = 0;
2174 
2175  if (!frame)
2176  return AVERROR(ENOMEM);
2177 
2178  for (;;) {
2179  ret = get_video_frame(is, frame);
2180  if (ret < 0)
2181  goto the_end;
2182  if (!ret)
2183  continue;
2184 
2185  if ( last_w != frame->width
2186  || last_h != frame->height
2187  || last_format != frame->format
2188  || last_serial != is->viddec.pkt_serial
2189  || last_vfilter_idx != is->vfilter_idx) {
2190  av_log(NULL, AV_LOG_DEBUG,
2191  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2192  last_w, last_h,
2193  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2194  frame->width, frame->height,
2195  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2196  avfilter_graph_free(&graph);
2197  graph = avfilter_graph_alloc();
2198  if (!graph) {
2199  ret = AVERROR(ENOMEM);
2200  goto the_end;
2201  }
2202  graph->nb_threads = filter_nbthreads;
2203  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2204  SDL_Event event;
2205  event.type = FF_QUIT_EVENT;
2206  event.user.data1 = is;
2207  SDL_PushEvent(&event);
2208  goto the_end;
2209  }
2210  filt_in = is->in_video_filter;
2211  filt_out = is->out_video_filter;
2212  last_w = frame->width;
2213  last_h = frame->height;
2214  last_format = frame->format;
2215  last_serial = is->viddec.pkt_serial;
2216  last_vfilter_idx = is->vfilter_idx;
2217  frame_rate = av_buffersink_get_frame_rate(filt_out);
2218  }
2219 
2220  ret = av_buffersrc_add_frame(filt_in, frame);
2221  if (ret < 0)
2222  goto the_end;
2223 
2224  while (ret >= 0) {
2225  FrameData *fd;
2226 
2227  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2228 
2229  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2230  if (ret < 0) {
2231  if (ret == AVERROR_EOF)
2232  is->viddec.finished = is->viddec.pkt_serial;
2233  ret = 0;
2234  break;
2235  }
2236 
2237  fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL;
2238 
2239  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2240  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2241  is->frame_last_filter_delay = 0;
2242  tb = av_buffersink_get_time_base(filt_out);
2243  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2244  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2245  ret = queue_picture(is, frame, pts, duration, fd ? fd->pkt_pos : -1, is->viddec.pkt_serial);
2246  av_frame_unref(frame);
2247  if (is->videoq.serial != is->viddec.pkt_serial)
2248  break;
2249  }
2250 
2251  if (ret < 0)
2252  goto the_end;
2253  }
2254  the_end:
2255  avfilter_graph_free(&graph);
2256  av_frame_free(&frame);
2257  return 0;
2258 }
2259 
2260 static int subtitle_thread(void *arg)
2261 {
2262  VideoState *is = arg;
2263  Frame *sp;
2264  int got_subtitle;
2265  double pts;
2266 
2267  for (;;) {
2268  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2269  return 0;
2270 
2271  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2272  break;
2273 
2274  pts = 0;
2275 
2276  if (got_subtitle && sp->sub.format == 0) {
2277  if (sp->sub.pts != AV_NOPTS_VALUE)
2278  pts = sp->sub.pts / (double)AV_TIME_BASE;
2279  sp->pts = pts;
2280  sp->serial = is->subdec.pkt_serial;
2281  sp->width = is->subdec.avctx->width;
2282  sp->height = is->subdec.avctx->height;
2283  sp->uploaded = 0;
2284 
2285  /* now we can update the picture count */
2286  frame_queue_push(&is->subpq);
2287  } else if (got_subtitle) {
2288  avsubtitle_free(&sp->sub);
2289  }
2290  }
2291  return 0;
2292 }
2293 
2294 /* copy samples for viewing in editor window */
2295 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2296 {
2297  int size, len;
2298 
2299  size = samples_size / sizeof(short);
2300  while (size > 0) {
2301  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2302  if (len > size)
2303  len = size;
2304  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2305  samples += len;
2306  is->sample_array_index += len;
2307  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2308  is->sample_array_index = 0;
2309  size -= len;
2310  }
2311 }
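/* Illustrative note (not part of the original source): sample_array acts as a fixed-size
 * ring buffer of SAMPLE_ARRAY_SIZE int16_t samples; once sample_array_index reaches the
 * end it wraps back to 0, so the waveform/RDFT display always sees the most recent
 * samples. E.g. with a hypothetical SAMPLE_ARRAY_SIZE of 8 and the index at 6, copying
 * 4 samples stores 2 at positions 6..7 and the remaining 2 at positions 0..1. */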
2312 
2313 /* return the wanted number of samples to get better sync if sync_type is video
2314  * or external master clock */
2315 static int synchronize_audio(VideoState *is, int nb_samples)
2316 {
2317  int wanted_nb_samples = nb_samples;
2318 
2319  /* if not master, then we try to remove or add samples to correct the clock */
2320  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2321  double diff, avg_diff;
2322  int min_nb_samples, max_nb_samples;
2323 
2324  diff = get_clock(&is->audclk) - get_master_clock(is);
2325 
2326  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2327  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2328  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2329  /* not enough measures to have a correct estimate */
2330  is->audio_diff_avg_count++;
2331  } else {
2332  /* estimate the A-V difference */
2333  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2334 
2335  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2336  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2337  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2338  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2339  wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2340  }
2341  av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2342  diff, avg_diff, wanted_nb_samples - nb_samples,
2343  is->audio_clock, is->audio_diff_threshold);
2344  }
2345  } else {
2346  /* too big a difference: may be initial PTS errors, so
2347  reset the A-V filter */
2348  is->audio_diff_avg_count = 0;
2349  is->audio_diff_cum = 0;
2350  }
2351  }
2352 
2353  return wanted_nb_samples;
2354 }
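/* Worked example (hypothetical numbers, not part of the original source): with the
 * audio clock running diff = +0.050 s relative to the master clock, audio_src.freq =
 * 48000 and nb_samples = 1024, the raw request would be 1024 + 0.050*48000 = 3424
 * samples, but SAMPLE_CORRECTION_PERCENT_MAX (10%) clamps it to at most 1126, so the
 * drift is absorbed gradually over many frames rather than in one jump. */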
2355 
2356 /**
2357  * Decode one audio frame and return its uncompressed size.
2358  *
2359  * The processed audio frame is decoded, converted if required, and
2360  * stored in is->audio_buf, with size in bytes given by the return
2361  * value.
2362  */
2363 static int audio_decode_frame(VideoState *is)
2364 {
2365  int data_size, resampled_data_size;
2366  av_unused double audio_clock0;
2367  int wanted_nb_samples;
2368  Frame *af;
2369 
2370  if (is->paused)
2371  return -1;
2372 
2373  do {
2374 #if defined(_WIN32)
2375  while (frame_queue_nb_remaining(&is->sampq) == 0) {
2376  if ((av_gettime_relative() - audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
2377  return -1;
2378  av_usleep (1000);
2379  }
2380 #endif
2381  if (!(af = frame_queue_peek_readable(&is->sampq)))
2382  return -1;
2383  frame_queue_next(&is->sampq);
2384  } while (af->serial != is->audioq.serial);
2385 
2386  data_size = av_samples_get_buffer_size(NULL, af->frame->ch_layout.nb_channels,
2387  af->frame->nb_samples,
2388  af->frame->format, 1);
2389 
2390  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2391 
2392  if (af->frame->format != is->audio_src.fmt ||
2393  av_channel_layout_compare(&af->frame->ch_layout, &is->audio_src.ch_layout) ||
2394  af->frame->sample_rate != is->audio_src.freq ||
2395  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2396  swr_free(&is->swr_ctx);
2397  swr_alloc_set_opts2(&is->swr_ctx,
2398  &is->audio_tgt.ch_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2399  &af->frame->ch_layout, af->frame->format, af->frame->sample_rate,
2400  0, NULL);
2401  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2402  av_log(NULL, AV_LOG_ERROR,
2403  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2404  af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), af->frame->ch_layout.nb_channels,
2405  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.ch_layout.nb_channels);
2406  swr_free(&is->swr_ctx);
2407  return -1;
2408  }
2409  if (av_channel_layout_copy(&is->audio_src.ch_layout, &af->frame->ch_layout) < 0)
2410  return -1;
2411  is->audio_src.freq = af->frame->sample_rate;
2412  is->audio_src.fmt = af->frame->format;
2413  }
2414 
2415  if (is->swr_ctx) {
2416  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2417  uint8_t **out = &is->audio_buf1;
2418  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2419  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.ch_layout.nb_channels, out_count, is->audio_tgt.fmt, 0);
2420  int len2;
2421  if (out_size < 0) {
2422  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2423  return -1;
2424  }
2425  if (wanted_nb_samples != af->frame->nb_samples) {
2426  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2427  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2428  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2429  return -1;
2430  }
2431  }
2432  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2433  if (!is->audio_buf1)
2434  return AVERROR(ENOMEM);
2435  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2436  if (len2 < 0) {
2437  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2438  return -1;
2439  }
2440  if (len2 == out_count) {
2441  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2442  if (swr_init(is->swr_ctx) < 0)
2443  swr_free(&is->swr_ctx);
2444  }
2445  is->audio_buf = is->audio_buf1;
2446  resampled_data_size = len2 * is->audio_tgt.ch_layout.nb_channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2447  } else {
2448  is->audio_buf = af->frame->data[0];
2449  resampled_data_size = data_size;
2450  }
2451 
2452  audio_clock0 = is->audio_clock;
2453  /* update the audio clock with the pts */
2454  if (!isnan(af->pts))
2455  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2456  else
2457  is->audio_clock = NAN;
2458  is->audio_clock_serial = af->serial;
2459 #ifdef DEBUG
2460  {
2461  static double last_clock;
2462  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2463  is->audio_clock - last_clock,
2464  is->audio_clock, audio_clock0);
2465  last_clock = is->audio_clock;
2466  }
2467 #endif
2468  return resampled_data_size;
2469 }
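/* Worked example (hypothetical numbers, not part of the original source): resampling a
 * 1024-sample frame from 44100 Hz to a 48000 Hz device, out_count is sized as
 * 1024 * 48000 / 44100 + 256 ≈ 1370 samples, the expected output plus headroom, and
 * after the frame is consumed the audio clock is set to af->pts + 1024/44100 ≈
 * pts + 0.0232 s, i.e. it points at the end of the decoded frame. */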
2470 
2471 /* prepare a new audio buffer */
2472 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2473 {
2474  VideoState *is = opaque;
2475  int audio_size, len1;
2476 
2477  audio_callback_time = av_gettime_relative();
2478 
2479  while (len > 0) {
2480  if (is->audio_buf_index >= is->audio_buf_size) {
2481  audio_size = audio_decode_frame(is);
2482  if (audio_size < 0) {
2483  /* if error, just output silence */
2484  is->audio_buf = NULL;
2485  is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2486  } else {
2487  if (is->show_mode != SHOW_MODE_VIDEO)
2488  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2489  is->audio_buf_size = audio_size;
2490  }
2491  is->audio_buf_index = 0;
2492  }
2493  len1 = is->audio_buf_size - is->audio_buf_index;
2494  if (len1 > len)
2495  len1 = len;
2496  if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
2497  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2498  else {
2499  memset(stream, 0, len1);
2500  if (!is->muted && is->audio_buf)
2501  SDL_MixAudioFormat(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, is->audio_volume);
2502  }
2503  len -= len1;
2504  stream += len1;
2505  is->audio_buf_index += len1;
2506  }
2507  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2508  /* Let's assume the audio driver that is used by SDL has two periods. */
2509  if (!isnan(is->audio_clock)) {
2510  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2511  sync_clock_to_slave(&is->extclk, &is->audclk);
2512  }
2513 }
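/* Worked example (hypothetical numbers, not part of the original source): with an SDL
 * hardware buffer of audio_hw_buf_size = 8192 bytes, audio_write_buf_size = 4096 bytes
 * still unconsumed and bytes_per_sec = 192000 (48 kHz stereo S16), the clock set above
 * lags audio_clock by (2*8192 + 4096) / 192000 ≈ 0.107 s, the estimated latency between
 * decoding and the samples actually reaching the speaker. */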
2514 
2515 static int audio_open(void *opaque, AVChannelLayout *wanted_channel_layout, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2516 {
2517  SDL_AudioSpec wanted_spec, spec;
2518  const char *env;
2519  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2520  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2521  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2522  int wanted_nb_channels = wanted_channel_layout->nb_channels;
2523 
2524  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2525  if (env) {
2526  wanted_nb_channels = atoi(env);
2527  av_channel_layout_uninit(wanted_channel_layout);
2528  av_channel_layout_default(wanted_channel_layout, wanted_nb_channels);
2529  }
2530  if (wanted_channel_layout->order != AV_CHANNEL_ORDER_NATIVE) {
2531  av_channel_layout_uninit(wanted_channel_layout);
2532  av_channel_layout_default(wanted_channel_layout, wanted_nb_channels);
2533  }
2534  wanted_nb_channels = wanted_channel_layout->nb_channels;
2535  wanted_spec.channels = wanted_nb_channels;
2536  wanted_spec.freq = wanted_sample_rate;
2537  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2538  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2539  return -1;
2540  }
2541  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2542  next_sample_rate_idx--;
2543  wanted_spec.format = AUDIO_S16SYS;
2544  wanted_spec.silence = 0;
2545  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
2546  wanted_spec.callback = sdl_audio_callback;
2547  wanted_spec.userdata = opaque;
2548  while (!(audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE))) {
2549  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2550  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2551  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2552  if (!wanted_spec.channels) {
2553  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2554  wanted_spec.channels = wanted_nb_channels;
2555  if (!wanted_spec.freq) {
2556  av_log(NULL, AV_LOG_ERROR,
2557  "No more combinations to try, audio open failed\n");
2558  return -1;
2559  }
2560  }
2561  av_channel_layout_default(wanted_channel_layout, wanted_spec.channels);
2562  }
2563  if (spec.format != AUDIO_S16SYS) {
2564  av_log(NULL, AV_LOG_ERROR,
2565  "SDL advised audio format %d is not supported!\n", spec.format);
2566  return -1;
2567  }
2568  if (spec.channels != wanted_spec.channels) {
2569  av_channel_layout_uninit(wanted_channel_layout);
2570  av_channel_layout_default(wanted_channel_layout, spec.channels);
2571  if (wanted_channel_layout->order != AV_CHANNEL_ORDER_NATIVE) {
2572  av_log(NULL, AV_LOG_ERROR,
2573  "SDL advised channel count %d is not supported!\n", spec.channels);
2574  return -1;
2575  }
2576  }
2577 
2578  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2579  audio_hw_params->freq = spec.freq;
2580  if (av_channel_layout_copy(&audio_hw_params->ch_layout, wanted_channel_layout) < 0)
2581  return -1;
2582  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->ch_layout.nb_channels, 1, audio_hw_params->fmt, 1);
2583  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->ch_layout.nb_channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2584  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2585  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2586  return -1;
2587  }
2588  return spec.size;
2589 }
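/* Worked example (hypothetical numbers, not part of the original source): requesting
 * 48000 Hz gives wanted_spec.samples = FFMAX(512, 2 << av_log2(48000 / 30)) =
 * FFMAX(512, 2048) = 2048 samples per callback, roughly 43 ms of audio, so the callback
 * fires about 23 times per second, under the SDL_AUDIO_MAX_CALLBACKS_PER_SEC cap of 30.
 * If SDL rejects the spec, the loop above retries with fewer channels and then with
 * lower sample rates before giving up. */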
2590 
2591 static int create_hwaccel(AVBufferRef **device_ctx)
2592 {
2593  enum AVHWDeviceType type;
2594  int ret;
2595  AVBufferRef *vk_dev;
2596 
2597  *device_ctx = NULL;
2598 
2599  if (!hwaccel)
2600  return 0;
2601 
2602  type = av_hwdevice_find_type_by_name(hwaccel);
2603  if (type == AV_HWDEVICE_TYPE_NONE)
2604  return AVERROR(ENOTSUP);
2605 
2606  ret = vk_renderer_get_hw_dev(vk_renderer, &vk_dev);
2607  if (ret < 0)
2608  return ret;
2609 
2610  ret = av_hwdevice_ctx_create_derived(device_ctx, type, vk_dev, 0);
2611  if (!ret)
2612  return 0;
2613 
2614  if (ret != AVERROR(ENOSYS))
2615  return ret;
2616 
2617  av_log(NULL, AV_LOG_WARNING, "Derive %s from vulkan not supported.\n", hwaccel);
2618  ret = av_hwdevice_ctx_create(device_ctx, type, NULL, NULL, 0);
2619  return ret;
2620 }
2621 
2622 /* open a given stream. Return 0 if OK */
2623 static int stream_component_open(VideoState *is, int stream_index)
2624 {
2625  AVFormatContext *ic = is->ic;
2626  AVCodecContext *avctx;
2627  const AVCodec *codec;
2628  const char *forced_codec_name = NULL;
2629  AVDictionary *opts = NULL;
2630  const AVDictionaryEntry *t = NULL;
2631  int sample_rate;
2632  AVChannelLayout ch_layout = { 0 };
2633  int ret = 0;
2634  int stream_lowres = lowres;
2635 
2636  if (stream_index < 0 || stream_index >= ic->nb_streams)
2637  return -1;
2638 
2639  avctx = avcodec_alloc_context3(NULL);
2640  if (!avctx)
2641  return AVERROR(ENOMEM);
2642 
2643  ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
2644  if (ret < 0)
2645  goto fail;
2646  avctx->pkt_timebase = ic->streams[stream_index]->time_base;
2647 
2648  codec = avcodec_find_decoder(avctx->codec_id);
2649 
2650  switch(avctx->codec_type){
2651  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2652  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2653  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2654  }
2655  if (forced_codec_name)
2656  codec = avcodec_find_decoder_by_name(forced_codec_name);
2657  if (!codec) {
2658  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2659  "No codec could be found with name '%s'\n", forced_codec_name);
2660  else av_log(NULL, AV_LOG_WARNING,
2661  "No decoder could be found for codec %s\n", avcodec_get_name(avctx->codec_id));
2662  ret = AVERROR(EINVAL);
2663  goto fail;
2664  }
2665 
2666  avctx->codec_id = codec->id;
2667  if (stream_lowres > codec->max_lowres) {
2668  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2669  codec->max_lowres);
2670  stream_lowres = codec->max_lowres;
2671  }
2672  avctx->lowres = stream_lowres;
2673 
2674  if (fast)
2675  avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2676 
2677  ret = filter_codec_opts(codec_opts, avctx->codec_id, ic,
2678  ic->streams[stream_index], codec, &opts);
2679  if (ret < 0)
2680  goto fail;
2681 
2682  if (!av_dict_get(opts, "threads", NULL, 0))
2683  av_dict_set(&opts, "threads", "auto", 0);
2684  if (stream_lowres)
2685  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2686 
2687  av_dict_set(&opts, "flags", "+copy_opaque", AV_DICT_MULTIKEY);
2688 
2689  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
2690  ret = create_hwaccel(&avctx->hw_device_ctx);
2691  if (ret < 0)
2692  goto fail;
2693  }
2694 
2695  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2696  goto fail;
2697  }
2698  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2699  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2701  goto fail;
2702  }
2703 
2704  is->eof = 0;
2705  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2706  switch (avctx->codec_type) {
2707  case AVMEDIA_TYPE_AUDIO:
2708  {
2709  AVFilterContext *sink;
2710 
2711  is->audio_filter_src.freq = avctx->sample_rate;
2712  ret = av_channel_layout_copy(&is->audio_filter_src.ch_layout, &avctx->ch_layout);
2713  if (ret < 0)
2714  goto fail;
2715  is->audio_filter_src.fmt = avctx->sample_fmt;
2716  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2717  goto fail;
2718  sink = is->out_audio_filter;
2719  sample_rate = av_buffersink_get_sample_rate(sink);
2720  ret = av_buffersink_get_ch_layout(sink, &ch_layout);
2721  if (ret < 0)
2722  goto fail;
2723  }
2724 
2725  /* prepare audio output */
2726  if ((ret = audio_open(is, &ch_layout, sample_rate, &is->audio_tgt)) < 0)
2727  goto fail;
2728  is->audio_hw_buf_size = ret;
2729  is->audio_src = is->audio_tgt;
2730  is->audio_buf_size = 0;
2731  is->audio_buf_index = 0;
2732 
2733  /* init averaging filter */
2734  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2735  is->audio_diff_avg_count = 0;
2736  /* since we do not have a precise enough audio FIFO fullness,
2737  we correct audio sync only if larger than this threshold */
2738  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2739 
2740  is->audio_stream = stream_index;
2741  is->audio_st = ic->streams[stream_index];
2742 
2743  if ((ret = decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread)) < 0)
2744  goto fail;
2745  if (is->ic->iformat->flags & AVFMT_NOTIMESTAMPS) {
2746  is->auddec.start_pts = is->audio_st->start_time;
2747  is->auddec.start_pts_tb = is->audio_st->time_base;
2748  }
2749  if ((ret = decoder_start(&is->auddec, audio_thread, "audio_decoder", is)) < 0)
2750  goto out;
2751  SDL_PauseAudioDevice(audio_dev, 0);
2752  break;
2753  case AVMEDIA_TYPE_VIDEO:
2754  is->video_stream = stream_index;
2755  is->video_st = ic->streams[stream_index];
2756 
2757  if ((ret = decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread)) < 0)
2758  goto fail;
2759  if ((ret = decoder_start(&is->viddec, video_thread, "video_decoder", is)) < 0)
2760  goto out;
2761  is->queue_attachments_req = 1;
2762  break;
2763  case AVMEDIA_TYPE_SUBTITLE:
2764  is->subtitle_stream = stream_index;
2765  is->subtitle_st = ic->streams[stream_index];
2766 
2767  if ((ret = decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread)) < 0)
2768  goto fail;
2769  if ((ret = decoder_start(&is->subdec, subtitle_thread, "subtitle_decoder", is)) < 0)
2770  goto out;
2771  break;
2772  default:
2773  break;
2774  }
2775  goto out;
2776 
2777 fail:
2778  avcodec_free_context(&avctx);
2779 out:
2780  av_channel_layout_uninit(&ch_layout);
2781  av_dict_free(&opts);
2782 
2783  return ret;
2784 }
2785 
2786 static int decode_interrupt_cb(void *ctx)
2787 {
2788  VideoState *is = ctx;
2789  return is->abort_request;
2790 }
2791 
2792 static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue) {
2793  return stream_id < 0 ||
2794  queue->abort_request ||
2795  (st->disposition & AV_DISPOSITION_ATTACHED_PIC) ||
2796  queue->nb_packets > MIN_FRAMES && (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0);
2797 }
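/* Worked example (hypothetical numbers, not part of the original source): for a 25 fps
 * video stream with time_base = 1/90000, the queue counts as "full enough" once it
 * holds more than MIN_FRAMES (25) packets and their summed duration exceeds one second,
 * i.e. queue->duration > 90000 ticks; aborted queues and attached-picture streams are
 * treated as always having enough. */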
2798 
2799 static int is_realtime(AVFormatContext *s)
2800 {
2801  if( !strcmp(s->iformat->name, "rtp")
2802  || !strcmp(s->iformat->name, "rtsp")
2803  || !strcmp(s->iformat->name, "sdp")
2804  )
2805  return 1;
2806 
2807  if(s->pb && ( !strncmp(s->url, "rtp:", 4)
2808  || !strncmp(s->url, "udp:", 4)
2809  )
2810  )
2811  return 1;
2812  return 0;
2813 }
2814 
2815 /* this thread gets the stream from the disk or the network */
2816 static int read_thread(void *arg)
2817 {
2818  VideoState *is = arg;
2819  AVFormatContext *ic = NULL;
2820  int err, i, ret;
2821  int st_index[AVMEDIA_TYPE_NB];
2822  AVPacket *pkt = NULL;
2823  int64_t stream_start_time;
2824  int pkt_in_play_range = 0;
2825  const AVDictionaryEntry *t;
2826  SDL_mutex *wait_mutex = SDL_CreateMutex();
2827  int scan_all_pmts_set = 0;
2828  int64_t pkt_ts;
2829 
2830  if (!wait_mutex) {
2831  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
2832  ret = AVERROR(ENOMEM);
2833  goto fail;
2834  }
2835 
2836  memset(st_index, -1, sizeof(st_index));
2837  is->eof = 0;
2838 
2839  pkt = av_packet_alloc();
2840  if (!pkt) {
2841  av_log(NULL, AV_LOG_FATAL, "Could not allocate packet.\n");
2842  ret = AVERROR(ENOMEM);
2843  goto fail;
2844  }
2845  ic = avformat_alloc_context();
2846  if (!ic) {
2847  av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2848  ret = AVERROR(ENOMEM);
2849  goto fail;
2850  }
2853  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2854  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2855  scan_all_pmts_set = 1;
2856  }
2857  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2858  if (err < 0) {
2859  print_error(is->filename, err);
2860  ret = -1;
2861  goto fail;
2862  }
2863  if (scan_all_pmts_set)
2864  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2865 
2866  if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2867  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2868  ret = AVERROR_OPTION_NOT_FOUND;
2869  goto fail;
2870  }
2871  is->ic = ic;
2872 
2873  if (genpts)
2874  ic->flags |= AVFMT_FLAG_GENPTS;
2875 
2876  if (find_stream_info) {
2877  AVDictionary **opts;
2878  int orig_nb_streams = ic->nb_streams;
2879 
2880  err = setup_find_stream_info_opts(ic, codec_opts, &opts);
2881  if (err < 0) {
2882  av_log(NULL, AV_LOG_ERROR,
2883  "Error setting up avformat_find_stream_info() options\n");
2884  ret = err;
2885  goto fail;
2886  }
2887 
2888  err = avformat_find_stream_info(ic, opts);
2889 
2890  for (i = 0; i < orig_nb_streams; i++)
2891  av_dict_free(&opts[i]);
2892  av_freep(&opts);
2893 
2894  if (err < 0) {
2895  av_log(NULL, AV_LOG_WARNING,
2896  "%s: could not find codec parameters\n", is->filename);
2897  ret = -1;
2898  goto fail;
2899  }
2900  }
2901 
2902  if (ic->pb)
2903  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2904 
2905  if (seek_by_bytes < 0)
2906  seek_by_bytes = !(ic->iformat->flags & AVFMT_NO_BYTE_SEEK) &&
2907  !!(ic->iformat->flags & AVFMT_TS_DISCONT) &&
2908  strcmp("ogg", ic->iformat->name);
2909 
2910  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2911 
2912  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2913  window_title = av_asprintf("%s - %s", t->value, input_filename);
2914 
2915  /* if seeking requested, we execute it */
2916  if (start_time != AV_NOPTS_VALUE) {
2917  int64_t timestamp;
2918 
2919  timestamp = start_time;
2920  /* add the stream start time */
2921  if (ic->start_time != AV_NOPTS_VALUE)
2922  timestamp += ic->start_time;
2923  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2924  if (ret < 0) {
2925  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2926  is->filename, (double)timestamp / AV_TIME_BASE);
2927  }
2928  }
2929 
2930  is->realtime = is_realtime(ic);
2931 
2932  if (show_status)
2933  av_dump_format(ic, 0, is->filename, 0);
2934 
2935  for (i = 0; i < ic->nb_streams; i++) {
2936  AVStream *st = ic->streams[i];
2937  enum AVMediaType type = st->codecpar->codec_type;
2938  st->discard = AVDISCARD_ALL;
2939  if (type >= 0 && wanted_stream_spec[type] && st_index[type] == -1)
2940  if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
2941  st_index[type] = i;
2942  }
2943  for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
2944  if (wanted_stream_spec[i] && st_index[i] == -1) {
2945  av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
2946  st_index[i] = INT_MAX;
2947  }
2948  }
2949 
2950  if (!video_disable)
2951  st_index[AVMEDIA_TYPE_VIDEO] =
2952  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2953  st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2954  if (!audio_disable)
2955  st_index[AVMEDIA_TYPE_AUDIO] =
2956  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2957  st_index[AVMEDIA_TYPE_AUDIO],
2958  st_index[AVMEDIA_TYPE_VIDEO],
2959  NULL, 0);
2960  if (!video_disable && !subtitle_disable)
2961  st_index[AVMEDIA_TYPE_SUBTITLE] =
2962  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2963  st_index[AVMEDIA_TYPE_SUBTITLE],
2964  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2965  st_index[AVMEDIA_TYPE_AUDIO] :
2966  st_index[AVMEDIA_TYPE_VIDEO]),
2967  NULL, 0);
2968 
2969  is->show_mode = show_mode;
2970  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2971  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2972  AVCodecParameters *codecpar = st->codecpar;
2973  AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
2974  if (codecpar->width)
2975  set_default_window_size(codecpar->width, codecpar->height, sar);
2976  }
2977 
2978  /* open the streams */
2979  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2980  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2981  }
2982 
2983  ret = -1;
2984  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2985  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2986  }
2987  if (is->show_mode == SHOW_MODE_NONE)
2988  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2989 
2990  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2991  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2992  }
2993 
2994  if (is->video_stream < 0 && is->audio_stream < 0) {
2995  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
2996  is->filename);
2997  ret = -1;
2998  goto fail;
2999  }
3000 
3001  if (infinite_buffer < 0 && is->realtime)
3002  infinite_buffer = 1;
3003 
3004  for (;;) {
3005  if (is->abort_request)
3006  break;
3007  if (is->paused != is->last_paused) {
3008  is->last_paused = is->paused;
3009  if (is->paused)
3010  is->read_pause_return = av_read_pause(ic);
3011  else
3012  av_read_play(ic);
3013  }
3014 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
3015  if (is->paused &&
3016  (!strcmp(ic->iformat->name, "rtsp") ||
3017  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
3018  /* wait 10 ms to avoid trying to get another packet */
3019  /* XXX: horrible */
3020  SDL_Delay(10);
3021  continue;
3022  }
3023 #endif
3024  if (is->seek_req) {
3025  int64_t seek_target = is->seek_pos;
3026  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
3027  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
3028 // FIXME the +-2 is due to rounding not being done in the correct direction when
3029 // the seek_pos/seek_rel variables are generated
3030 
3031  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
3032  if (ret < 0) {
3033  av_log(NULL, AV_LOG_ERROR,
3034  "%s: error while seeking\n", is->ic->url);
3035  } else {
3036  if (is->audio_stream >= 0)
3037  packet_queue_flush(&is->audioq);
3038  if (is->subtitle_stream >= 0)
3039  packet_queue_flush(&is->subtitleq);
3040  if (is->video_stream >= 0)
3041  packet_queue_flush(&is->videoq);
3042  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
3043  set_clock(&is->extclk, NAN, 0);
3044  } else {
3045  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
3046  }
3047  }
3048  is->seek_req = 0;
3049  is->queue_attachments_req = 1;
3050  is->eof = 0;
3051  if (is->paused)
3052  step_to_next_frame(is);
3053  }
3054  if (is->queue_attachments_req) {
3055  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
3056  if ((ret = av_packet_ref(pkt, &is->video_st->attached_pic)) < 0)
3057  goto fail;
3058  packet_queue_put(&is->videoq, pkt);
3059  packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
3060  }
3061  is->queue_attachments_req = 0;
3062  }
3063 
3064  /* if the queues are full, no need to read more */
3065  if (infinite_buffer<1 &&
3066  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
3067  || (stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq) &&
3068  stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq) &&
3069  stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq)))) {
3070  /* wait 10 ms */
3071  SDL_LockMutex(wait_mutex);
3072  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3073  SDL_UnlockMutex(wait_mutex);
3074  continue;
3075  }
3076  if (!is->paused &&
3077  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
3078  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
3079  if (loop != 1 && (!loop || --loop)) {
3080  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
3081  } else if (autoexit) {
3082  ret = AVERROR_EOF;
3083  goto fail;
3084  }
3085  }
3086  ret = av_read_frame(ic, pkt);
3087  if (ret < 0) {
3088  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
3089  if (is->video_stream >= 0)
3090  packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
3091  if (is->audio_stream >= 0)
3092  packet_queue_put_nullpacket(&is->audioq, pkt, is->audio_stream);
3093  if (is->subtitle_stream >= 0)
3094  packet_queue_put_nullpacket(&is->subtitleq, pkt, is->subtitle_stream);
3095  is->eof = 1;
3096  }
3097  if (ic->pb && ic->pb->error) {
3098  if (autoexit)
3099  goto fail;
3100  else
3101  break;
3102  }
3103  SDL_LockMutex(wait_mutex);
3104  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3105  SDL_UnlockMutex(wait_mutex);
3106  continue;
3107  } else {
3108  is->eof = 0;
3109  }
3110  /* check if packet is in play range specified by user, then queue, otherwise discard */
3111  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3112  pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
3113  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3114  (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3115  av_q2d(ic->streams[pkt->stream_index]->time_base) -
3116  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3117  <= ((double)duration / 1000000);
3118  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3119  packet_queue_put(&is->audioq, pkt);
3120  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3121  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3122  packet_queue_put(&is->videoq, pkt);
3123  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3124  packet_queue_put(&is->subtitleq, pkt);
3125  } else {
3126  av_packet_unref(pkt);
3127  }
3128  }
3129 
3130  ret = 0;
3131  fail:
3132  if (ic && !is->ic)
3133  avformat_close_input(&ic);
3134 
3135  av_packet_free(&pkt);
3136  if (ret != 0) {
3137  SDL_Event event;
3138 
3139  event.type = FF_QUIT_EVENT;
3140  event.user.data1 = is;
3141  SDL_PushEvent(&event);
3142  }
3143  SDL_DestroyMutex(wait_mutex);
3144  return 0;
3145 }
3146 
3147 static VideoState *stream_open(const char *filename,
3148  const AVInputFormat *iformat)
3149 {
3150  VideoState *is;
3151 
3152  is = av_mallocz(sizeof(VideoState));
3153  if (!is)
3154  return NULL;
3155  is->last_video_stream = is->video_stream = -1;
3156  is->last_audio_stream = is->audio_stream = -1;
3157  is->last_subtitle_stream = is->subtitle_stream = -1;
3158  is->filename = av_strdup(filename);
3159  if (!is->filename)
3160  goto fail;
3161  is->iformat = iformat;
3162  is->ytop = 0;
3163  is->xleft = 0;
3164 
3165  /* start video display */
3166  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3167  goto fail;
3168  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3169  goto fail;
3170  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3171  goto fail;
3172 
3173  if (packet_queue_init(&is->videoq) < 0 ||
3174  packet_queue_init(&is->audioq) < 0 ||
3175  packet_queue_init(&is->subtitleq) < 0)
3176  goto fail;
3177 
3178  if (!(is->continue_read_thread = SDL_CreateCond())) {
3179  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
3180  goto fail;
3181  }
3182 
3183  init_clock(&is->vidclk, &is->videoq.serial);
3184  init_clock(&is->audclk, &is->audioq.serial);
3185  init_clock(&is->extclk, &is->extclk.serial);
3186  is->audio_clock_serial = -1;
3187  if (startup_volume < 0)
3188  av_log(NULL, AV_LOG_WARNING, "-volume=%d < 0, setting to 0\n", startup_volume);
3189  if (startup_volume > 100)
3190  av_log(NULL, AV_LOG_WARNING, "-volume=%d > 100, setting to 100\n", startup_volume);
3192  startup_volume = av_clip(SDL_MIX_MAXVOLUME * startup_volume / 100, 0, SDL_MIX_MAXVOLUME);
3193  is->audio_volume = startup_volume;
3194  is->muted = 0;
3195  is->av_sync_type = av_sync_type;
3196  is->read_tid = SDL_CreateThread(read_thread, "read_thread", is);
3197  if (!is->read_tid) {
3198  av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
3199 fail:
3200  stream_close(is);
3201  return NULL;
3202  }
3203  return is;
3204 }
3205 
3206 static void stream_cycle_channel(VideoState *is, int codec_type)
3207 {
3208  AVFormatContext *ic = is->ic;
3209  int start_index, stream_index;
3210  int old_index;
3211  AVStream *st;
3212  AVProgram *p = NULL;
3213  int nb_streams = is->ic->nb_streams;
3214 
3215  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3216  start_index = is->last_video_stream;
3217  old_index = is->video_stream;
3218  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3219  start_index = is->last_audio_stream;
3220  old_index = is->audio_stream;
3221  } else {
3222  start_index = is->last_subtitle_stream;
3223  old_index = is->subtitle_stream;
3224  }
3225  stream_index = start_index;
3226 
3227  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3228  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3229  if (p) {
3230  nb_streams = p->nb_stream_indexes;
3231  for (start_index = 0; start_index < nb_streams; start_index++)
3232  if (p->stream_index[start_index] == stream_index)
3233  break;
3234  if (start_index == nb_streams)
3235  start_index = -1;
3236  stream_index = start_index;
3237  }
3238  }
3239 
3240  for (;;) {
3241  if (++stream_index >= nb_streams)
3242  {
3243  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3244  {
3245  stream_index = -1;
3246  is->last_subtitle_stream = -1;
3247  goto the_end;
3248  }
3249  if (start_index == -1)
3250  return;
3251  stream_index = 0;
3252  }
3253  if (stream_index == start_index)
3254  return;
3255  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3256  if (st->codecpar->codec_type == codec_type) {
3257  /* check that parameters are OK */
3258  switch (codec_type) {
3259  case AVMEDIA_TYPE_AUDIO:
3260  if (st->codecpar->sample_rate != 0 &&
3261  st->codecpar->ch_layout.nb_channels != 0)
3262  goto the_end;
3263  break;
3264  case AVMEDIA_TYPE_VIDEO:
3265  case AVMEDIA_TYPE_SUBTITLE:
3266  goto the_end;
3267  default:
3268  break;
3269  }
3270  }
3271  }
3272  the_end:
3273  if (p && stream_index != -1)
3274  stream_index = p->stream_index[stream_index];
3275  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3277  old_index,
3278  stream_index);
3279 
3280  stream_component_close(is, old_index);
3281  stream_component_open(is, stream_index);
3282 }
3283 
3284 
3285 static void toggle_full_screen(VideoState *is)
3286 {
3287  is_full_screen = !is_full_screen;
3288  SDL_SetWindowFullscreen(window, is_full_screen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0);
3289 }
3290 
3291 static void toggle_audio_display(VideoState *is)
3292 {
3293  int next = is->show_mode;
3294  do {
3295  next = (next + 1) % SHOW_MODE_NB;
3296  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3297  if (is->show_mode != next) {
3298  is->force_refresh = 1;
3299  is->show_mode = next;
3300  }
3301 }
3302 
3303 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3304  double remaining_time = 0.0;
3305  SDL_PumpEvents();
3306  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
3307  if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3308  SDL_ShowCursor(0);
3309  cursor_hidden = 1;
3310  }
3311  if (remaining_time > 0.0)
3312  av_usleep((int64_t)(remaining_time * 1000000.0));
3313  remaining_time = REFRESH_RATE;
3314  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3315  video_refresh(is, &remaining_time);
3316  SDL_PumpEvents();
3317  }
3318 }
3319 
3320 static void seek_chapter(VideoState *is, int incr)
3321 {
3322  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3323  int i;
3324 
3325  if (!is->ic->nb_chapters)
3326  return;
3327 
3328  /* find the current chapter */
3329  for (i = 0; i < is->ic->nb_chapters; i++) {
3330  AVChapter *ch = is->ic->chapters[i];
3331  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3332  i--;
3333  break;
3334  }
3335  }
3336 
3337  i += incr;
3338  i = FFMAX(i, 0);
3339  if (i >= is->ic->nb_chapters)
3340  return;
3341 
3342  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3343  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3344  AV_TIME_BASE_Q), 0, 0);
3345 }
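/* Worked example (hypothetical numbers, not part of the original source): if the master
 * clock reads 65.0 s, pos = 65 * AV_TIME_BASE microseconds; the loop compares pos (in
 * AV_TIME_BASE_Q units) against each chapter's start in that chapter's own time_base via
 * av_compare_ts(), steps back one index to land on the current chapter, and the final
 * stream_seek() converts the target chapter's start to AV_TIME_BASE_Q with av_rescale_q(). */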
3346 
3347 /* handle an event sent by the GUI */
3348 static void event_loop(VideoState *cur_stream)
3349 {
3350  SDL_Event event;
3351  double incr, pos, frac;
3352 
3353  for (;;) {
3354  double x;
3355  refresh_loop_wait_event(cur_stream, &event);
3356  switch (event.type) {
3357  case SDL_KEYDOWN:
3358  if (exit_on_keydown || event.key.keysym.sym == SDLK_ESCAPE || event.key.keysym.sym == SDLK_q) {
3359  do_exit(cur_stream);
3360  break;
3361  }
3362  // If we don't yet have a window, skip all key events, because read_thread might still be initializing...
3363  if (!cur_stream->width)
3364  continue;
3365  switch (event.key.keysym.sym) {
3366  case SDLK_f:
3367  toggle_full_screen(cur_stream);
3368  cur_stream->force_refresh = 1;
3369  break;
3370  case SDLK_p:
3371  case SDLK_SPACE:
3372  toggle_pause(cur_stream);
3373  break;
3374  case SDLK_m:
3375  toggle_mute(cur_stream);
3376  break;
3377  case SDLK_KP_MULTIPLY:
3378  case SDLK_0:
3379  update_volume(cur_stream, 1, SDL_VOLUME_STEP);
3380  break;
3381  case SDLK_KP_DIVIDE:
3382  case SDLK_9:
3383  update_volume(cur_stream, -1, SDL_VOLUME_STEP);
3384  break;
3385  case SDLK_s: // S: Step to next frame
3386  step_to_next_frame(cur_stream);
3387  break;
3388  case SDLK_a:
3389  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3390  break;
3391  case SDLK_v:
3392  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3393  break;
3394  case SDLK_c:
3395  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3396  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3397  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3398  break;
3399  case SDLK_t:
3400  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3401  break;
3402  case SDLK_w:
3403  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3404  if (++cur_stream->vfilter_idx >= nb_vfilters)
3405  cur_stream->vfilter_idx = 0;
3406  } else {
3407  cur_stream->vfilter_idx = 0;
3408  toggle_audio_display(cur_stream);
3409  }
3410  break;
3411  case SDLK_PAGEUP:
3412  if (cur_stream->ic->nb_chapters <= 1) {
3413  incr = 600.0;
3414  goto do_seek;
3415  }
3416  seek_chapter(cur_stream, 1);
3417  break;
3418  case SDLK_PAGEDOWN:
3419  if (cur_stream->ic->nb_chapters <= 1) {
3420  incr = -600.0;
3421  goto do_seek;
3422  }
3423  seek_chapter(cur_stream, -1);
3424  break;
3425  case SDLK_LEFT:
3426  incr = seek_interval ? -seek_interval : -10.0;
3427  goto do_seek;
3428  case SDLK_RIGHT:
3429  incr = seek_interval ? seek_interval : 10.0;
3430  goto do_seek;
3431  case SDLK_UP:
3432  incr = 60.0;
3433  goto do_seek;
3434  case SDLK_DOWN:
3435  incr = -60.0;
3436  do_seek:
3437  if (seek_by_bytes) {
3438  pos = -1;
3439  if (pos < 0 && cur_stream->video_stream >= 0)
3440  pos = frame_queue_last_pos(&cur_stream->pictq);
3441  if (pos < 0 && cur_stream->audio_stream >= 0)
3442  pos = frame_queue_last_pos(&cur_stream->sampq);
3443  if (pos < 0)
3444  pos = avio_tell(cur_stream->ic->pb);
3445  if (cur_stream->ic->bit_rate)
3446  incr *= cur_stream->ic->bit_rate / 8.0;
3447  else
3448  incr *= 180000.0;
3449  pos += incr;
3450  stream_seek(cur_stream, pos, incr, 1);
3451  } else {
3452  pos = get_master_clock(cur_stream);
3453  if (isnan(pos))
3454  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3455  pos += incr;
3456  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3457  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3458  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3459  }
3460  break;
3461  default:
3462  break;
3463  }
3464  break;
3465  case SDL_MOUSEBUTTONDOWN:
3466  if (exit_on_mousedown) {
3467  do_exit(cur_stream);
3468  break;
3469  }
3470  if (event.button.button == SDL_BUTTON_LEFT) {
3471  static int64_t last_mouse_left_click = 0;
3472  if (av_gettime_relative() - last_mouse_left_click <= 500000) {
3473  toggle_full_screen(cur_stream);
3474  cur_stream->force_refresh = 1;
3475  last_mouse_left_click = 0;
3476  } else {
3477  last_mouse_left_click = av_gettime_relative();
3478  }
3479  }
3480  case SDL_MOUSEMOTION:
3481  if (cursor_hidden) {
3482  SDL_ShowCursor(1);
3483  cursor_hidden = 0;
3484  }
3485  cursor_last_shown = av_gettime_relative();
3486  if (event.type == SDL_MOUSEBUTTONDOWN) {
3487  if (event.button.button != SDL_BUTTON_RIGHT)
3488  break;
3489  x = event.button.x;
3490  } else {
3491  if (!(event.motion.state & SDL_BUTTON_RMASK))
3492  break;
3493  x = event.motion.x;
3494  }
3495  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3496  uint64_t size = avio_size(cur_stream->ic->pb);
3497  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3498  } else {
3499  int64_t ts;
3500  int ns, hh, mm, ss;
3501  int tns, thh, tmm, tss;
3502  tns = cur_stream->ic->duration / 1000000LL;
3503  thh = tns / 3600;
3504  tmm = (tns % 3600) / 60;
3505  tss = (tns % 60);
3506  frac = x / cur_stream->width;
3507  ns = frac * tns;
3508  hh = ns / 3600;
3509  mm = (ns % 3600) / 60;
3510  ss = (ns % 60);
3511  av_log(NULL, AV_LOG_INFO,
3512  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3513  hh, mm, ss, thh, tmm, tss);
3514  ts = frac * cur_stream->ic->duration;
3515  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3516  ts += cur_stream->ic->start_time;
3517  stream_seek(cur_stream, ts, 0, 0);
3518  }
3519  break;
3520  case SDL_WINDOWEVENT:
3521  switch (event.window.event) {
3522  case SDL_WINDOWEVENT_SIZE_CHANGED:
3523  screen_width = cur_stream->width = event.window.data1;
3524  screen_height = cur_stream->height = event.window.data2;
3525  if (cur_stream->vis_texture) {
3526  SDL_DestroyTexture(cur_stream->vis_texture);
3527  cur_stream->vis_texture = NULL;
3528  }
3529  if (vk_renderer)
3530  vk_renderer_resize(vk_renderer, cur_stream->width, cur_stream->height);
3531  case SDL_WINDOWEVENT_EXPOSED:
3532  cur_stream->force_refresh = 1;
3533  }
3534  break;
3535  case SDL_QUIT:
3536  case FF_QUIT_EVENT:
3537  do_exit(cur_stream);
3538  break;
3539  default:
3540  break;
3541  }
3542  }
3543 }
3544 
3545 static int opt_width(void *optctx, const char *opt, const char *arg)
3546 {
3547  double num;
3548  int ret = parse_number(opt, arg, OPT_TYPE_INT64, 1, INT_MAX, &num);
3549  if (ret < 0)
3550  return ret;
3551 
3552  screen_width = num;
3553  return 0;
3554 }
3555 
3556 static int opt_height(void *optctx, const char *opt, const char *arg)
3557 {
3558  double num;
3559  int ret = parse_number(opt, arg, OPT_TYPE_INT64, 1, INT_MAX, &num);
3560  if (ret < 0)
3561  return ret;
3562 
3563  screen_height = num;
3564  return 0;
3565 }
3566 
3567 static int opt_format(void *optctx, const char *opt, const char *arg)
3568 {
3569  file_iformat = av_find_input_format(arg);
3570  if (!file_iformat) {
3571  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3572  return AVERROR(EINVAL);
3573  }
3574  return 0;
3575 }
3576 
3577 static int opt_sync(void *optctx, const char *opt, const char *arg)
3578 {
3579  if (!strcmp(arg, "audio"))
3581  else if (!strcmp(arg, "video"))
3583  else if (!strcmp(arg, "ext"))
3585  else {
3586  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3587  exit(1);
3588  }
3589  return 0;
3590 }
3591 
3592 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3593 {
3594  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3595  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3596  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT : SHOW_MODE_NONE;
3597 
3598  if (show_mode == SHOW_MODE_NONE) {
3599  double num;
3600  int ret = parse_number(opt, arg, OPT_TYPE_INT, 0, SHOW_MODE_NB-1, &num);
3601  if (ret < 0)
3602  return ret;
3603  show_mode = num;
3604  }
3605  return 0;
3606 }
3607 
3608 static int opt_input_file(void *optctx, const char *filename)
3609 {
3610  if (input_filename) {
3611  av_log(NULL, AV_LOG_FATAL,
3612  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3613  filename, input_filename);
3614  return AVERROR(EINVAL);
3615  }
3616  if (!strcmp(filename, "-"))
3617  filename = "fd:";
3618  input_filename = av_strdup(filename);
3619  if (!input_filename)
3620  return AVERROR(ENOMEM);
3621 
3622  return 0;
3623 }
3624 
3625 static int opt_codec(void *optctx, const char *opt, const char *arg)
3626 {
3627  const char *spec = strchr(opt, ':');
3628  const char **name;
3629  if (!spec) {
3630  av_log(NULL, AV_LOG_ERROR,
3631  "No media specifier was specified in '%s' in option '%s'\n",
3632  arg, opt);
3633  return AVERROR(EINVAL);
3634  }
3635  spec++;
3636 
3637  switch (spec[0]) {
3638  case 'a' : name = &audio_codec_name; break;
3639  case 's' : name = &subtitle_codec_name; break;
3640  case 'v' : name = &video_codec_name; break;
3641  default:
3642  av_log(NULL, AV_LOG_ERROR,
3643  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3644  return AVERROR(EINVAL);
3645  }
3646 
3647  av_freep(name);
3648  *name = av_strdup(arg);
3649  return *name ? 0 : AVERROR(ENOMEM);
3650 }
3651 
3652 static int dummy;
3653 
3654 static const OptionDef options[] = {
3655  CMDUTILS_COMMON_OPTIONS
3656  { "x", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3657  { "y", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3658  { "fs", OPT_TYPE_BOOL, 0, { &is_full_screen }, "force full screen" },
3659  { "an", OPT_TYPE_BOOL, 0, { &audio_disable }, "disable audio" },
3660  { "vn", OPT_TYPE_BOOL, 0, { &video_disable }, "disable video" },
3661  { "sn", OPT_TYPE_BOOL, 0, { &subtitle_disable }, "disable subtitling" },
3662  { "ast", OPT_TYPE_STRING, OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3663  { "vst", OPT_TYPE_STRING, OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3664  { "sst", OPT_TYPE_STRING, OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3665  { "ss", OPT_TYPE_TIME, 0, { &start_time }, "seek to a given position in seconds", "pos" },
3666  { "t", OPT_TYPE_TIME, 0, { &duration }, "play \"duration\" seconds of audio/video", "duration" },
3667  { "bytes", OPT_TYPE_INT, 0, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3668  { "seek_interval", OPT_TYPE_FLOAT, 0, { &seek_interval }, "set seek interval for left/right keys, in seconds", "seconds" },
3669  { "nodisp", OPT_TYPE_BOOL, 0, { &display_disable }, "disable graphical display" },
3670  { "noborder", OPT_TYPE_BOOL, 0, { &borderless }, "borderless window" },
3671  { "alwaysontop", OPT_TYPE_BOOL, 0, { &alwaysontop }, "window always on top" },
3672  { "volume", OPT_TYPE_INT, 0, { &startup_volume}, "set startup volume 0=min 100=max", "volume" },
3673  { "f", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3674  { "stats", OPT_TYPE_BOOL, OPT_EXPERT, { &show_status }, "show status", "" },
3675  { "fast", OPT_TYPE_BOOL, OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3676  { "genpts", OPT_TYPE_BOOL, OPT_EXPERT, { &genpts }, "generate pts", "" },
3677  { "drp", OPT_TYPE_INT, OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3678  { "lowres", OPT_TYPE_INT, OPT_EXPERT, { &lowres }, "", "" },
3679  { "sync", OPT_TYPE_FUNC, OPT_FUNC_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3680  { "autoexit", OPT_TYPE_BOOL, OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3681  { "exitonkeydown", OPT_TYPE_BOOL, OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3682  { "exitonmousedown", OPT_TYPE_BOOL, OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3683  { "loop", OPT_TYPE_INT, OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3684  { "framedrop", OPT_TYPE_BOOL, OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3685  { "infbuf", OPT_TYPE_BOOL, OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3686  { "window_title", OPT_TYPE_STRING, 0, { &window_title }, "set window title", "window title" },
3687  { "left", OPT_TYPE_INT, OPT_EXPERT, { &screen_left }, "set the x position for the left of the window", "x pos" },
3688  { "top", OPT_TYPE_INT, OPT_EXPERT, { &screen_top }, "set the y position for the top of the window", "y pos" },
3689  { "vf", OPT_TYPE_FUNC, OPT_FUNC_ARG | OPT_EXPERT, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3690  { "af", OPT_TYPE_STRING, 0, { &afilters }, "set audio filters", "filter_graph" },
3691  { "rdftspeed", OPT_TYPE_INT, OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3692  { "showmode", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3693  { "i", OPT_TYPE_BOOL, 0, { &dummy}, "read specified file", "input_file"},
3694  { "codec", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3695  { "acodec", OPT_TYPE_STRING, OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3696  { "scodec", OPT_TYPE_STRING, OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3697  { "vcodec", OPT_TYPE_STRING, OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3698  { "autorotate", OPT_TYPE_BOOL, 0, { &autorotate }, "automatically rotate video", "" },
3699  { "find_stream_info", OPT_TYPE_BOOL, OPT_INPUT | OPT_EXPERT, { &find_stream_info },
3700  "read and decode the streams to fill missing information with heuristics" },
3701  { "filter_threads", OPT_TYPE_INT, OPT_EXPERT, { &filter_nbthreads }, "number of filter threads per graph" },
3702  { "enable_vulkan", OPT_TYPE_BOOL, 0, { &enable_vulkan }, "enable vulkan renderer" },
3703  { "vulkan_params", OPT_TYPE_STRING, OPT_EXPERT, { &vulkan_params }, "vulkan configuration using a list of key=value pairs separated by ':'" },
3704  { "hwaccel", OPT_TYPE_STRING, OPT_EXPERT, { &hwaccel }, "use HW accelerated decoding" },
3705  { NULL, },
3706 };
3707 
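Each entry in the table above simply pairs an option name with a type tag and a pointer to the global variable (or callback) that should receive the parsed value; the shared cmdutils parser walks this table and writes into those destinations. The following standalone sketch is not part of ffplay.c and uses invented, simplified names (OptX, option_table) rather than FFmpeg's real OptionDef/cmdutils machinery; it only illustrates the table-driven pattern.

/* Illustrative sketch only: a minimal table-driven option parser. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef enum { OPTX_BOOL, OPTX_INT } OptXType;

typedef struct {
    const char *name; /* name as given on the command line, e.g. "autoexit" */
    OptXType    type; /* how the (optional) argument is interpreted */
    void       *dst;  /* global variable that receives the parsed value */
} OptX;

static int x_autoexit;
static int x_loop = 1;

static const OptX option_table[] = {
    { "autoexit", OPTX_BOOL, &x_autoexit },
    { "loop",     OPTX_INT,  &x_loop     },
    { NULL, 0, NULL },
};

int main(int argc, char **argv)
{
    for (int i = 1; i < argc; i++) {
        if (argv[i][0] != '-')
            continue;                                 /* not an option */
        for (const OptX *o = option_table; o->name; o++) {
            if (strcmp(argv[i] + 1, o->name))
                continue;
            if (o->type == OPTX_BOOL)
                *(int *)o->dst = 1;                   /* flags take no argument */
            else if (i + 1 < argc)
                *(int *)o->dst = atoi(argv[++i]);     /* consume the next argv */
        }
    }
    printf("autoexit=%d loop=%d\n", x_autoexit, x_loop);
    return 0;
}

Invoked as "./a.out -autoexit -loop 3", this sketch prints "autoexit=1 loop=3", mirroring how ffplay's real table routes command-line options into its static configuration variables.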
3708 static void show_usage(void)
3709 {
3710  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3711  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3712  av_log(NULL, AV_LOG_INFO, "\n");
3713 }
3714 
3715 void show_help_default(const char *opt, const char *arg)
3716 {
3717  av_log_set_callback(log_callback_help);
3718  show_usage();
3719  show_help_options(options, "Main options:", 0, OPT_EXPERT);
3720  show_help_options(options, "Advanced options:", OPT_EXPERT, 0);
3721  printf("\n");
3722  show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3723  show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3724  show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3725  printf("\nWhile playing:\n"
3726  "q, ESC quit\n"
3727  "f toggle full screen\n"
3728  "p, SPC pause\n"
3729  "m toggle mute\n"
3730  "9, 0 decrease and increase volume respectively\n"
3731  "/, * decrease and increase volume respectively\n"
3732  "a cycle audio channel in the current program\n"
3733  "v cycle video channel\n"
3734  "t cycle subtitle channel in the current program\n"
3735  "c cycle program\n"
3736  "w cycle video filters or show modes\n"
3737  "s activate frame-step mode\n"
3738  "left/right seek backward/forward 10 seconds or to custom interval if -seek_interval is set\n"
3739  "down/up seek backward/forward 1 minute\n"
3740  "page down/page up seek backward/forward 10 minutes\n"
3741  "right mouse click seek to percentage in file corresponding to fraction of width\n"
3742  "left double-click toggle full screen\n"
3743  );
3744 }
3745 
3746 /* Called from the main */
3747 int main(int argc, char **argv)
3748 {
3749  int flags, ret;
3750  VideoState *is;
3751 
3752  init_dynload();
3753 
3754  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3755  parse_loglevel(argc, argv, options);
3756 
3757  /* register all codecs, demux and protocols */
3758 #if CONFIG_AVDEVICE
3759  avdevice_register_all();
3760 #endif
3761  avformat_network_init();
3762 
3763  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3764  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3765 
3766  show_banner(argc, argv, options);
3767 
3768  ret = parse_options(NULL, argc, argv, options, opt_input_file);
3769  if (ret < 0)
3770  exit(ret == AVERROR_EXIT ? 0 : 1);
3771 
3772  if (!input_filename) {
3773  show_usage();
3774  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3775  av_log(NULL, AV_LOG_FATAL,
3776  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3777  exit(1);
3778  }
3779 
3780  if (display_disable) {
3781  video_disable = 1;
3782  }
3783  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3784  if (audio_disable)
3785  flags &= ~SDL_INIT_AUDIO;
3786  else {
3787  /* Try to work around an occasional ALSA buffer underflow issue when the
3788  * period size is NPOT due to ALSA resampling by forcing the buffer size. */
3789  if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
3790  SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1);
3791  }
3792  if (display_disable)
3793  flags &= ~SDL_INIT_VIDEO;
3794  if (SDL_Init (flags)) {
3795  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3796  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3797  exit(1);
3798  }
3799 
3800  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3801  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3802 
3803  if (!display_disable) {
3804  int flags = SDL_WINDOW_HIDDEN;
3805  if (alwaysontop)
3806 #if SDL_VERSION_ATLEAST(2,0,5)
3807  flags |= SDL_WINDOW_ALWAYS_ON_TOP;
3808 #else
3809  av_log(NULL, AV_LOG_WARNING, "Your SDL version doesn't support SDL_WINDOW_ALWAYS_ON_TOP. Feature will be inactive.\n");
3810 #endif
3811  if (borderless)
3812  flags |= SDL_WINDOW_BORDERLESS;
3813  else
3814  flags |= SDL_WINDOW_RESIZABLE;
3815 
3816 #ifdef SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR
3817  SDL_SetHint(SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR, "0");
3818 #endif
3819  if (hwaccel && !enable_vulkan) {
3820  av_log(NULL, AV_LOG_INFO, "Enable vulkan renderer to support hwaccel %s\n", hwaccel);
3821  enable_vulkan = 1;
3822  }
3823  if (enable_vulkan) {
3824  vk_renderer = vk_get_renderer();
3825  if (vk_renderer) {
3826 #if SDL_VERSION_ATLEAST(2, 0, 6)
3827  flags |= SDL_WINDOW_VULKAN;
3828 #endif
3829  } else {
3830  av_log(NULL, AV_LOG_WARNING, "Doesn't support vulkan renderer, fallback to SDL renderer\n");
3831  enable_vulkan = 0;
3832  }
3833  }
3834  window = SDL_CreateWindow(program_name, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, default_width, default_height, flags);
3835  SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
3836  if (!window) {
3837  av_log(NULL, AV_LOG_FATAL, "Failed to create window: %s", SDL_GetError());
3838  do_exit(NULL);
3839  }
3840 
3841  if (vk_renderer) {
3842  AVDictionary *dict = NULL;
3843 
3844  if (vulkan_params)
3845  av_dict_parse_string(&dict, vulkan_params, "=", ":", 0);
3846  ret = vk_renderer_create(vk_renderer, window, dict);
3847  av_dict_free(&dict);
3848  if (ret < 0) {
3849  av_log(NULL, AV_LOG_FATAL, "Failed to create vulkan renderer, %s\n", av_err2str(ret));
3850  do_exit(NULL);
3851  }
3852  } else {
3853  renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
3854  if (!renderer) {
3855  av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
3856  renderer = SDL_CreateRenderer(window, -1, 0);
3857  }
3858  if (renderer) {
3859  if (!SDL_GetRendererInfo(renderer, &renderer_info))
3860  av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", renderer_info.name);
3861  }
3862  if (!renderer || !renderer_info.num_texture_formats) {
3863  av_log(NULL, AV_LOG_FATAL, "Failed to create window or renderer: %s", SDL_GetError());
3864  do_exit(NULL);
3865  }
3866  }
3867  }
3868 
3869  is = stream_open(input_filename, file_iformat);
3870  if (!is) {
3871  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3872  do_exit(NULL);
3873  }
3874 
3875  event_loop(is);
3876 
3877  /* never returns */
3878 
3879  return 0;
3880 }
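For reference, the window and renderer setup in main() follows a common SDL2 pattern: request a hardware-accelerated, vsynced renderer first and fall back to whatever SDL can provide otherwise. The standalone sketch below only illustrates that pattern under the assumption that SDL2 headers and a working display are available; it is not a substitute for ffplay's own setup (no Vulkan path, hints, or event handling).

/* Minimal SDL2 window + renderer creation with graceful fallback (sketch). */
#include <SDL.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    SDL_Window   *win;
    SDL_Renderer *ren;

    (void)argc; (void)argv;

    if (SDL_Init(SDL_INIT_VIDEO)) {
        fprintf(stderr, "SDL_Init failed: %s\n", SDL_GetError());
        return 1;
    }

    win = SDL_CreateWindow("sketch", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
                           640, 480, SDL_WINDOW_HIDDEN | SDL_WINDOW_RESIZABLE);
    if (!win) {
        fprintf(stderr, "SDL_CreateWindow failed: %s\n", SDL_GetError());
        SDL_Quit();
        return 1;
    }

    /* Prefer an accelerated, vsynced renderer... */
    ren = SDL_CreateRenderer(win, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
    if (!ren) {
        /* ...but fall back to any renderer SDL can create (e.g. software). */
        fprintf(stderr, "accelerated renderer unavailable: %s\n", SDL_GetError());
        ren = SDL_CreateRenderer(win, -1, 0);
    }
    if (!ren) {
        fprintf(stderr, "SDL_CreateRenderer failed: %s\n", SDL_GetError());
        SDL_DestroyWindow(win);
        SDL_Quit();
        return 1;
    }

    SDL_DestroyRenderer(ren);
    SDL_DestroyWindow(win);
    SDL_Quit();
    return 0;
}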