ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include "config_components.h"
28 #include <inttypes.h>
29 #include <math.h>
30 #include <limits.h>
31 #include <signal.h>
32 #include <stdint.h>
33 
34 #include "libavutil/avstring.h"
36 #include "libavutil/eval.h"
37 #include "libavutil/mathematics.h"
38 #include "libavutil/pixdesc.h"
39 #include "libavutil/imgutils.h"
40 #include "libavutil/dict.h"
41 #include "libavutil/fifo.h"
42 #include "libavutil/parseutils.h"
43 #include "libavutil/samplefmt.h"
44 #include "libavutil/time.h"
45 #include "libavutil/bprint.h"
46 #include "libavformat/avformat.h"
47 #include "libavdevice/avdevice.h"
48 #include "libswscale/swscale.h"
49 #include "libavutil/opt.h"
50 #include "libavutil/tx.h"
52 
53 #include "libavfilter/avfilter.h"
54 #include "libavfilter/buffersink.h"
55 #include "libavfilter/buffersrc.h"
56 
57 #include <SDL.h>
58 #include <SDL_thread.h>
59 
60 #include "cmdutils.h"
61 #include "ffplay_renderer.h"
62 #include "opt_common.h"
63 
64 const char program_name[] = "ffplay";
65 const int program_birth_year = 2003;
66 
67 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
68 #define MIN_FRAMES 25
69 #define EXTERNAL_CLOCK_MIN_FRAMES 2
70 #define EXTERNAL_CLOCK_MAX_FRAMES 10
71 
72 /* Minimum SDL audio buffer size, in samples. */
73 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
74 /* Calculate the actual buffer size, keeping in mind that we do not want audio callbacks to fire too frequently */
75 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
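/* For example, at a 48 kHz output rate this implies an audio buffer of at
 * least 48000 / 30 = 1600 samples (but never less than
 * SDL_AUDIO_MIN_BUFFER_SIZE), so that the SDL audio callback fires at most
 * about 30 times per second. */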
76 
77 /* Step size for volume control in dB */
78 #define SDL_VOLUME_STEP (0.75)
79 
80 /* no AV sync correction is done if below the minimum AV sync threshold */
81 #define AV_SYNC_THRESHOLD_MIN 0.04
82 /* AV sync correction is done if above the maximum AV sync threshold */
83 #define AV_SYNC_THRESHOLD_MAX 0.1
84 /* If a frame duration is longer than this, it will not be duplicated to compensate for AV sync */
85 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
86 /* no AV correction is done if the error is too big */
87 #define AV_NOSYNC_THRESHOLD 10.0
88 
89 /* maximum audio speed change to get correct sync */
90 #define SAMPLE_CORRECTION_PERCENT_MAX 10
91 
92 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
93 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
94 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
95 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
96 
97 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
98 #define AUDIO_DIFF_AVG_NB 20
99 
100 /* poll for a possibly required screen refresh at least this often; should be less than 1/fps */
101 #define REFRESH_RATE 0.01
102 
103 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
104 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
105 #define SAMPLE_ARRAY_SIZE (8 * 65536)
106 
107 #define CURSOR_HIDE_DELAY 1000000
108 
109 #define USE_ONEPASS_SUBTITLE_RENDER 1
110 
111 typedef struct MyAVPacketList {
113  int serial;
115 
116 typedef struct PacketQueue {
119  int size;
120  int64_t duration;
122  int serial;
123  SDL_mutex *mutex;
124  SDL_cond *cond;
125 } PacketQueue;
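/* Every PacketQueue carries a serial number that is incremented each time the
 * queue is flushed or (re)started; each queued packet records the serial that
 * was current when it was added.  Downstream code compares a packet's or
 * frame's serial with the queue's current serial to detect and discard stale
 * data, e.g. after a seek. */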
126 
127 #define VIDEO_PICTURE_QUEUE_SIZE 3
128 #define SUBPICTURE_QUEUE_SIZE 16
129 #define SAMPLE_QUEUE_SIZE 9
130 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
131 
132 typedef struct AudioParams {
133  int freq;
138 } AudioParams;
139 
140 typedef struct Clock {
141  double pts; /* clock base */
142  double pts_drift; /* clock base minus time at which we updated the clock */
143  double last_updated;
144  double speed;
145  int serial; /* clock is based on a packet with this serial */
146  int paused;
147  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
148 } Clock;
149 
150 typedef struct FrameData {
151  int64_t pkt_pos;
152 } FrameData;
153 
154 /* Common struct for handling all types of decoded data and allocated render buffers. */
155 typedef struct Frame {
158  int serial;
159  double pts; /* presentation timestamp for the frame */
160  double duration; /* estimated duration of the frame */
161  int64_t pos; /* byte position of the frame in the input file */
162  int width;
163  int height;
164  int format;
166  int uploaded;
167  int flip_v;
168 } Frame;
169 
170 typedef struct FrameQueue {
172  int rindex;
173  int windex;
174  int size;
175  int max_size;
178  SDL_mutex *mutex;
179  SDL_cond *cond;
181 } FrameQueue;
182 
183 enum {
184  AV_SYNC_AUDIO_MASTER, /* default choice */
186  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
187 };
188 
189 typedef struct Decoder {
190  AVPacket *pkt;
194  int finished;
196  SDL_cond *empty_queue_cond;
197  int64_t start_pts;
199  int64_t next_pts;
201  SDL_Thread *decoder_tid;
202 } Decoder;
203 
204 typedef struct VideoState {
205  SDL_Thread *read_tid;
209  int paused;
212  int seek_req;
214  int64_t seek_pos;
215  int64_t seek_rel;
218  int realtime;
219 
223 
227 
231 
233 
235 
236  double audio_clock;
238  double audio_diff_cum; /* used for AV difference average computation */
245  uint8_t *audio_buf;
246  uint8_t *audio_buf1;
247  unsigned int audio_buf_size; /* in bytes */
248  unsigned int audio_buf1_size;
249  int audio_buf_index; /* in bytes */
252  int muted;
259 
260  enum ShowMode {
262  } show_mode;
269  float *real_data;
271  int xpos;
273  SDL_Texture *vis_texture;
274  SDL_Texture *sub_texture;
275  SDL_Texture *vid_texture;
276 
280 
281  double frame_timer;
287  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
289  int eof;
290 
291  char *filename;
293  int step;
294 
296  AVFilterContext *in_video_filter; // the first filter in the video chain
297  AVFilterContext *out_video_filter; // the last filter in the video chain
298  AVFilterContext *in_audio_filter; // the first filter in the audio chain
299  AVFilterContext *out_audio_filter; // the last filter in the audio chain
300  AVFilterGraph *agraph; // audio filter graph
301 
303 
305 } VideoState;
306 
307 /* options specified by the user */
309 static const char *input_filename;
310 static const char *window_title;
311 static int default_width = 640;
312 static int default_height = 480;
313 static int screen_width = 0;
314 static int screen_height = 0;
315 static int screen_left = SDL_WINDOWPOS_CENTERED;
316 static int screen_top = SDL_WINDOWPOS_CENTERED;
317 static int audio_disable;
318 static int video_disable;
319 static int subtitle_disable;
320 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
321 static int seek_by_bytes = -1;
322 static float seek_interval = 10;
323 static int display_disable;
324 static int borderless;
325 static int alwaysontop;
326 static int startup_volume = 100;
327 static int show_status = -1;
329 static int64_t start_time = AV_NOPTS_VALUE;
330 static int64_t duration = AV_NOPTS_VALUE;
331 static int fast = 0;
332 static int genpts = 0;
333 static int lowres = 0;
334 static int decoder_reorder_pts = -1;
335 static int autoexit;
336 static int exit_on_keydown;
337 static int exit_on_mousedown;
338 static int loop = 1;
339 static int framedrop = -1;
340 static int infinite_buffer = -1;
341 static enum ShowMode show_mode = SHOW_MODE_NONE;
342 static const char *audio_codec_name;
343 static const char *subtitle_codec_name;
344 static const char *video_codec_name;
345 double rdftspeed = 0.02;
346 static int64_t cursor_last_shown;
347 static int cursor_hidden = 0;
348 static const char **vfilters_list = NULL;
349 static int nb_vfilters = 0;
350 static char *afilters = NULL;
351 static int autorotate = 1;
352 static int find_stream_info = 1;
353 static int filter_nbthreads = 0;
354 static int enable_vulkan = 0;
355 static char *vulkan_params = NULL;
356 static const char *hwaccel = NULL;
357 
358 /* current context */
359 static int is_full_screen;
360 static int64_t audio_callback_time;
361 
362 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
363 
364 static SDL_Window *window;
365 static SDL_Renderer *renderer;
366 static SDL_RendererInfo renderer_info = {0};
367 static SDL_AudioDeviceID audio_dev;
368 
370 
371 static const struct TextureFormatEntry {
375  { AV_PIX_FMT_RGB8, SDL_PIXELFORMAT_RGB332 },
376  { AV_PIX_FMT_RGB444, SDL_PIXELFORMAT_RGB444 },
377  { AV_PIX_FMT_RGB555, SDL_PIXELFORMAT_RGB555 },
378  { AV_PIX_FMT_BGR555, SDL_PIXELFORMAT_BGR555 },
379  { AV_PIX_FMT_RGB565, SDL_PIXELFORMAT_RGB565 },
380  { AV_PIX_FMT_BGR565, SDL_PIXELFORMAT_BGR565 },
381  { AV_PIX_FMT_RGB24, SDL_PIXELFORMAT_RGB24 },
382  { AV_PIX_FMT_BGR24, SDL_PIXELFORMAT_BGR24 },
383  { AV_PIX_FMT_0RGB32, SDL_PIXELFORMAT_RGB888 },
384  { AV_PIX_FMT_0BGR32, SDL_PIXELFORMAT_BGR888 },
385  { AV_PIX_FMT_NE(RGB0, 0BGR), SDL_PIXELFORMAT_RGBX8888 },
386  { AV_PIX_FMT_NE(BGR0, 0RGB), SDL_PIXELFORMAT_BGRX8888 },
387  { AV_PIX_FMT_RGB32, SDL_PIXELFORMAT_ARGB8888 },
388  { AV_PIX_FMT_RGB32_1, SDL_PIXELFORMAT_RGBA8888 },
389  { AV_PIX_FMT_BGR32, SDL_PIXELFORMAT_ABGR8888 },
390  { AV_PIX_FMT_BGR32_1, SDL_PIXELFORMAT_BGRA8888 },
391  { AV_PIX_FMT_YUV420P, SDL_PIXELFORMAT_IYUV },
392  { AV_PIX_FMT_YUYV422, SDL_PIXELFORMAT_YUY2 },
393  { AV_PIX_FMT_UYVY422, SDL_PIXELFORMAT_UYVY },
394  { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_UNKNOWN },
395 };
396 
397 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
398 {
400  if (ret < 0)
401  return ret;
402 
404  return 0;
405 }
406 
407 static inline
408 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
409  enum AVSampleFormat fmt2, int64_t channel_count2)
410 {
411  /* If channel count == 1, planar and non-planar formats are the same */
412  if (channel_count1 == 1 && channel_count2 == 1)
414  else
415  return channel_count1 != channel_count2 || fmt1 != fmt2;
416 }
417 
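/* packet_queue_put_private() expects the queue mutex to be held already: it
 * stamps the packet with the current queue serial, appends it to the FIFO and
 * updates the byte and duration counters.  packet_queue_put() is the locking
 * wrapper; it moves the caller's packet into a freshly allocated AVPacket, so
 * on success the queue owns the data and the caller's packet is left blank. */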
419 {
420  MyAVPacketList pkt1;
421  int ret;
422 
423  if (q->abort_request)
424  return -1;
425 
426 
427  pkt1.pkt = pkt;
428  pkt1.serial = q->serial;
429 
430  ret = av_fifo_write(q->pkt_list, &pkt1, 1);
431  if (ret < 0)
432  return ret;
433  q->nb_packets++;
434  q->size += pkt1.pkt->size + sizeof(pkt1);
435  q->duration += pkt1.pkt->duration;
436  /* XXX: should duplicate packet data in DV case */
437  SDL_CondSignal(q->cond);
438  return 0;
439 }
440 
442 {
443  AVPacket *pkt1;
444  int ret;
445 
446  pkt1 = av_packet_alloc();
447  if (!pkt1) {
449  return -1;
450  }
451  av_packet_move_ref(pkt1, pkt);
452 
453  SDL_LockMutex(q->mutex);
454  ret = packet_queue_put_private(q, pkt1);
455  SDL_UnlockMutex(q->mutex);
456 
457  if (ret < 0)
458  av_packet_free(&pkt1);
459 
460  return ret;
461 }
462 
463 static int packet_queue_put_nullpacket(PacketQueue *q, AVPacket *pkt, int stream_index)
464 {
465  pkt->stream_index = stream_index;
466  return packet_queue_put(q, pkt);
467 }
468 
469 /* packet queue handling */
471 {
472  memset(q, 0, sizeof(PacketQueue));
474  if (!q->pkt_list)
475  return AVERROR(ENOMEM);
476  q->mutex = SDL_CreateMutex();
477  if (!q->mutex) {
478  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
479  return AVERROR(ENOMEM);
480  }
481  q->cond = SDL_CreateCond();
482  if (!q->cond) {
483  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
484  return AVERROR(ENOMEM);
485  }
486  q->abort_request = 1;
487  return 0;
488 }
489 
491 {
492  MyAVPacketList pkt1;
493 
494  SDL_LockMutex(q->mutex);
495  while (av_fifo_read(q->pkt_list, &pkt1, 1) >= 0)
496  av_packet_free(&pkt1.pkt);
497  q->nb_packets = 0;
498  q->size = 0;
499  q->duration = 0;
500  q->serial++;
501  SDL_UnlockMutex(q->mutex);
502 }
503 
505 {
508  SDL_DestroyMutex(q->mutex);
509  SDL_DestroyCond(q->cond);
510 }
511 
513 {
514  SDL_LockMutex(q->mutex);
515 
516  q->abort_request = 1;
517 
518  SDL_CondSignal(q->cond);
519 
520  SDL_UnlockMutex(q->mutex);
521 }
522 
524 {
525  SDL_LockMutex(q->mutex);
526  q->abort_request = 0;
527  q->serial++;
528  SDL_UnlockMutex(q->mutex);
529 }
530 
531 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
532 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
533 {
534  MyAVPacketList pkt1;
535  int ret;
536 
537  SDL_LockMutex(q->mutex);
538 
539  for (;;) {
540  if (q->abort_request) {
541  ret = -1;
542  break;
543  }
544 
545  if (av_fifo_read(q->pkt_list, &pkt1, 1) >= 0) {
546  q->nb_packets--;
547  q->size -= pkt1.pkt->size + sizeof(pkt1);
548  q->duration -= pkt1.pkt->duration;
549  av_packet_move_ref(pkt, pkt1.pkt);
550  if (serial)
551  *serial = pkt1.serial;
552  av_packet_free(&pkt1.pkt);
553  ret = 1;
554  break;
555  } else if (!block) {
556  ret = 0;
557  break;
558  } else {
559  SDL_CondWait(q->cond, q->mutex);
560  }
561  }
562  SDL_UnlockMutex(q->mutex);
563  return ret;
564 }
565 
566 static int decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
567  memset(d, 0, sizeof(Decoder));
568  d->pkt = av_packet_alloc();
569  if (!d->pkt)
570  return AVERROR(ENOMEM);
571  d->avctx = avctx;
572  d->queue = queue;
573  d->empty_queue_cond = empty_queue_cond;
574  d->start_pts = AV_NOPTS_VALUE;
575  d->pkt_serial = -1;
576  return 0;
577 }
578 
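/* decoder_decode_frame() drains the codec and, when it runs out of data,
 * feeds it the next packet from the queue.  It returns 1 when a decoded frame
 * (or subtitle) is available, 0 once the end of stream is reached for the
 * current serial, and a negative error code on abort or allocation failure. */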
580  int ret = AVERROR(EAGAIN);
581 
582  for (;;) {
583  if (d->queue->serial == d->pkt_serial) {
584  do {
585  if (d->queue->abort_request)
586  return -1;
587 
588  switch (d->avctx->codec_type) {
589  case AVMEDIA_TYPE_VIDEO:
590  ret = avcodec_receive_frame(d->avctx, frame);
591  if (ret >= 0) {
592  if (decoder_reorder_pts == -1) {
594  } else if (!decoder_reorder_pts) {
595  frame->pts = frame->pkt_dts;
596  }
597  }
598  break;
599  case AVMEDIA_TYPE_AUDIO:
600  ret = avcodec_receive_frame(d->avctx, frame);
601  if (ret >= 0) {
603  if (frame->pts != AV_NOPTS_VALUE)
604  frame->pts = av_rescale_q(frame->pts, d->avctx->pkt_timebase, tb);
605  else if (d->next_pts != AV_NOPTS_VALUE)
606  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
607  if (frame->pts != AV_NOPTS_VALUE) {
608  d->next_pts = frame->pts + frame->nb_samples;
609  d->next_pts_tb = tb;
610  }
611  }
612  break;
613  }
614  if (ret == AVERROR_EOF) {
615  d->finished = d->pkt_serial;
616  avcodec_flush_buffers(d->avctx);
617  return 0;
618  }
619  if (ret >= 0)
620  return 1;
621  } while (ret != AVERROR(EAGAIN));
622  }
623 
624  do {
625  if (d->queue->nb_packets == 0)
626  SDL_CondSignal(d->empty_queue_cond);
627  if (d->packet_pending) {
628  d->packet_pending = 0;
629  } else {
630  int old_serial = d->pkt_serial;
631  if (packet_queue_get(d->queue, d->pkt, 1, &d->pkt_serial) < 0)
632  return -1;
633  if (old_serial != d->pkt_serial) {
634  avcodec_flush_buffers(d->avctx);
635  d->finished = 0;
636  d->next_pts = d->start_pts;
637  d->next_pts_tb = d->start_pts_tb;
638  }
639  }
640  if (d->queue->serial == d->pkt_serial)
641  break;
642  av_packet_unref(d->pkt);
643  } while (1);
644 
645  if (d->avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
646  int got_frame = 0;
647  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, d->pkt);
648  if (ret < 0) {
649  ret = AVERROR(EAGAIN);
650  } else {
651  if (got_frame && !d->pkt->data) {
652  d->packet_pending = 1;
653  }
654  ret = got_frame ? 0 : (d->pkt->data ? AVERROR(EAGAIN) : AVERROR_EOF);
655  }
656  av_packet_unref(d->pkt);
657  } else {
658  if (d->pkt->buf && !d->pkt->opaque_ref) {
659  FrameData *fd;
660 
661  d->pkt->opaque_ref = av_buffer_allocz(sizeof(*fd));
662  if (!d->pkt->opaque_ref)
663  return AVERROR(ENOMEM);
664  fd = (FrameData*)d->pkt->opaque_ref->data;
665  fd->pkt_pos = d->pkt->pos;
666  }
667 
668  if (avcodec_send_packet(d->avctx, d->pkt) == AVERROR(EAGAIN)) {
669  av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
670  d->packet_pending = 1;
671  } else {
672  av_packet_unref(d->pkt);
673  }
674  }
675  }
676 }
677 
678 static void decoder_destroy(Decoder *d) {
679  av_packet_free(&d->pkt);
680  avcodec_free_context(&d->avctx);
681 }
682 
684 {
685  av_frame_unref(vp->frame);
686  avsubtitle_free(&vp->sub);
687 }
688 
689 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
690 {
691  int i;
692  memset(f, 0, sizeof(FrameQueue));
693  if (!(f->mutex = SDL_CreateMutex())) {
694  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
695  return AVERROR(ENOMEM);
696  }
697  if (!(f->cond = SDL_CreateCond())) {
698  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
699  return AVERROR(ENOMEM);
700  }
701  f->pktq = pktq;
702  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
703  f->keep_last = !!keep_last;
704  for (i = 0; i < f->max_size; i++)
705  if (!(f->queue[i].frame = av_frame_alloc()))
706  return AVERROR(ENOMEM);
707  return 0;
708 }
709 
711 {
712  int i;
713  for (i = 0; i < f->max_size; i++) {
714  Frame *vp = &f->queue[i];
716  av_frame_free(&vp->frame);
717  }
718  SDL_DestroyMutex(f->mutex);
719  SDL_DestroyCond(f->cond);
720 }
721 
723 {
724  SDL_LockMutex(f->mutex);
725  SDL_CondSignal(f->cond);
726  SDL_UnlockMutex(f->mutex);
727 }
728 
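/* The frame queue is a fixed-size ring buffer.  With keep_last set, the most
 * recently displayed frame stays at rindex with rindex_shown == 1 so it can
 * be redisplayed (e.g. while paused): frame_queue_peek() returns the next
 * frame to display, frame_queue_peek_next() the one after it, and
 * frame_queue_peek_last() the frame currently on screen. */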
730 {
731  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
732 }
733 
735 {
736  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
737 }
738 
740 {
741  return &f->queue[f->rindex];
742 }
743 
745 {
746  /* wait until we have space to put a new frame */
747  SDL_LockMutex(f->mutex);
748  while (f->size >= f->max_size &&
749  !f->pktq->abort_request) {
750  SDL_CondWait(f->cond, f->mutex);
751  }
752  SDL_UnlockMutex(f->mutex);
753 
754  if (f->pktq->abort_request)
755  return NULL;
756 
757  return &f->queue[f->windex];
758 }
759 
761 {
762  /* wait until we have a readable new frame */
763  SDL_LockMutex(f->mutex);
764  while (f->size - f->rindex_shown <= 0 &&
765  !f->pktq->abort_request) {
766  SDL_CondWait(f->cond, f->mutex);
767  }
768  SDL_UnlockMutex(f->mutex);
769 
770  if (f->pktq->abort_request)
771  return NULL;
772 
773  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
774 }
775 
777 {
778  if (++f->windex == f->max_size)
779  f->windex = 0;
780  SDL_LockMutex(f->mutex);
781  f->size++;
782  SDL_CondSignal(f->cond);
783  SDL_UnlockMutex(f->mutex);
784 }
785 
787 {
788  if (f->keep_last && !f->rindex_shown) {
789  f->rindex_shown = 1;
790  return;
791  }
792  frame_queue_unref_item(&f->queue[f->rindex]);
793  if (++f->rindex == f->max_size)
794  f->rindex = 0;
795  SDL_LockMutex(f->mutex);
796  f->size--;
797  SDL_CondSignal(f->cond);
798  SDL_UnlockMutex(f->mutex);
799 }
800 
801 /* return the number of undisplayed frames in the queue */
803 {
804  return f->size - f->rindex_shown;
805 }
806 
807 /* return last shown position */
809 {
810  Frame *fp = &f->queue[f->rindex];
811  if (f->rindex_shown && fp->serial == f->pktq->serial)
812  return fp->pos;
813  else
814  return -1;
815 }
816 
817 static void decoder_abort(Decoder *d, FrameQueue *fq)
818 {
819  packet_queue_abort(d->queue);
820  frame_queue_signal(fq);
821  SDL_WaitThread(d->decoder_tid, NULL);
822  d->decoder_tid = NULL;
823  packet_queue_flush(d->queue);
824 }
825 
826 static inline void fill_rectangle(int x, int y, int w, int h)
827 {
828  SDL_Rect rect;
829  rect.x = x;
830  rect.y = y;
831  rect.w = w;
832  rect.h = h;
833  if (w && h)
834  SDL_RenderFillRect(renderer, &rect);
835 }
836 
837 static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
838 {
839  Uint32 format;
840  int access, w, h;
841  if (!*texture || SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
842  void *pixels;
843  int pitch;
844  if (*texture)
845  SDL_DestroyTexture(*texture);
846  if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
847  return -1;
848  if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
849  return -1;
850  if (init_texture) {
851  if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
852  return -1;
853  memset(pixels, 0, pitch * new_height);
854  SDL_UnlockTexture(*texture);
855  }
856  av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n", new_width, new_height, SDL_GetPixelFormatName(new_format));
857  }
858  return 0;
859 }
860 
861 static void calculate_display_rect(SDL_Rect *rect,
862  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
863  int pic_width, int pic_height, AVRational pic_sar)
864 {
865  AVRational aspect_ratio = pic_sar;
866  int64_t width, height, x, y;
867 
868  if (av_cmp_q(aspect_ratio, av_make_q(0, 1)) <= 0)
869  aspect_ratio = av_make_q(1, 1);
870 
871  aspect_ratio = av_mul_q(aspect_ratio, av_make_q(pic_width, pic_height));
872 
873  /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
874  height = scr_height;
875  width = av_rescale(height, aspect_ratio.num, aspect_ratio.den) & ~1;
876  if (width > scr_width) {
877  width = scr_width;
878  height = av_rescale(width, aspect_ratio.den, aspect_ratio.num) & ~1;
879  }
880  x = (scr_width - width) / 2;
881  y = (scr_height - height) / 2;
882  rect->x = scr_xleft + x;
883  rect->y = scr_ytop + y;
884  rect->w = FFMAX((int)width, 1);
885  rect->h = FFMAX((int)height, 1);
886 }
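/* Example: a 1920x1080 picture with a 1:1 sample aspect ratio shown in a
 * 1280x1024 area has a display aspect ratio of 16:9, so the fitted size is
 * 1280x720 and the rectangle is centered vertically at
 * y = (1024 - 720) / 2 = 152. */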
887 
888 static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
889 {
890  int i;
891  *sdl_blendmode = SDL_BLENDMODE_NONE;
892  *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN;
893  if (format == AV_PIX_FMT_RGB32 ||
897  *sdl_blendmode = SDL_BLENDMODE_BLEND;
898  for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; i++) {
900  *sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt;
901  return;
902  }
903  }
904 }
905 
906 static int upload_texture(SDL_Texture **tex, AVFrame *frame)
907 {
908  int ret = 0;
909  Uint32 sdl_pix_fmt;
910  SDL_BlendMode sdl_blendmode;
911  get_sdl_pix_fmt_and_blendmode(frame->format, &sdl_pix_fmt, &sdl_blendmode);
912  if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
913  return -1;
914  switch (sdl_pix_fmt) {
915  case SDL_PIXELFORMAT_IYUV:
916  if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
917  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0],
918  frame->data[1], frame->linesize[1],
919  frame->data[2], frame->linesize[2]);
920  } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
921  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0],
922  frame->data[1] + frame->linesize[1] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[1],
923  frame->data[2] + frame->linesize[2] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[2]);
924  } else {
925  av_log(NULL, AV_LOG_ERROR, "Mixed negative and positive linesizes are not supported.\n");
926  return -1;
927  }
928  break;
929  default:
930  if (frame->linesize[0] < 0) {
931  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
932  } else {
933  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0], frame->linesize[0]);
934  }
935  break;
936  }
937  return ret;
938 }
939 
941 {
942 #if SDL_VERSION_ATLEAST(2,0,8)
943  SDL_YUV_CONVERSION_MODE mode = SDL_YUV_CONVERSION_AUTOMATIC;
946  mode = SDL_YUV_CONVERSION_JPEG;
947  else if (frame->colorspace == AVCOL_SPC_BT709)
948  mode = SDL_YUV_CONVERSION_BT709;
950  mode = SDL_YUV_CONVERSION_BT601;
951  }
952  SDL_SetYUVConversionMode(mode); /* FIXME: no support for linear transfer */
953 #endif
954 }
955 
957 {
958  Frame *vp;
959  Frame *sp = NULL;
960  SDL_Rect rect;
961 
962  vp = frame_queue_peek_last(&is->pictq);
963  if (vk_renderer) {
965  return;
966  }
967 
968  if (is->subtitle_st) {
969  if (frame_queue_nb_remaining(&is->subpq) > 0) {
970  sp = frame_queue_peek(&is->subpq);
971 
972  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
973  if (!sp->uploaded) {
974  uint8_t* pixels[4];
975  int pitch[4];
976  int i;
977  if (!sp->width || !sp->height) {
978  sp->width = vp->width;
979  sp->height = vp->height;
980  }
981  if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
982  return;
983 
984  for (i = 0; i < sp->sub.num_rects; i++) {
985  AVSubtitleRect *sub_rect = sp->sub.rects[i];
986 
987  sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
988  sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
989  sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x);
990  sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
991 
992  is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
993  sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8,
994  sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA,
995  0, NULL, NULL, NULL);
996  if (!is->sub_convert_ctx) {
997  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
998  return;
999  }
1000  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) {
1001  sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
1002  0, sub_rect->h, pixels, pitch);
1003  SDL_UnlockTexture(is->sub_texture);
1004  }
1005  }
1006  sp->uploaded = 1;
1007  }
1008  } else
1009  sp = NULL;
1010  }
1011  }
1012 
1013  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
1015 
1016  if (!vp->uploaded) {
1017  if (upload_texture(&is->vid_texture, vp->frame) < 0) {
1019  return;
1020  }
1021  vp->uploaded = 1;
1022  vp->flip_v = vp->frame->linesize[0] < 0;
1023  }
1024 
1025  SDL_RenderCopyEx(renderer, is->vid_texture, NULL, &rect, 0, NULL, vp->flip_v ? SDL_FLIP_VERTICAL : 0);
1027  if (sp) {
1028 #if USE_ONEPASS_SUBTITLE_RENDER
1029  SDL_RenderCopy(renderer, is->sub_texture, NULL, &rect);
1030 #else
1031  int i;
1032  double xratio = (double)rect.w / (double)sp->width;
1033  double yratio = (double)rect.h / (double)sp->height;
1034  for (i = 0; i < sp->sub.num_rects; i++) {
1035  SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
1036  SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
1037  .y = rect.y + sub_rect->y * yratio,
1038  .w = sub_rect->w * xratio,
1039  .h = sub_rect->h * yratio};
1040  SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
1041  }
1042 #endif
1043  }
1044 }
1045 
1046 static inline int compute_mod(int a, int b)
1047 {
1048  return a < 0 ? a%b + b : a%b;
1049 }
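/* compute_mod() wraps negative indices back into range, e.g.
 * compute_mod(-3, SAMPLE_ARRAY_SIZE) yields SAMPLE_ARRAY_SIZE - 3, which is
 * what the circular sample_array indexing needs. */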
1050 
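/* Audio visualization: in SHOW_MODE_WAVES the most recent samples are drawn
 * as one waveform per channel; otherwise an RDFT (real FFT) is run over the
 * sample window and a single spectrogram column is painted into vis_texture
 * per refresh, with xpos sweeping across the width of the window. */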
1052 {
1053  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1054  int ch, channels, h, h2;
1055  int64_t time_diff;
1056  int rdft_bits, nb_freq;
1057 
1058  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1059  ;
1060  nb_freq = 1 << (rdft_bits - 1);
1061 
1062  /* compute display index : center on currently output samples */
1063  channels = s->audio_tgt.ch_layout.nb_channels;
1064  nb_display_channels = channels;
1065  if (!s->paused) {
1066  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1067  n = 2 * channels;
1068  delay = s->audio_write_buf_size;
1069  delay /= n;
1070 
1071  /* to be more precise, we take into account the time spent since
1072  the last buffer computation */
1073  if (audio_callback_time) {
1074  time_diff = av_gettime_relative() - audio_callback_time;
1075  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1076  }
1077 
1078  delay += 2 * data_used;
1079  if (delay < data_used)
1080  delay = data_used;
1081 
1082  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1083  if (s->show_mode == SHOW_MODE_WAVES) {
1084  h = INT_MIN;
1085  for (i = 0; i < 1000; i += channels) {
1086  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1087  int a = s->sample_array[idx];
1088  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1089  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1090  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1091  int score = a - d;
1092  if (h < score && (b ^ c) < 0) {
1093  h = score;
1094  i_start = idx;
1095  }
1096  }
1097  }
1098 
1099  s->last_i_start = i_start;
1100  } else {
1101  i_start = s->last_i_start;
1102  }
1103 
1104  if (s->show_mode == SHOW_MODE_WAVES) {
1105  SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
1106 
1107  /* total height for one channel */
1108  h = s->height / nb_display_channels;
1109  /* graph height / 2 */
1110  h2 = (h * 9) / 20;
1111  for (ch = 0; ch < nb_display_channels; ch++) {
1112  i = i_start + ch;
1113  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1114  for (x = 0; x < s->width; x++) {
1115  y = (s->sample_array[i] * h2) >> 15;
1116  if (y < 0) {
1117  y = -y;
1118  ys = y1 - y;
1119  } else {
1120  ys = y1;
1121  }
1122  fill_rectangle(s->xleft + x, ys, 1, y);
1123  i += channels;
1124  if (i >= SAMPLE_ARRAY_SIZE)
1125  i -= SAMPLE_ARRAY_SIZE;
1126  }
1127  }
1128 
1129  SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
1130 
1131  for (ch = 1; ch < nb_display_channels; ch++) {
1132  y = s->ytop + ch * h;
1133  fill_rectangle(s->xleft, y, s->width, 1);
1134  }
1135  } else {
1136  int err = 0;
1137  if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
1138  return;
1139 
1140  if (s->xpos >= s->width)
1141  s->xpos = 0;
1142  nb_display_channels= FFMIN(nb_display_channels, 2);
1143  if (rdft_bits != s->rdft_bits) {
1144  const float rdft_scale = 1.0;
1145  av_tx_uninit(&s->rdft);
1146  av_freep(&s->real_data);
1147  av_freep(&s->rdft_data);
1148  s->rdft_bits = rdft_bits;
1149  s->real_data = av_malloc_array(nb_freq, 4 *sizeof(*s->real_data));
1150  s->rdft_data = av_malloc_array(nb_freq + 1, 2 *sizeof(*s->rdft_data));
1151  err = av_tx_init(&s->rdft, &s->rdft_fn, AV_TX_FLOAT_RDFT,
1152  0, 1 << rdft_bits, &rdft_scale, 0);
1153  }
1154  if (err < 0 || !s->rdft_data) {
1155  av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1156  s->show_mode = SHOW_MODE_WAVES;
1157  } else {
1158  float *data_in[2];
1159  AVComplexFloat *data[2];
1160  SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
1161  uint32_t *pixels;
1162  int pitch;
1163  for (ch = 0; ch < nb_display_channels; ch++) {
1164  data_in[ch] = s->real_data + 2 * nb_freq * ch;
1165  data[ch] = s->rdft_data + nb_freq * ch;
1166  i = i_start + ch;
1167  for (x = 0; x < 2 * nb_freq; x++) {
1168  double w = (x-nb_freq) * (1.0 / nb_freq);
1169  data_in[ch][x] = s->sample_array[i] * (1.0 - w * w);
1170  i += channels;
1171  if (i >= SAMPLE_ARRAY_SIZE)
1172  i -= SAMPLE_ARRAY_SIZE;
1173  }
1174  s->rdft_fn(s->rdft, data[ch], data_in[ch], sizeof(float));
1175  data[ch][0].im = data[ch][nb_freq].re;
1176  data[ch][nb_freq].re = 0;
1177  }
1178  /* This is the least efficient way to do this; we should of course
1179  * access the texture directly, but it is more than fast enough. */
1180  if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
1181  pitch >>= 2;
1182  pixels += pitch * s->height;
1183  for (y = 0; y < s->height; y++) {
1184  double w = 1 / sqrt(nb_freq);
1185  int a = sqrt(w * sqrt(data[0][y].re * data[0][y].re + data[0][y].im * data[0][y].im));
1186  int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][y].re, data[1][y].im))
1187  : a;
1188  a = FFMIN(a, 255);
1189  b = FFMIN(b, 255);
1190  pixels -= pitch;
1191  *pixels = (a << 16) + (b << 8) + ((a+b) >> 1);
1192  }
1193  SDL_UnlockTexture(s->vis_texture);
1194  }
1195  SDL_RenderCopy(renderer, s->vis_texture, NULL, NULL);
1196  }
1197  if (!s->paused)
1198  s->xpos++;
1199  }
1200 }
1201 
1202 static void stream_component_close(VideoState *is, int stream_index)
1203 {
1204  AVFormatContext *ic = is->ic;
1205  AVCodecParameters *codecpar;
1206 
1207  if (stream_index < 0 || stream_index >= ic->nb_streams)
1208  return;
1209  codecpar = ic->streams[stream_index]->codecpar;
1210 
1211  switch (codecpar->codec_type) {
1212  case AVMEDIA_TYPE_AUDIO:
1213  decoder_abort(&is->auddec, &is->sampq);
1214  SDL_CloseAudioDevice(audio_dev);
1215  decoder_destroy(&is->auddec);
1216  swr_free(&is->swr_ctx);
1217  av_freep(&is->audio_buf1);
1218  is->audio_buf1_size = 0;
1219  is->audio_buf = NULL;
1220 
1221  if (is->rdft) {
1222  av_tx_uninit(&is->rdft);
1223  av_freep(&is->real_data);
1224  av_freep(&is->rdft_data);
1225  is->rdft = NULL;
1226  is->rdft_bits = 0;
1227  }
1228  break;
1229  case AVMEDIA_TYPE_VIDEO:
1230  decoder_abort(&is->viddec, &is->pictq);
1231  decoder_destroy(&is->viddec);
1232  break;
1233  case AVMEDIA_TYPE_SUBTITLE:
1234  decoder_abort(&is->subdec, &is->subpq);
1235  decoder_destroy(&is->subdec);
1236  break;
1237  default:
1238  break;
1239  }
1240 
1241  ic->streams[stream_index]->discard = AVDISCARD_ALL;
1242  switch (codecpar->codec_type) {
1243  case AVMEDIA_TYPE_AUDIO:
1244  is->audio_st = NULL;
1245  is->audio_stream = -1;
1246  break;
1247  case AVMEDIA_TYPE_VIDEO:
1248  is->video_st = NULL;
1249  is->video_stream = -1;
1250  break;
1251  case AVMEDIA_TYPE_SUBTITLE:
1252  is->subtitle_st = NULL;
1253  is->subtitle_stream = -1;
1254  break;
1255  default:
1256  break;
1257  }
1258 }
1259 
1261 {
1262  /* XXX: use a special url_shutdown call to abort parse cleanly */
1263  is->abort_request = 1;
1264  SDL_WaitThread(is->read_tid, NULL);
1265 
1266  /* close each stream */
1267  if (is->audio_stream >= 0)
1268  stream_component_close(is, is->audio_stream);
1269  if (is->video_stream >= 0)
1270  stream_component_close(is, is->video_stream);
1271  if (is->subtitle_stream >= 0)
1272  stream_component_close(is, is->subtitle_stream);
1273 
1274  avformat_close_input(&is->ic);
1275 
1276  packet_queue_destroy(&is->videoq);
1277  packet_queue_destroy(&is->audioq);
1278  packet_queue_destroy(&is->subtitleq);
1279 
1280  /* free all pictures */
1281  frame_queue_destroy(&is->pictq);
1282  frame_queue_destroy(&is->sampq);
1283  frame_queue_destroy(&is->subpq);
1284  SDL_DestroyCond(is->continue_read_thread);
1285  sws_freeContext(is->sub_convert_ctx);
1286  av_free(is->filename);
1287  if (is->vis_texture)
1288  SDL_DestroyTexture(is->vis_texture);
1289  if (is->vid_texture)
1290  SDL_DestroyTexture(is->vid_texture);
1291  if (is->sub_texture)
1292  SDL_DestroyTexture(is->sub_texture);
1293  av_free(is);
1294 }
1295 
1296 static void do_exit(VideoState *is)
1297 {
1298  if (is) {
1299  stream_close(is);
1300  }
1301  if (renderer)
1302  SDL_DestroyRenderer(renderer);
1303  if (vk_renderer)
1305  if (window)
1306  SDL_DestroyWindow(window);
1307  uninit_opts();
1310  if (show_status)
1311  printf("\n");
1312  SDL_Quit();
1313  av_log(NULL, AV_LOG_QUIET, "%s", "");
1314  exit(0);
1315 }
1316 
1317 static void sigterm_handler(int sig)
1318 {
1319  exit(123);
1320 }
1321 
1323 {
1324  SDL_Rect rect;
1325  int max_width = screen_width ? screen_width : INT_MAX;
1326  int max_height = screen_height ? screen_height : INT_MAX;
1327  if (max_width == INT_MAX && max_height == INT_MAX)
1328  max_height = height;
1329  calculate_display_rect(&rect, 0, 0, max_width, max_height, width, height, sar);
1330  default_width = rect.w;
1331  default_height = rect.h;
1332 }
1333 
1335 {
1336  int w,h;
1337 
1340 
1341  if (!window_title)
1343  SDL_SetWindowTitle(window, window_title);
1344 
1345  SDL_SetWindowSize(window, w, h);
1346  SDL_SetWindowPosition(window, screen_left, screen_top);
1347  if (is_full_screen)
1348  SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP);
1349  SDL_ShowWindow(window);
1350 
1351  is->width = w;
1352  is->height = h;
1353 
1354  return 0;
1355 }
1356 
1357 /* display the current picture, if any */
1359 {
1360  if (!is->width)
1361  video_open(is);
1362 
1363  SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1364  SDL_RenderClear(renderer);
1365  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1367  else if (is->video_st)
1369  SDL_RenderPresent(renderer);
1370 }
1371 
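/* A Clock stores pts_drift = pts - last_updated, so its current value can be
 * recovered as pts_drift + now; the (now - last_updated) * (speed - 1) term
 * accounts for playback speeds other than 1.0.  A clock whose serial no
 * longer matches its queue's serial is obsolete and reads as NAN. */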
1372 static double get_clock(Clock *c)
1373 {
1374  if (*c->queue_serial != c->serial)
1375  return NAN;
1376  if (c->paused) {
1377  return c->pts;
1378  } else {
1379  double time = av_gettime_relative() / 1000000.0;
1380  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1381  }
1382 }
1383 
1384 static void set_clock_at(Clock *c, double pts, int serial, double time)
1385 {
1386  c->pts = pts;
1387  c->last_updated = time;
1388  c->pts_drift = c->pts - time;
1389  c->serial = serial;
1390 }
1391 
1392 static void set_clock(Clock *c, double pts, int serial)
1393 {
1394  double time = av_gettime_relative() / 1000000.0;
1395  set_clock_at(c, pts, serial, time);
1396 }
1397 
1398 static void set_clock_speed(Clock *c, double speed)
1399 {
1400  set_clock(c, get_clock(c), c->serial);
1401  c->speed = speed;
1402 }
1403 
1404 static void init_clock(Clock *c, int *queue_serial)
1405 {
1406  c->speed = 1.0;
1407  c->paused = 0;
1408  c->queue_serial = queue_serial;
1409  set_clock(c, NAN, -1);
1410 }
1411 
1412 static void sync_clock_to_slave(Clock *c, Clock *slave)
1413 {
1414  double clock = get_clock(c);
1415  double slave_clock = get_clock(slave);
1416  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1417  set_clock(c, slave_clock, slave->serial);
1418 }
1419 
1421  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1422  if (is->video_st)
1423  return AV_SYNC_VIDEO_MASTER;
1424  else
1425  return AV_SYNC_AUDIO_MASTER;
1426  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1427  if (is->audio_st)
1428  return AV_SYNC_AUDIO_MASTER;
1429  else
1430  return AV_SYNC_EXTERNAL_CLOCK;
1431  } else {
1432  return AV_SYNC_EXTERNAL_CLOCK;
1433  }
1434 }
1435 
1436 /* get the current master clock value */
1438 {
1439  double val;
1440 
1441  switch (get_master_sync_type(is)) {
1442  case AV_SYNC_VIDEO_MASTER:
1443  val = get_clock(&is->vidclk);
1444  break;
1445  case AV_SYNC_AUDIO_MASTER:
1446  val = get_clock(&is->audclk);
1447  break;
1448  default:
1449  val = get_clock(&is->extclk);
1450  break;
1451  }
1452  return val;
1453 }
1454 
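/* External clock speed regulation for realtime sources: when either packet
 * queue runs low the external clock is slowed down, when both queues are
 * comfortably full it is sped up, and otherwise its speed is nudged back
 * toward 1.0 in EXTERNAL_CLOCK_SPEED_STEP increments. */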
1456  if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES ||
1457  is->audio_stream >= 0 && is->audioq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES) {
1459  } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) &&
1460  (is->audio_stream < 0 || is->audioq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES)) {
1462  } else {
1463  double speed = is->extclk.speed;
1464  if (speed != 1.0)
1465  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1466  }
1467 }
1468 
1469 /* seek in the stream */
1470 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int by_bytes)
1471 {
1472  if (!is->seek_req) {
1473  is->seek_pos = pos;
1474  is->seek_rel = rel;
1475  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1476  if (by_bytes)
1477  is->seek_flags |= AVSEEK_FLAG_BYTE;
1478  is->seek_req = 1;
1479  SDL_CondSignal(is->continue_read_thread);
1480  }
1481 }
1482 
1483 /* pause or resume the video */
1485 {
1486  if (is->paused) {
1487  is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1488  if (is->read_pause_return != AVERROR(ENOSYS)) {
1489  is->vidclk.paused = 0;
1490  }
1491  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1492  }
1493  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1494  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1495 }
1496 
1498 {
1500  is->step = 0;
1501 }
1502 
1504 {
1505  is->muted = !is->muted;
1506 }
1507 
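/* The volume is stored linearly in [0, SDL_MIX_MAXVOLUME] but adjusted in dB:
 * the current level is converted to dB, moved by +/- step dB
 * (cf. SDL_VOLUME_STEP, 0.75 dB) and converted back.  If rounding would leave
 * the value unchanged, it is bumped by at least 1 so small steps never stall. */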
1508 static void update_volume(VideoState *is, int sign, double step)
1509 {
1510  double volume_level = is->audio_volume ? (20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)) : -1000.0;
1511  int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (volume_level + sign * step) / 20.0));
1512  is->audio_volume = av_clip(is->audio_volume == new_volume ? (is->audio_volume + sign) : new_volume, 0, SDL_MIX_MAXVOLUME);
1513 }
1514 
1516 {
1517  /* if the stream is paused, unpause it, then step */
1518  if (is->paused)
1520  is->step = 1;
1521 }
1522 
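/* Example: with a nominal frame delay of 0.040s and the video clock 0.100s
 * behind the master clock (diff = -0.100), the sync threshold clamps to
 * 0.040s, so the returned delay is FFMAX(0, 0.040 - 0.100) = 0 and the next
 * frame is shown immediately; if the video were 0.100s ahead instead, the
 * delay would be doubled to 0.080s. */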
1523 static double compute_target_delay(double delay, VideoState *is)
1524 {
1525  double sync_threshold, diff = 0;
1526 
1527  /* update delay to follow master synchronisation source */
1529  /* if video is slave, we try to correct big delays by
1530  duplicating or deleting a frame */
1531  diff = get_clock(&is->vidclk) - get_master_clock(is);
1532 
1533  /* skip or repeat frame. We take into account the
1534  delay to compute the threshold. I still don't know
1535  if it is the best guess */
1536  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1537  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1538  if (diff <= -sync_threshold)
1539  delay = FFMAX(0, delay + diff);
1540  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1541  delay = delay + diff;
1542  else if (diff >= sync_threshold)
1543  delay = 2 * delay;
1544  }
1545  }
1546 
1547  av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
1548  delay, -diff);
1549 
1550  return delay;
1551 }
1552 
1553 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1554  if (vp->serial == nextvp->serial) {
1555  double duration = nextvp->pts - vp->pts;
1556  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1557  return vp->duration;
1558  else
1559  return duration;
1560  } else {
1561  return 0.0;
1562  }
1563 }
1564 
1565 static void update_video_pts(VideoState *is, double pts, int serial)
1566 {
1567  /* update current video pts */
1568  set_clock(&is->vidclk, pts, serial);
1569  sync_clock_to_slave(&is->extclk, &is->vidclk);
1570 }
1571 
1572 /* called to display each frame */
1573 static void video_refresh(void *opaque, double *remaining_time)
1574 {
1575  VideoState *is = opaque;
1576  double time;
1577 
1578  Frame *sp, *sp2;
1579 
1580  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1582 
1583  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1584  time = av_gettime_relative() / 1000000.0;
1585  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1586  video_display(is);
1587  is->last_vis_time = time;
1588  }
1589  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1590  }
1591 
1592  if (is->video_st) {
1593 retry:
1594  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1595  // nothing to do, no picture to display in the queue
1596  } else {
1597  double last_duration, duration, delay;
1598  Frame *vp, *lastvp;
1599 
1600  /* dequeue the picture */
1601  lastvp = frame_queue_peek_last(&is->pictq);
1602  vp = frame_queue_peek(&is->pictq);
1603 
1604  if (vp->serial != is->videoq.serial) {
1605  frame_queue_next(&is->pictq);
1606  goto retry;
1607  }
1608 
1609  if (lastvp->serial != vp->serial)
1610  is->frame_timer = av_gettime_relative() / 1000000.0;
1611 
1612  if (is->paused)
1613  goto display;
1614 
1615  /* compute nominal last_duration */
1616  last_duration = vp_duration(is, lastvp, vp);
1617  delay = compute_target_delay(last_duration, is);
1618 
1619  time= av_gettime_relative()/1000000.0;
1620  if (time < is->frame_timer + delay) {
1621  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1622  goto display;
1623  }
1624 
1625  is->frame_timer += delay;
1626  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1627  is->frame_timer = time;
1628 
1629  SDL_LockMutex(is->pictq.mutex);
1630  if (!isnan(vp->pts))
1631  update_video_pts(is, vp->pts, vp->serial);
1632  SDL_UnlockMutex(is->pictq.mutex);
1633 
1634  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1635  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1636  duration = vp_duration(is, vp, nextvp);
1637  if(!is->step && (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1638  is->frame_drops_late++;
1639  frame_queue_next(&is->pictq);
1640  goto retry;
1641  }
1642  }
1643 
1644  if (is->subtitle_st) {
1645  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1646  sp = frame_queue_peek(&is->subpq);
1647 
1648  if (frame_queue_nb_remaining(&is->subpq) > 1)
1649  sp2 = frame_queue_peek_next(&is->subpq);
1650  else
1651  sp2 = NULL;
1652 
1653  if (sp->serial != is->subtitleq.serial
1654  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1655  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1656  {
1657  if (sp->uploaded) {
1658  int i;
1659  for (i = 0; i < sp->sub.num_rects; i++) {
1660  AVSubtitleRect *sub_rect = sp->sub.rects[i];
1661  uint8_t *pixels;
1662  int pitch, j;
1663 
1664  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
1665  for (j = 0; j < sub_rect->h; j++, pixels += pitch)
1666  memset(pixels, 0, sub_rect->w << 2);
1667  SDL_UnlockTexture(is->sub_texture);
1668  }
1669  }
1670  }
1671  frame_queue_next(&is->subpq);
1672  } else {
1673  break;
1674  }
1675  }
1676  }
1677 
1678  frame_queue_next(&is->pictq);
1679  is->force_refresh = 1;
1680 
1681  if (is->step && !is->paused)
1683  }
1684 display:
1685  /* display picture */
1686  if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
1687  video_display(is);
1688  }
1689  is->force_refresh = 0;
1690  if (show_status) {
1691  AVBPrint buf;
1692  static int64_t last_time;
1693  int64_t cur_time;
1694  int aqsize, vqsize, sqsize;
1695  double av_diff;
1696 
1697  cur_time = av_gettime_relative();
1698  if (!last_time || (cur_time - last_time) >= 30000) {
1699  aqsize = 0;
1700  vqsize = 0;
1701  sqsize = 0;
1702  if (is->audio_st)
1703  aqsize = is->audioq.size;
1704  if (is->video_st)
1705  vqsize = is->videoq.size;
1706  if (is->subtitle_st)
1707  sqsize = is->subtitleq.size;
1708  av_diff = 0;
1709  if (is->audio_st && is->video_st)
1710  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1711  else if (is->video_st)
1712  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1713  else if (is->audio_st)
1714  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1715 
1717  av_bprintf(&buf,
1718  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1720  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1721  av_diff,
1722  is->frame_drops_early + is->frame_drops_late,
1723  aqsize / 1024,
1724  vqsize / 1024,
1725  sqsize,
1726  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_dts : 0,
1727  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_pts : 0);
1728 
1729  if (show_status == 1 && AV_LOG_INFO > av_log_get_level())
1730  fprintf(stderr, "%s", buf.str);
1731  else
1732  av_log(NULL, AV_LOG_INFO, "%s", buf.str);
1733 
1734  fflush(stderr);
1735  av_bprint_finalize(&buf, NULL);
1736 
1737  last_time = cur_time;
1738  }
1739  }
1740 }
1741 
1742 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1743 {
1744  Frame *vp;
1745 
1746 #if defined(DEBUG_SYNC)
1747  printf("frame_type=%c pts=%0.3f\n",
1748  av_get_picture_type_char(src_frame->pict_type), pts);
1749 #endif
1750 
1751  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1752  return -1;
1753 
1754  vp->sar = src_frame->sample_aspect_ratio;
1755  vp->uploaded = 0;
1756 
1757  vp->width = src_frame->width;
1758  vp->height = src_frame->height;
1759  vp->format = src_frame->format;
1760 
1761  vp->pts = pts;
1762  vp->duration = duration;
1763  vp->pos = pos;
1764  vp->serial = serial;
1765 
1766  set_default_window_size(vp->width, vp->height, vp->sar);
1767 
1768  av_frame_move_ref(vp->frame, src_frame);
1769  frame_queue_push(&is->pictq);
1770  return 0;
1771 }
1772 
1774 {
1775  int got_picture;
1776 
1777  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1778  return -1;
1779 
1780  if (got_picture) {
1781  double dpts = NAN;
1782 
1783  if (frame->pts != AV_NOPTS_VALUE)
1784  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1785 
1787 
1789  if (frame->pts != AV_NOPTS_VALUE) {
1790  double diff = dpts - get_master_clock(is);
1791  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1792  diff - is->frame_last_filter_delay < 0 &&
1793  is->viddec.pkt_serial == is->vidclk.serial &&
1794  is->videoq.nb_packets) {
1795  is->frame_drops_early++;
1797  got_picture = 0;
1798  }
1799  }
1800  }
1801  }
1802 
1803  return got_picture;
1804 }
1805 
1806 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1807  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1808 {
1809  int ret, i;
1810  int nb_filters = graph->nb_filters;
1812 
1813  if (filtergraph) {
1816  if (!outputs || !inputs) {
1817  ret = AVERROR(ENOMEM);
1818  goto fail;
1819  }
1820 
1821  outputs->name = av_strdup("in");
1822  outputs->filter_ctx = source_ctx;
1823  outputs->pad_idx = 0;
1824  outputs->next = NULL;
1825 
1826  inputs->name = av_strdup("out");
1827  inputs->filter_ctx = sink_ctx;
1828  inputs->pad_idx = 0;
1829  inputs->next = NULL;
1830 
1831  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1832  goto fail;
1833  } else {
1834  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1835  goto fail;
1836  }
1837 
1838  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1839  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1840  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1841 
1842  ret = avfilter_graph_config(graph, NULL);
1843 fail:
1846  return ret;
1847 }
1848 
1849 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1850 {
1852  char sws_flags_str[512] = "";
1853  char buffersrc_args[256];
1854  int ret;
1855  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1856  AVCodecParameters *codecpar = is->video_st->codecpar;
1857  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1858  const AVDictionaryEntry *e = NULL;
1859  int nb_pix_fmts = 0;
1860  int i, j;
1861 
1862  for (i = 0; i < renderer_info.num_texture_formats; i++) {
1863  for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; j++) {
1864  if (renderer_info.texture_formats[i] == sdl_texture_format_map[j].texture_fmt) {
1865  pix_fmts[nb_pix_fmts++] = sdl_texture_format_map[j].format;
1866  break;
1867  }
1868  }
1869  }
1870  pix_fmts[nb_pix_fmts] = AV_PIX_FMT_NONE;
1871 
1872  while ((e = av_dict_iterate(sws_dict, e))) {
1873  if (!strcmp(e->key, "sws_flags")) {
1874  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1875  } else
1876  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value);
1877  }
1878  if (strlen(sws_flags_str))
1879  sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1880 
1881  graph->scale_sws_opts = av_strdup(sws_flags_str);
1882 
1883  snprintf(buffersrc_args, sizeof(buffersrc_args),
1884  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1886  is->video_st->time_base.num, is->video_st->time_base.den,
1887  codecpar->sample_aspect_ratio.num, FFMAX(codecpar->sample_aspect_ratio.den, 1));
1888  if (fr.num && fr.den)
1889  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1890 
1891  if ((ret = avfilter_graph_create_filter(&filt_src,
1892  avfilter_get_by_name("buffer"),
1893  "ffplay_buffer", buffersrc_args, NULL,
1894  graph)) < 0)
1895  goto fail;
1896 
1897  ret = avfilter_graph_create_filter(&filt_out,
1898  avfilter_get_by_name("buffersink"),
1899  "ffplay_buffersink", NULL, NULL, graph);
1900  if (ret < 0)
1901  goto fail;
1902 
1903  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1904  goto fail;
1905 
1906  last_filter = filt_out;
1907 
1908 /* Note: this macro adds a filter before the most recently added filter, so the
1909  * filters end up being processed in reverse order */
1910 #define INSERT_FILT(name, arg) do { \
1911  AVFilterContext *filt_ctx; \
1912  \
1913  ret = avfilter_graph_create_filter(&filt_ctx, \
1914  avfilter_get_by_name(name), \
1915  "ffplay_" name, arg, NULL, graph); \
1916  if (ret < 0) \
1917  goto fail; \
1918  \
1919  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1920  if (ret < 0) \
1921  goto fail; \
1922  \
1923  last_filter = filt_ctx; \
1924 } while (0)
1925 
1926  if (autorotate) {
1927  double theta = 0.0;
1928  int32_t *displaymatrix = NULL;
1930  if (sd)
1931  displaymatrix = (int32_t *)sd->data;
1932  if (!displaymatrix) {
1933  const AVPacketSideData *sd = av_packet_side_data_get(is->video_st->codecpar->coded_side_data,
1934  is->video_st->codecpar->nb_coded_side_data,
1936  if (sd)
1937  displaymatrix = (int32_t *)sd->data;
1938  }
1939  theta = get_rotation(displaymatrix);
1940 
1941  if (fabs(theta - 90) < 1.0) {
1942  INSERT_FILT("transpose", "clock");
1943  } else if (fabs(theta - 180) < 1.0) {
1944  INSERT_FILT("hflip", NULL);
1945  INSERT_FILT("vflip", NULL);
1946  } else if (fabs(theta - 270) < 1.0) {
1947  INSERT_FILT("transpose", "cclock");
1948  } else if (fabs(theta) > 1.0) {
1949  char rotate_buf[64];
1950  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1951  INSERT_FILT("rotate", rotate_buf);
1952  }
1953  }
1954 
1955  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
1956  goto fail;
1957 
1958  is->in_video_filter = filt_src;
1959  is->out_video_filter = filt_out;
1960 
1961 fail:
1962  return ret;
1963 }
1964 
1965 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1966 {
1968  int sample_rates[2] = { 0, -1 };
1969  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1970  char aresample_swr_opts[512] = "";
1971  const AVDictionaryEntry *e = NULL;
1972  AVBPrint bp;
1973  char asrc_args[256];
1974  int ret;
1975 
1976  avfilter_graph_free(&is->agraph);
1977  if (!(is->agraph = avfilter_graph_alloc()))
1978  return AVERROR(ENOMEM);
1979  is->agraph->nb_threads = filter_nbthreads;
1980 
1982 
1983  while ((e = av_dict_iterate(swr_opts, e)))
1984  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
1985  if (strlen(aresample_swr_opts))
1986  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
1987  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
1988 
1989  av_channel_layout_describe_bprint(&is->audio_filter_src.ch_layout, &bp);
1990 
1991  ret = snprintf(asrc_args, sizeof(asrc_args),
1992  "sample_rate=%d:sample_fmt=%s:time_base=%d/%d:channel_layout=%s",
1993  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
1994  1, is->audio_filter_src.freq, bp.str);
1995 
1996  ret = avfilter_graph_create_filter(&filt_asrc,
1997  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1998  asrc_args, NULL, is->agraph);
1999  if (ret < 0)
2000  goto end;
2001 
2002 
2003  ret = avfilter_graph_create_filter(&filt_asink,
2004  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
2005  NULL, NULL, is->agraph);
2006  if (ret < 0)
2007  goto end;
2008 
2009  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
2010  goto end;
2011  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
2012  goto end;
2013 
2014  if (force_output_format) {
2015  sample_rates [0] = is->audio_tgt.freq;
2016  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
2017  goto end;
2018  if ((ret = av_opt_set(filt_asink, "ch_layouts", bp.str, AV_OPT_SEARCH_CHILDREN)) < 0)
2019  goto end;
2020  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2021  goto end;
2022  }
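 /* force_output_format is used on the reconfiguration path in audio_thread():
  * it pins the sink to the sample rate of the already opened audio device
  * (is->audio_tgt) and to a fixed channel layout, so the SDL callback keeps
  * receiving data it can play without reopening the device.  The initial call
  * from stream_component_open() passes 0, leaves the sink unconstrained, and
  * then opens the device to match whatever the sink negotiates. */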
2023 
2024 
2025  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
2026  goto end;
2027 
2028  is->in_audio_filter = filt_asrc;
2029  is->out_audio_filter = filt_asink;
2030 
2031 end:
2032  if (ret < 0)
2033  avfilter_graph_free(&is->agraph);
2034  av_bprint_finalize(&bp, NULL);
2035 
2036  return ret;
2037 }
2038 
2039 static int audio_thread(void *arg)
2040 {
2041  VideoState *is = arg;
2042  AVFrame *frame = av_frame_alloc();
2043  Frame *af;
2044  int last_serial = -1;
2045  int reconfigure;
2046  int got_frame = 0;
2047  AVRational tb;
2048  int ret = 0;
2049 
2050  if (!frame)
2051  return AVERROR(ENOMEM);
2052 
2053  do {
2054  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2055  goto the_end;
2056 
2057  if (got_frame) {
2058  tb = (AVRational){1, frame->sample_rate};
2059 
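  /* The abuffer source was created with a fixed sample format, channel layout
   * and sample rate, and pkt_serial changes whenever a seek flushes the
   * queues; if any of these no longer matches the decoded frame, the audio
   * filter graph has to be rebuilt before the frame can be pushed into it. */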
2060  reconfigure =
2061  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.ch_layout.nb_channels,
2062  frame->format, frame->ch_layout.nb_channels) ||
2063  av_channel_layout_compare(&is->audio_filter_src.ch_layout, &frame->ch_layout) ||
2064  is->audio_filter_src.freq != frame->sample_rate ||
2065  is->auddec.pkt_serial != last_serial;
2066 
2067  if (reconfigure) {
2068  char buf1[1024], buf2[1024];
2069  av_channel_layout_describe(&is->audio_filter_src.ch_layout, buf1, sizeof(buf1));
2070  av_channel_layout_describe(&frame->ch_layout, buf2, sizeof(buf2));
2071  av_log(NULL, AV_LOG_DEBUG,
2072  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2073  is->audio_filter_src.freq, is->audio_filter_src.ch_layout.nb_channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2074  frame->sample_rate, frame->ch_layout.nb_channels, av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2075 
2076  is->audio_filter_src.fmt = frame->format;
2077  ret = av_channel_layout_copy(&is->audio_filter_src.ch_layout, &frame->ch_layout);
2078  if (ret < 0)
2079  goto the_end;
2080  is->audio_filter_src.freq = frame->sample_rate;
2081  last_serial = is->auddec.pkt_serial;
2082 
2083  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2084  goto the_end;
2085  }
2086 
2087  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2088  goto the_end;
2089 
2090  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2091  FrameData *fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL;
2092  tb = av_buffersink_get_time_base(is->out_audio_filter);
2093  if (!(af = frame_queue_peek_writable(&is->sampq)))
2094  goto the_end;
2095 
2096  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2097  af->pos = fd ? fd->pkt_pos : -1;
2098  af->serial = is->auddec.pkt_serial;
2099  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2100 
2101  av_frame_move_ref(af->frame, frame);
2102  frame_queue_push(&is->sampq);
2103 
2104  if (is->audioq.serial != is->auddec.pkt_serial)
2105  break;
2106  }
2107  if (ret == AVERROR_EOF)
2108  is->auddec.finished = is->auddec.pkt_serial;
2109  }
2110  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2111  the_end:
2112  avfilter_graph_free(&is->agraph);
2113  av_frame_free(&frame);
2114  return ret;
2115 }
2116 
2117 static int decoder_start(Decoder *d, int (*fn)(void *), const char *thread_name, void* arg)
2118 {
2119  packet_queue_start(d->queue);
2120  d->decoder_tid = SDL_CreateThread(fn, thread_name, arg);
2121  if (!d->decoder_tid) {
2122  av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
2123  return AVERROR(ENOMEM);
2124  }
2125  return 0;
2126 }
2127 
2128 static int video_thread(void *arg)
2129 {
2130  VideoState *is = arg;
2131  AVFrame *frame = av_frame_alloc();
2132  double pts;
2133  double duration;
2134  int ret;
2135  AVRational tb = is->video_st->time_base;
2136  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2137 
2138  AVFilterGraph *graph = NULL;
2139  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2140  int last_w = 0;
2141  int last_h = 0;
2142  enum AVPixelFormat last_format = -2;
2143  int last_serial = -1;
2144  int last_vfilter_idx = 0;
2145 
2146  if (!frame)
2147  return AVERROR(ENOMEM);
2148 
2149  for (;;) {
2150  ret = get_video_frame(is, frame);
2151  if (ret < 0)
2152  goto the_end;
2153  if (!ret)
2154  continue;
2155 
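  /* The buffersrc parameters are fixed when the graph is created, so the video
   * filter graph is rebuilt whenever the decoded frame's size or pixel format
   * changes, a seek bumps the packet serial, or the user cycles through -vf
   * graphs with the 'w' key (is->vfilter_idx). */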
2156  if ( last_w != frame->width
2157  || last_h != frame->height
2158  || last_format != frame->format
2159  || last_serial != is->viddec.pkt_serial
2160  || last_vfilter_idx != is->vfilter_idx) {
2161  av_log(NULL, AV_LOG_DEBUG,
2162  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2163  last_w, last_h,
2164  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2165  frame->width, frame->height,
2166  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2167  avfilter_graph_free(&graph);
2168  graph = avfilter_graph_alloc();
2169  if (!graph) {
2170  ret = AVERROR(ENOMEM);
2171  goto the_end;
2172  }
2173  graph->nb_threads = filter_nbthreads;
2174  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2175  SDL_Event event;
2176  event.type = FF_QUIT_EVENT;
2177  event.user.data1 = is;
2178  SDL_PushEvent(&event);
2179  goto the_end;
2180  }
2181  filt_in = is->in_video_filter;
2182  filt_out = is->out_video_filter;
2183  last_w = frame->width;
2184  last_h = frame->height;
2185  last_format = frame->format;
2186  last_serial = is->viddec.pkt_serial;
2187  last_vfilter_idx = is->vfilter_idx;
2188  frame_rate = av_buffersink_get_frame_rate(filt_out);
2189  }
2190 
2191  ret = av_buffersrc_add_frame(filt_in, frame);
2192  if (ret < 0)
2193  goto the_end;
2194 
2195  while (ret >= 0) {
2196  FrameData *fd;
2197 
2198  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2199 
2200  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2201  if (ret < 0) {
2202  if (ret == AVERROR_EOF)
2203  is->viddec.finished = is->viddec.pkt_serial;
2204  ret = 0;
2205  break;
2206  }
2207 
2208  fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL;
2209 
2210  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2211  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2212  is->frame_last_filter_delay = 0;
2213  tb = av_buffersink_get_time_base(filt_out);
2214  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2215  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2216  ret = queue_picture(is, frame, pts, duration, fd ? fd->pkt_pos : -1, is->viddec.pkt_serial);
2217  av_frame_unref(frame);
2218  if (is->videoq.serial != is->viddec.pkt_serial)
2219  break;
2220  }
2221 
2222  if (ret < 0)
2223  goto the_end;
2224  }
2225  the_end:
2226  avfilter_graph_free(&graph);
2227  av_frame_free(&frame);
2228  return 0;
2229 }
2230 
2231 static int subtitle_thread(void *arg)
2232 {
2233  VideoState *is = arg;
2234  Frame *sp;
2235  int got_subtitle;
2236  double pts;
2237 
2238  for (;;) {
2239  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2240  return 0;
2241 
2242  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2243  break;
2244 
2245  pts = 0;
2246 
2247  if (got_subtitle && sp->sub.format == 0) {
2248  if (sp->sub.pts != AV_NOPTS_VALUE)
2249  pts = sp->sub.pts / (double)AV_TIME_BASE;
2250  sp->pts = pts;
2251  sp->serial = is->subdec.pkt_serial;
2252  sp->width = is->subdec.avctx->width;
2253  sp->height = is->subdec.avctx->height;
2254  sp->uploaded = 0;
2255 
2256  /* now we can update the picture count */
2257  frame_queue_push(&is->subpq);
2258  } else if (got_subtitle) {
2259  avsubtitle_free(&sp->sub);
2260  }
2261  }
2262  return 0;
2263 }
2264 
2265 /* copy samples for viewing in editor window */
2266 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2267 {
2268  int size, len;
2269 
2270  size = samples_size / sizeof(short);
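 /* sample_array is a fixed-size ring buffer feeding the waveform/RDFT
  * visualisation: copy in chunks and wrap the write index at
  * SAMPLE_ARRAY_SIZE. */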
2271  while (size > 0) {
2272  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2273  if (len > size)
2274  len = size;
2275  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2276  samples += len;
2277  is->sample_array_index += len;
2278  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2279  is->sample_array_index = 0;
2280  size -= len;
2281  }
2282 }
2283 
2284 /* return the wanted number of samples to get better sync if sync_type is video
2285  * or external master clock */
2286 static int synchronize_audio(VideoState *is, int nb_samples)
2287 {
2288  int wanted_nb_samples = nb_samples;
2289 
2290  /* if not master, then we try to remove or add samples to correct the clock */
2291  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2292  double diff, avg_diff;
2293  int min_nb_samples, max_nb_samples;
2294 
2295  diff = get_clock(&is->audclk) - get_master_clock(is);
2296 
2297  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2298  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2299  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2300  /* not enough measures to have a correct estimate */
2301  is->audio_diff_avg_count++;
2302  } else {
2303  /* estimate the A-V difference */
2304  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
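    /* audio_diff_cum follows the recurrence c = diff + coef * c_prev, i.e. an
     * exponentially weighted sum of past A-V differences; multiplying by
     * (1 - coef) normalises it into a weighted average.  With
     * coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB) the last AUDIO_DIFF_AVG_NB
     * differences carry about 99% of the total weight. */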
2305 
2306  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2307  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2308  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2309  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2310  wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2311  }
2312  av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2313  diff, avg_diff, wanted_nb_samples - nb_samples,
2314  is->audio_clock, is->audio_diff_threshold);
2315  }
2316  } else {
2317  /* the difference is too big: it may be due to initial PTS errors, so
2318  reset the A-V filter */
2319  is->audio_diff_avg_count = 0;
2320  is->audio_diff_cum = 0;
2321  }
2322  }
2323 
2324  return wanted_nb_samples;
2325 }
2326 
2327 /**
2328  * Decode one audio frame and return its uncompressed size.
2329  *
2330  * The processed audio frame is decoded, converted if required, and
2331  * stored in is->audio_buf, with size in bytes given by the return
2332  * value.
2333  */
2334 static int audio_decode_frame(VideoState *is)
2335 {
2336  int data_size, resampled_data_size;
2337  av_unused double audio_clock0;
2338  int wanted_nb_samples;
2339  Frame *af;
2340 
2341  if (is->paused)
2342  return -1;
2343 
2344  do {
2345 #if defined(_WIN32)
2346  while (frame_queue_nb_remaining(&is->sampq) == 0) {
2347  if ((av_gettime_relative() - audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
2348  return -1;
2349  av_usleep (1000);
2350  }
2351 #endif
2352  if (!(af = frame_queue_peek_readable(&is->sampq)))
2353  return -1;
2354  frame_queue_next(&is->sampq);
2355  } while (af->serial != is->audioq.serial);
2356 
2357  data_size = av_samples_get_buffer_size(NULL, af->frame->ch_layout.nb_channels,
2358  af->frame->nb_samples,
2359  af->frame->format, 1);
2360 
2361  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2362 
2363  if (af->frame->format != is->audio_src.fmt ||
2364  av_channel_layout_compare(&af->frame->ch_layout, &is->audio_src.ch_layout) ||
2365  af->frame->sample_rate != is->audio_src.freq ||
2366  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2367  swr_free(&is->swr_ctx);
2368  swr_alloc_set_opts2(&is->swr_ctx,
2369  &is->audio_tgt.ch_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2370  &af->frame->ch_layout, af->frame->format, af->frame->sample_rate,
2371  0, NULL);
2372  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2373  av_log(NULL, AV_LOG_ERROR,
2374  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2375  af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), af->frame->ch_layout.nb_channels,
2376  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.ch_layout.nb_channels);
2377  swr_free(&is->swr_ctx);
2378  return -1;
2379  }
2380  if (av_channel_layout_copy(&is->audio_src.ch_layout, &af->frame->ch_layout) < 0)
2381  return -1;
2382  is->audio_src.freq = af->frame->sample_rate;
2383  is->audio_src.fmt = af->frame->format;
2384  }
2385 
2386  if (is->swr_ctx) {
2387  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2388  uint8_t **out = &is->audio_buf1;
2389  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2390  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.ch_layout.nb_channels, out_count, is->audio_tgt.fmt, 0);
2391  int len2;
2392  if (out_size < 0) {
2393  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2394  return -1;
2395  }
2396  if (wanted_nb_samples != af->frame->nb_samples) {
2397  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2398  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2399  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2400  return -1;
2401  }
2402  }
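  /* swr_set_compensation() asks the resampler to add or drop
   * (wanted_nb_samples - nb_samples) samples (converted to the output rate)
   * over the span of this frame's output, gently stretching or squeezing the
   * audio so its clock drifts back toward the master clock chosen in
   * synchronize_audio(). */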
2403  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2404  if (!is->audio_buf1)
2405  return AVERROR(ENOMEM);
2406  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2407  if (len2 < 0) {
2408  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2409  return -1;
2410  }
2411  if (len2 == out_count) {
2412  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2413  if (swr_init(is->swr_ctx) < 0)
2414  swr_free(&is->swr_ctx);
2415  }
2416  is->audio_buf = is->audio_buf1;
2417  resampled_data_size = len2 * is->audio_tgt.ch_layout.nb_channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2418  } else {
2419  is->audio_buf = af->frame->data[0];
2420  resampled_data_size = data_size;
2421  }
2422 
2423  audio_clock0 = is->audio_clock;
2424  /* update the audio clock with the pts */
2425  if (!isnan(af->pts))
2426  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2427  else
2428  is->audio_clock = NAN;
2429  is->audio_clock_serial = af->serial;
2430 #ifdef DEBUG
2431  {
2432  static double last_clock;
2433  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2434  is->audio_clock - last_clock,
2435  is->audio_clock, audio_clock0);
2436  last_clock = is->audio_clock;
2437  }
2438 #endif
2439  return resampled_data_size;
2440 }
2441 
2442 /* prepare a new audio buffer */
2443 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2444 {
2445  VideoState *is = opaque;
2446  int audio_size, len1;
2447 
2448  audio_callback_time = av_gettime_relative();
2449 
2450  while (len > 0) {
2451  if (is->audio_buf_index >= is->audio_buf_size) {
2452  audio_size = audio_decode_frame(is);
2453  if (audio_size < 0) {
2454  /* if error, just output silence */
2455  is->audio_buf = NULL;
2456  is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2457  } else {
2458  if (is->show_mode != SHOW_MODE_VIDEO)
2459  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2460  is->audio_buf_size = audio_size;
2461  }
2462  is->audio_buf_index = 0;
2463  }
2464  len1 = is->audio_buf_size - is->audio_buf_index;
2465  if (len1 > len)
2466  len1 = len;
2467  if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
2468  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2469  else {
2470  memset(stream, 0, len1);
2471  if (!is->muted && is->audio_buf)
2472  SDL_MixAudioFormat(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, is->audio_volume);
2473  }
2474  len -= len1;
2475  stream += len1;
2476  is->audio_buf_index += len1;
2477  }
2478  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2479  /* Let's assume the audio driver that is used by SDL has two periods. */
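 /* is->audio_clock is the PTS just past the last decoded sample; subtracting
  * the data that is still buffered (two hardware periods plus the unwritten
  * remainder of audio_buf) divided by bytes_per_sec approximates the PTS of
  * the sample currently being heard, and that is what the audio clock is set
  * to here. */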
2480  if (!isnan(is->audio_clock)) {
2481  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2482  sync_clock_to_slave(&is->extclk, &is->audclk);
2483  }
2484 }
2485 
2486 static int audio_open(void *opaque, AVChannelLayout *wanted_channel_layout, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2487 {
2488  SDL_AudioSpec wanted_spec, spec;
2489  const char *env;
2490  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2491  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
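 /* Fallback strategy when SDL rejects the requested spec: next_nb_channels[],
  * indexed by the channel count that just failed (clamped to 7), gives the
  * next channel count to try; once it reaches 0, the sample rate is stepped
  * down through next_sample_rates[] with the original channel count restored,
  * until a combination is accepted or everything has been tried. */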
2492  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2493  int wanted_nb_channels = wanted_channel_layout->nb_channels;
2494 
2495  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2496  if (env) {
2497  wanted_nb_channels = atoi(env);
2498  av_channel_layout_uninit(wanted_channel_layout);
2499  av_channel_layout_default(wanted_channel_layout, wanted_nb_channels);
2500  }
2501  if (wanted_channel_layout->order != AV_CHANNEL_ORDER_NATIVE) {
2502  av_channel_layout_uninit(wanted_channel_layout);
2503  av_channel_layout_default(wanted_channel_layout, wanted_nb_channels);
2504  }
2505  wanted_nb_channels = wanted_channel_layout->nb_channels;
2506  wanted_spec.channels = wanted_nb_channels;
2507  wanted_spec.freq = wanted_sample_rate;
2508  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2509  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2510  return -1;
2511  }
2512  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2513  next_sample_rate_idx--;
2514  wanted_spec.format = AUDIO_S16SYS;
2515  wanted_spec.silence = 0;
2516  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
2517  wanted_spec.callback = sdl_audio_callback;
2518  wanted_spec.userdata = opaque;
2519  while (!(audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE))) {
2520  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2521  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2522  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2523  if (!wanted_spec.channels) {
2524  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2525  wanted_spec.channels = wanted_nb_channels;
2526  if (!wanted_spec.freq) {
2527  av_log(NULL, AV_LOG_ERROR,
2528  "No more combinations to try, audio open failed\n");
2529  return -1;
2530  }
2531  }
2532  av_channel_layout_default(wanted_channel_layout, wanted_spec.channels);
2533  }
2534  if (spec.format != AUDIO_S16SYS) {
2535  av_log(NULL, AV_LOG_ERROR,
2536  "SDL advised audio format %d is not supported!\n", spec.format);
2537  return -1;
2538  }
2539  if (spec.channels != wanted_spec.channels) {
2540  av_channel_layout_uninit(wanted_channel_layout);
2541  av_channel_layout_default(wanted_channel_layout, spec.channels);
2542  if (wanted_channel_layout->order != AV_CHANNEL_ORDER_NATIVE) {
2543  av_log(NULL, AV_LOG_ERROR,
2544  "SDL advised channel count %d is not supported!\n", spec.channels);
2545  return -1;
2546  }
2547  }
2548 
2549  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2550  audio_hw_params->freq = spec.freq;
2551  if (av_channel_layout_copy(&audio_hw_params->ch_layout, wanted_channel_layout) < 0)
2552  return -1;
2553  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->ch_layout.nb_channels, 1, audio_hw_params->fmt, 1);
2554  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->ch_layout.nb_channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2555  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2556  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2557  return -1;
2558  }
2559  return spec.size;
2560 }
2561 
2562 static int create_hwaccel(AVBufferRef **device_ctx)
2563 {
2564  enum AVHWDeviceType type;
2565  int ret;
2566  AVBufferRef *vk_dev;
2567 
2568  *device_ctx = NULL;
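 /* When -hwaccel is requested, first try to derive the decoder's hardware
  * device from the Vulkan renderer's device so decoded frames can be
  * displayed without a copy through system memory; if derivation is not
  * supported, fall back to creating an independent device of the requested
  * type. */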
2569 
2570  if (!hwaccel)
2571  return 0;
2572 
2573  type = av_hwdevice_find_type_by_name(hwaccel);
2574  if (type == AV_HWDEVICE_TYPE_NONE)
2575  return AVERROR(ENOTSUP);
2576 
2577  ret = vk_renderer_get_hw_dev(vk_renderer, &vk_dev);
2578  if (ret < 0)
2579  return ret;
2580 
2581  ret = av_hwdevice_ctx_create_derived(device_ctx, type, vk_dev, 0);
2582  if (!ret)
2583  return 0;
2584 
2585  if (ret != AVERROR(ENOSYS))
2586  return ret;
2587 
2588  av_log(NULL, AV_LOG_WARNING, "Derive %s from vulkan not supported.\n", hwaccel);
2589  ret = av_hwdevice_ctx_create(device_ctx, type, NULL, NULL, 0);
2590  return ret;
2591 }
2592 
2593 /* open a given stream. Return 0 if OK */
2594 static int stream_component_open(VideoState *is, int stream_index)
2595 {
2596  AVFormatContext *ic = is->ic;
2597  AVCodecContext *avctx;
2598  const AVCodec *codec;
2599  const char *forced_codec_name = NULL;
2600  AVDictionary *opts = NULL;
2601  const AVDictionaryEntry *t = NULL;
2602  int sample_rate;
2603  AVChannelLayout ch_layout = { 0 };
2604  int ret = 0;
2605  int stream_lowres = lowres;
2606 
2607  if (stream_index < 0 || stream_index >= ic->nb_streams)
2608  return -1;
2609 
2610  avctx = avcodec_alloc_context3(NULL);
2611  if (!avctx)
2612  return AVERROR(ENOMEM);
2613 
2614  ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
2615  if (ret < 0)
2616  goto fail;
2617  avctx->pkt_timebase = ic->streams[stream_index]->time_base;
2618 
2619  codec = avcodec_find_decoder(avctx->codec_id);
2620 
2621  switch(avctx->codec_type){
2622  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2623  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2624  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2625  }
2626  if (forced_codec_name)
2627  codec = avcodec_find_decoder_by_name(forced_codec_name);
2628  if (!codec) {
2629  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2630  "No codec could be found with name '%s'\n", forced_codec_name);
2631  else av_log(NULL, AV_LOG_WARNING,
2632  "No decoder could be found for codec %s\n", avcodec_get_name(avctx->codec_id));
2633  ret = AVERROR(EINVAL);
2634  goto fail;
2635  }
2636 
2637  avctx->codec_id = codec->id;
2638  if (stream_lowres > codec->max_lowres) {
2639  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2640  codec->max_lowres);
2641  stream_lowres = codec->max_lowres;
2642  }
2643  avctx->lowres = stream_lowres;
2644 
2645  if (fast)
2646  avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2647 
2648  ret = filter_codec_opts(codec_opts, avctx->codec_id, ic,
2649  ic->streams[stream_index], codec, &opts);
2650  if (ret < 0)
2651  goto fail;
2652 
2653  if (!av_dict_get(opts, "threads", NULL, 0))
2654  av_dict_set(&opts, "threads", "auto", 0);
2655  if (stream_lowres)
2656  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2657 
2658  av_dict_set(&opts, "flags", "+copy_opaque", AV_DICT_MULTIKEY);
2659 
2660  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
2661  ret = create_hwaccel(&avctx->hw_device_ctx);
2662  if (ret < 0)
2663  goto fail;
2664  }
2665 
2666  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2667  goto fail;
2668  }
2669  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2670  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2671  ret = AVERROR_OPTION_NOT_FOUND;
2672  goto fail;
2673  }
2674 
2675  is->eof = 0;
2676  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2677  switch (avctx->codec_type) {
2678  case AVMEDIA_TYPE_AUDIO:
2679  {
2680  AVFilterContext *sink;
2681 
2682  is->audio_filter_src.freq = avctx->sample_rate;
2683  ret = av_channel_layout_copy(&is->audio_filter_src.ch_layout, &avctx->ch_layout);
2684  if (ret < 0)
2685  goto fail;
2686  is->audio_filter_src.fmt = avctx->sample_fmt;
2687  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2688  goto fail;
2689  sink = is->out_audio_filter;
2690  sample_rate = av_buffersink_get_sample_rate(sink);
2691  ret = av_buffersink_get_ch_layout(sink, &ch_layout);
2692  if (ret < 0)
2693  goto fail;
2694  }
2695 
2696  /* prepare audio output */
2697  if ((ret = audio_open(is, &ch_layout, sample_rate, &is->audio_tgt)) < 0)
2698  goto fail;
2699  is->audio_hw_buf_size = ret;
2700  is->audio_src = is->audio_tgt;
2701  is->audio_buf_size = 0;
2702  is->audio_buf_index = 0;
2703 
2704  /* init averaging filter */
2705  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2706  is->audio_diff_avg_count = 0;
2707  /* since we do not have a precise enough measure of audio FIFO fullness,
2708  we correct audio sync only if the error is larger than this threshold */
2709  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2710 
2711  is->audio_stream = stream_index;
2712  is->audio_st = ic->streams[stream_index];
2713 
2714  if ((ret = decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread)) < 0)
2715  goto fail;
2716  if (is->ic->iformat->flags & AVFMT_NOTIMESTAMPS) {
2717  is->auddec.start_pts = is->audio_st->start_time;
2718  is->auddec.start_pts_tb = is->audio_st->time_base;
2719  }
2720  if ((ret = decoder_start(&is->auddec, audio_thread, "audio_decoder", is)) < 0)
2721  goto out;
2722  SDL_PauseAudioDevice(audio_dev, 0);
2723  break;
2724  case AVMEDIA_TYPE_VIDEO:
2725  is->video_stream = stream_index;
2726  is->video_st = ic->streams[stream_index];
2727 
2728  if ((ret = decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread)) < 0)
2729  goto fail;
2730  if ((ret = decoder_start(&is->viddec, video_thread, "video_decoder", is)) < 0)
2731  goto out;
2732  is->queue_attachments_req = 1;
2733  break;
2734  case AVMEDIA_TYPE_SUBTITLE:
2735  is->subtitle_stream = stream_index;
2736  is->subtitle_st = ic->streams[stream_index];
2737 
2738  if ((ret = decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread)) < 0)
2739  goto fail;
2740  if ((ret = decoder_start(&is->subdec, subtitle_thread, "subtitle_decoder", is)) < 0)
2741  goto out;
2742  break;
2743  default:
2744  break;
2745  }
2746  goto out;
2747 
2748 fail:
2749  avcodec_free_context(&avctx);
2750 out:
2751  av_channel_layout_uninit(&ch_layout);
2752  av_dict_free(&opts);
2753 
2754  return ret;
2755 }
2756 
2757 static int decode_interrupt_cb(void *ctx)
2758 {
2759  VideoState *is = ctx;
2760  return is->abort_request;
2761 }
2762 
2763 static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue) {
2764  return stream_id < 0 ||
2765  queue->abort_request ||
2766  (st->disposition & AV_DISPOSITION_ATTACHED_PIC) ||
2767  queue->nb_packets > MIN_FRAMES && (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0);
2768 }
2769 
2770 static int is_realtime(AVFormatContext *s)
2771 {
2772  if( !strcmp(s->iformat->name, "rtp")
2773  || !strcmp(s->iformat->name, "rtsp")
2774  || !strcmp(s->iformat->name, "sdp")
2775  )
2776  return 1;
2777 
2778  if(s->pb && ( !strncmp(s->url, "rtp:", 4)
2779  || !strncmp(s->url, "udp:", 4)
2780  )
2781  )
2782  return 1;
2783  return 0;
2784 }
2785 
2786 /* this thread gets the stream from the disk or the network */
2787 static int read_thread(void *arg)
2788 {
2789  VideoState *is = arg;
2790  AVFormatContext *ic = NULL;
2791  int err, i, ret;
2792  int st_index[AVMEDIA_TYPE_NB];
2793  AVPacket *pkt = NULL;
2794  int64_t stream_start_time;
2795  int pkt_in_play_range = 0;
2796  const AVDictionaryEntry *t;
2797  SDL_mutex *wait_mutex = SDL_CreateMutex();
2798  int scan_all_pmts_set = 0;
2799  int64_t pkt_ts;
2800 
2801  if (!wait_mutex) {
2802  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
2803  ret = AVERROR(ENOMEM);
2804  goto fail;
2805  }
2806 
2807  memset(st_index, -1, sizeof(st_index));
2808  is->eof = 0;
2809 
2810  pkt = av_packet_alloc();
2811  if (!pkt) {
2812  av_log(NULL, AV_LOG_FATAL, "Could not allocate packet.\n");
2813  ret = AVERROR(ENOMEM);
2814  goto fail;
2815  }
2816  ic = avformat_alloc_context();
2817  if (!ic) {
2818  av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2819  ret = AVERROR(ENOMEM);
2820  goto fail;
2821  }
2822  ic->interrupt_callback.callback = decode_interrupt_cb;
2823  ic->interrupt_callback.opaque = is;
2824  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2825  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2826  scan_all_pmts_set = 1;
2827  }
2828  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2829  if (err < 0) {
2830  print_error(is->filename, err);
2831  ret = -1;
2832  goto fail;
2833  }
2834  if (scan_all_pmts_set)
2835  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2836 
2837  if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2838  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2839  ret = AVERROR_OPTION_NOT_FOUND;
2840  goto fail;
2841  }
2842  is->ic = ic;
2843 
2844  if (genpts)
2845  ic->flags |= AVFMT_FLAG_GENPTS;
2846 
2847  if (find_stream_info) {
2848  AVDictionary **opts;
2849  int orig_nb_streams = ic->nb_streams;
2850 
2851  err = setup_find_stream_info_opts(ic, codec_opts, &opts);
2852  if (err < 0) {
2853  av_log(NULL, AV_LOG_ERROR,
2854  "Error setting up avformat_find_stream_info() options\n");
2855  ret = err;
2856  goto fail;
2857  }
2858 
2859  err = avformat_find_stream_info(ic, opts);
2860 
2861  for (i = 0; i < orig_nb_streams; i++)
2862  av_dict_free(&opts[i]);
2863  av_freep(&opts);
2864 
2865  if (err < 0) {
2866  av_log(NULL, AV_LOG_WARNING,
2867  "%s: could not find codec parameters\n", is->filename);
2868  ret = -1;
2869  goto fail;
2870  }
2871  }
2872 
2873  if (ic->pb)
2874  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2875 
2876  if (seek_by_bytes < 0)
2877  seek_by_bytes = !(ic->iformat->flags & AVFMT_NO_BYTE_SEEK) &&
2878  !!(ic->iformat->flags & AVFMT_TS_DISCONT) &&
2879  strcmp("ogg", ic->iformat->name);
2880 
2881  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2882 
2883  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2884  window_title = av_asprintf("%s - %s", t->value, input_filename);
2885 
2886  /* if seeking requested, we execute it */
2887  if (start_time != AV_NOPTS_VALUE) {
2888  int64_t timestamp;
2889 
2890  timestamp = start_time;
2891  /* add the stream start time */
2892  if (ic->start_time != AV_NOPTS_VALUE)
2893  timestamp += ic->start_time;
2894  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2895  if (ret < 0) {
2896  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2897  is->filename, (double)timestamp / AV_TIME_BASE);
2898  }
2899  }
2900 
2901  is->realtime = is_realtime(ic);
2902 
2903  if (show_status)
2904  av_dump_format(ic, 0, is->filename, 0);
2905 
2906  for (i = 0; i < ic->nb_streams; i++) {
2907  AVStream *st = ic->streams[i];
2908  enum AVMediaType type = st->codecpar->codec_type;
2909  st->discard = AVDISCARD_ALL;
2910  if (type >= 0 && wanted_stream_spec[type] && st_index[type] == -1)
2911  if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
2912  st_index[type] = i;
2913  }
2914  for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
2915  if (wanted_stream_spec[i] && st_index[i] == -1) {
2916  av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
2917  st_index[i] = INT_MAX;
2918  }
2919  }
2920 
2921  if (!video_disable)
2922  st_index[AVMEDIA_TYPE_VIDEO] =
2923  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2924  st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2925  if (!audio_disable)
2926  st_index[AVMEDIA_TYPE_AUDIO] =
2927  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2928  st_index[AVMEDIA_TYPE_AUDIO],
2929  st_index[AVMEDIA_TYPE_VIDEO],
2930  NULL, 0);
2931  if (!video_disable && !subtitle_disable)
2932  st_index[AVMEDIA_TYPE_SUBTITLE] =
2933  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2934  st_index[AVMEDIA_TYPE_SUBTITLE],
2935  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2936  st_index[AVMEDIA_TYPE_AUDIO] :
2937  st_index[AVMEDIA_TYPE_VIDEO]),
2938  NULL, 0);
2939 
2940  is->show_mode = show_mode;
2941  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2942  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2943  AVCodecParameters *codecpar = st->codecpar;
2944  AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
2945  if (codecpar->width)
2946  set_default_window_size(codecpar->width, codecpar->height, sar);
2947  }
2948 
2949  /* open the streams */
2950  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2951  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2952  }
2953 
2954  ret = -1;
2955  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2956  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2957  }
2958  if (is->show_mode == SHOW_MODE_NONE)
2959  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2960 
2961  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2962  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2963  }
2964 
2965  if (is->video_stream < 0 && is->audio_stream < 0) {
2966  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
2967  is->filename);
2968  ret = -1;
2969  goto fail;
2970  }
2971 
2972  if (infinite_buffer < 0 && is->realtime)
2973  infinite_buffer = 1;
2974 
2975  for (;;) {
2976  if (is->abort_request)
2977  break;
2978  if (is->paused != is->last_paused) {
2979  is->last_paused = is->paused;
2980  if (is->paused)
2981  is->read_pause_return = av_read_pause(ic);
2982  else
2983  av_read_play(ic);
2984  }
2985 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2986  if (is->paused &&
2987  (!strcmp(ic->iformat->name, "rtsp") ||
2988  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2989  /* wait 10 ms to avoid trying to get another packet */
2990  /* XXX: horrible */
2991  SDL_Delay(10);
2992  continue;
2993  }
2994 #endif
2995  if (is->seek_req) {
2996  int64_t seek_target = is->seek_pos;
2997  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2998  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2999 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
3000 // of the seek_pos/seek_rel variables
3001 
3002  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
3003  if (ret < 0) {
3004  av_log(NULL, AV_LOG_ERROR,
3005  "%s: error while seeking\n", is->ic->url);
3006  } else {
3007  if (is->audio_stream >= 0)
3008  packet_queue_flush(&is->audioq);
3009  if (is->subtitle_stream >= 0)
3010  packet_queue_flush(&is->subtitleq);
3011  if (is->video_stream >= 0)
3012  packet_queue_flush(&is->videoq);
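  /* Flushing a packet queue bumps its serial (see packet_queue_flush() earlier
   * in this file), which lets the decoders and frame queues recognise and drop
   * data that predates the seek; the external clock is then re-primed with the
   * seek target, unless we are seeking by bytes and no timestamp is known. */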
3013  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
3014  set_clock(&is->extclk, NAN, 0);
3015  } else {
3016  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
3017  }
3018  }
3019  is->seek_req = 0;
3020  is->queue_attachments_req = 1;
3021  is->eof = 0;
3022  if (is->paused)
3023  step_to_next_frame(is);
3024  }
3025  if (is->queue_attachments_req) {
3026  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
3027  if ((ret = av_packet_ref(pkt, &is->video_st->attached_pic)) < 0)
3028  goto fail;
3029  packet_queue_put(&is->videoq, pkt);
3030  packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
3031  }
3032  is->queue_attachments_req = 0;
3033  }
3034 
3035  /* if the queues are full, no need to read more */
3036  if (infinite_buffer<1 &&
3037  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
3038  || (stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq) &&
3039  stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq) &&
3040  stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq)))) {
3041  /* wait 10 ms */
3042  SDL_LockMutex(wait_mutex);
3043  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3044  SDL_UnlockMutex(wait_mutex);
3045  continue;
3046  }
3047  if (!is->paused &&
3048  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
3049  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
3050  if (loop != 1 && (!loop || --loop)) {
3051  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
3052  } else if (autoexit) {
3053  ret = AVERROR_EOF;
3054  goto fail;
3055  }
3056  }
3057  ret = av_read_frame(ic, pkt);
3058  if (ret < 0) {
3059  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
3060  if (is->video_stream >= 0)
3061  packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
3062  if (is->audio_stream >= 0)
3063  packet_queue_put_nullpacket(&is->audioq, pkt, is->audio_stream);
3064  if (is->subtitle_stream >= 0)
3065  packet_queue_put_nullpacket(&is->subtitleq, pkt, is->subtitle_stream);
3066  is->eof = 1;
3067  }
3068  if (ic->pb && ic->pb->error) {
3069  if (autoexit)
3070  goto fail;
3071  else
3072  break;
3073  }
3074  SDL_LockMutex(wait_mutex);
3075  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3076  SDL_UnlockMutex(wait_mutex);
3077  continue;
3078  } else {
3079  is->eof = 0;
3080  }
3081  /* check if packet is in play range specified by user, then queue, otherwise discard */
3082  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3083  pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
3084  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3085  (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3086  av_q2d(ic->streams[pkt->stream_index]->time_base) -
3087  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3088  <= ((double)duration / 1000000);
3089  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3090  packet_queue_put(&is->audioq, pkt);
3091  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3092  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3093  packet_queue_put(&is->videoq, pkt);
3094  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3095  packet_queue_put(&is->subtitleq, pkt);
3096  } else {
3097  av_packet_unref(pkt);
3098  }
3099  }
3100 
3101  ret = 0;
3102  fail:
3103  if (ic && !is->ic)
3104  avformat_close_input(&ic);
3105 
3106  av_packet_free(&pkt);
3107  if (ret != 0) {
3108  SDL_Event event;
3109 
3110  event.type = FF_QUIT_EVENT;
3111  event.user.data1 = is;
3112  SDL_PushEvent(&event);
3113  }
3114  SDL_DestroyMutex(wait_mutex);
3115  return 0;
3116 }
3117 
3118 static VideoState *stream_open(const char *filename,
3119  const AVInputFormat *iformat)
3120 {
3121  VideoState *is;
3122 
3123  is = av_mallocz(sizeof(VideoState));
3124  if (!is)
3125  return NULL;
3126  is->last_video_stream = is->video_stream = -1;
3127  is->last_audio_stream = is->audio_stream = -1;
3128  is->last_subtitle_stream = is->subtitle_stream = -1;
3129  is->filename = av_strdup(filename);
3130  if (!is->filename)
3131  goto fail;
3132  is->iformat = iformat;
3133  is->ytop = 0;
3134  is->xleft = 0;
3135 
3136  /* start video display */
3137  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3138  goto fail;
3139  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3140  goto fail;
3141  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3142  goto fail;
3143 
3144  if (packet_queue_init(&is->videoq) < 0 ||
3145  packet_queue_init(&is->audioq) < 0 ||
3146  packet_queue_init(&is->subtitleq) < 0)
3147  goto fail;
3148 
3149  if (!(is->continue_read_thread = SDL_CreateCond())) {
3150  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
3151  goto fail;
3152  }
3153 
3154  init_clock(&is->vidclk, &is->videoq.serial);
3155  init_clock(&is->audclk, &is->audioq.serial);
3156  init_clock(&is->extclk, &is->extclk.serial);
3157  is->audio_clock_serial = -1;
3158  if (startup_volume < 0)
3159  av_log(NULL, AV_LOG_WARNING, "-volume=%d < 0, setting to 0\n", startup_volume);
3160  if (startup_volume > 100)
3161  av_log(NULL, AV_LOG_WARNING, "-volume=%d > 100, setting to 100\n", startup_volume);
3162  startup_volume = av_clip(startup_volume, 0, 100);
3163  startup_volume = av_clip(SDL_MIX_MAXVOLUME * startup_volume / 100, 0, SDL_MIX_MAXVOLUME);
3164  is->audio_volume = startup_volume;
3165  is->muted = 0;
3166  is->av_sync_type = av_sync_type;
3167  is->read_tid = SDL_CreateThread(read_thread, "read_thread", is);
3168  if (!is->read_tid) {
3169  av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
3170 fail:
3171  stream_close(is);
3172  return NULL;
3173  }
3174  return is;
3175 }
3176 
3177 static void stream_cycle_channel(VideoState *is, int codec_type)
3178 {
3179  AVFormatContext *ic = is->ic;
3180  int start_index, stream_index;
3181  int old_index;
3182  AVStream *st;
3183  AVProgram *p = NULL;
3184  int nb_streams = is->ic->nb_streams;
3185 
3186  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3187  start_index = is->last_video_stream;
3188  old_index = is->video_stream;
3189  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3190  start_index = is->last_audio_stream;
3191  old_index = is->audio_stream;
3192  } else {
3193  start_index = is->last_subtitle_stream;
3194  old_index = is->subtitle_stream;
3195  }
3196  stream_index = start_index;
3197 
3198  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3199  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3200  if (p) {
3201  nb_streams = p->nb_stream_indexes;
3202  for (start_index = 0; start_index < nb_streams; start_index++)
3203  if (p->stream_index[start_index] == stream_index)
3204  break;
3205  if (start_index == nb_streams)
3206  start_index = -1;
3207  stream_index = start_index;
3208  }
3209  }
3210 
3211  for (;;) {
3212  if (++stream_index >= nb_streams)
3213  {
3214  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3215  {
3216  stream_index = -1;
3217  is->last_subtitle_stream = -1;
3218  goto the_end;
3219  }
3220  if (start_index == -1)
3221  return;
3222  stream_index = 0;
3223  }
3224  if (stream_index == start_index)
3225  return;
3226  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3227  if (st->codecpar->codec_type == codec_type) {
3228  /* check that parameters are OK */
3229  switch (codec_type) {
3230  case AVMEDIA_TYPE_AUDIO:
3231  if (st->codecpar->sample_rate != 0 &&
3232  st->codecpar->ch_layout.nb_channels != 0)
3233  goto the_end;
3234  break;
3235  case AVMEDIA_TYPE_VIDEO:
3236  case AVMEDIA_TYPE_SUBTITLE:
3237  goto the_end;
3238  default:
3239  break;
3240  }
3241  }
3242  }
3243  the_end:
3244  if (p && stream_index != -1)
3245  stream_index = p->stream_index[stream_index];
3246  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3247  av_get_media_type_string(codec_type),
3248  old_index,
3249  stream_index);
3250 
3251  stream_component_close(is, old_index);
3252  stream_component_open(is, stream_index);
3253 }
3254 
3255 
3256 static void toggle_full_screen(VideoState *is)
3257 {
3258  is_full_screen = !is_full_screen;
3259  SDL_SetWindowFullscreen(window, is_full_screen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0);
3260 }
3261 
3262 static void toggle_audio_display(VideoState *is)
3263 {
3264  int next = is->show_mode;
3265  do {
3266  next = (next + 1) % SHOW_MODE_NB;
3267  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3268  if (is->show_mode != next) {
3269  is->force_refresh = 1;
3270  is->show_mode = next;
3271  }
3272 }
3273 
3274 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3275  double remaining_time = 0.0;
3276  SDL_PumpEvents();
3277  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
3278  if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3279  SDL_ShowCursor(0);
3280  cursor_hidden = 1;
3281  }
3282  if (remaining_time > 0.0)
3283  av_usleep((int64_t)(remaining_time * 1000000.0));
3284  remaining_time = REFRESH_RATE;
3285  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3286  video_refresh(is, &remaining_time);
3287  SDL_PumpEvents();
3288  }
3289 }
3290 
3291 static void seek_chapter(VideoState *is, int incr)
3292 {
3293  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3294  int i;
3295 
3296  if (!is->ic->nb_chapters)
3297  return;
3298 
3299  /* find the current chapter */
3300  for (i = 0; i < is->ic->nb_chapters; i++) {
3301  AVChapter *ch = is->ic->chapters[i];
3302  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3303  i--;
3304  break;
3305  }
3306  }
3307 
3308  i += incr;
3309  i = FFMAX(i, 0);
3310  if (i >= is->ic->nb_chapters)
3311  return;
3312 
3313  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3314  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3315  AV_TIME_BASE_Q), 0, 0);
3316 }
3317 
3318 /* handle an event sent by the GUI */
3319 static void event_loop(VideoState *cur_stream)
3320 {
3321  SDL_Event event;
3322  double incr, pos, frac;
3323 
3324  for (;;) {
3325  double x;
3326  refresh_loop_wait_event(cur_stream, &event);
3327  switch (event.type) {
3328  case SDL_KEYDOWN:
3329  if (exit_on_keydown || event.key.keysym.sym == SDLK_ESCAPE || event.key.keysym.sym == SDLK_q) {
3330  do_exit(cur_stream);
3331  break;
3332  }
3333  // If we don't yet have a window, skip all key events, because read_thread might still be initializing...
3334  if (!cur_stream->width)
3335  continue;
3336  switch (event.key.keysym.sym) {
3337  case SDLK_f:
3338  toggle_full_screen(cur_stream);
3339  cur_stream->force_refresh = 1;
3340  break;
3341  case SDLK_p:
3342  case SDLK_SPACE:
3343  toggle_pause(cur_stream);
3344  break;
3345  case SDLK_m:
3346  toggle_mute(cur_stream);
3347  break;
3348  case SDLK_KP_MULTIPLY:
3349  case SDLK_0:
3350  update_volume(cur_stream, 1, SDL_VOLUME_STEP);
3351  break;
3352  case SDLK_KP_DIVIDE:
3353  case SDLK_9:
3354  update_volume(cur_stream, -1, SDL_VOLUME_STEP);
3355  break;
3356  case SDLK_s: // S: Step to next frame
3357  step_to_next_frame(cur_stream);
3358  break;
3359  case SDLK_a:
3360  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3361  break;
3362  case SDLK_v:
3363  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3364  break;
3365  case SDLK_c:
3366  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3367  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3368  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3369  break;
3370  case SDLK_t:
3371  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3372  break;
3373  case SDLK_w:
3374  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3375  if (++cur_stream->vfilter_idx >= nb_vfilters)
3376  cur_stream->vfilter_idx = 0;
3377  } else {
3378  cur_stream->vfilter_idx = 0;
3379  toggle_audio_display(cur_stream);
3380  }
3381  break;
3382  case SDLK_PAGEUP:
3383  if (cur_stream->ic->nb_chapters <= 1) {
3384  incr = 600.0;
3385  goto do_seek;
3386  }
3387  seek_chapter(cur_stream, 1);
3388  break;
3389  case SDLK_PAGEDOWN:
3390  if (cur_stream->ic->nb_chapters <= 1) {
3391  incr = -600.0;
3392  goto do_seek;
3393  }
3394  seek_chapter(cur_stream, -1);
3395  break;
3396  case SDLK_LEFT:
3397  incr = seek_interval ? -seek_interval : -10.0;
3398  goto do_seek;
3399  case SDLK_RIGHT:
3400  incr = seek_interval ? seek_interval : 10.0;
3401  goto do_seek;
3402  case SDLK_UP:
3403  incr = 60.0;
3404  goto do_seek;
3405  case SDLK_DOWN:
3406  incr = -60.0;
3407  do_seek:
3408  if (seek_by_bytes) {
3409  pos = -1;
3410  if (pos < 0 && cur_stream->video_stream >= 0)
3411  pos = frame_queue_last_pos(&cur_stream->pictq);
3412  if (pos < 0 && cur_stream->audio_stream >= 0)
3413  pos = frame_queue_last_pos(&cur_stream->sampq);
3414  if (pos < 0)
3415  pos = avio_tell(cur_stream->ic->pb);
3416  if (cur_stream->ic->bit_rate)
3417  incr *= cur_stream->ic->bit_rate / 8.0;
3418  else
3419  incr *= 180000.0;
3420  pos += incr;
3421  stream_seek(cur_stream, pos, incr, 1);
3422  } else {
3423  pos = get_master_clock(cur_stream);
3424  if (isnan(pos))
3425  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3426  pos += incr;
3427  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3428  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3429  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3430  }
3431  break;
3432  default:
3433  break;
3434  }
3435  break;
3436  case SDL_MOUSEBUTTONDOWN:
3437  if (exit_on_mousedown) {
3438  do_exit(cur_stream);
3439  break;
3440  }
3441  if (event.button.button == SDL_BUTTON_LEFT) {
3442  static int64_t last_mouse_left_click = 0;
3443  if (av_gettime_relative() - last_mouse_left_click <= 500000) {
3444  toggle_full_screen(cur_stream);
3445  cur_stream->force_refresh = 1;
3446  last_mouse_left_click = 0;
3447  } else {
3448  last_mouse_left_click = av_gettime_relative();
3449  }
3450  }
3451  case SDL_MOUSEMOTION:
3452  if (cursor_hidden) {
3453  SDL_ShowCursor(1);
3454  cursor_hidden = 0;
3455  }
3456  cursor_last_shown = av_gettime_relative();
3457  if (event.type == SDL_MOUSEBUTTONDOWN) {
3458  if (event.button.button != SDL_BUTTON_RIGHT)
3459  break;
3460  x = event.button.x;
3461  } else {
3462  if (!(event.motion.state & SDL_BUTTON_RMASK))
3463  break;
3464  x = event.motion.x;
3465  }
3466  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3467  uint64_t size = avio_size(cur_stream->ic->pb);
3468  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3469  } else {
3470  int64_t ts;
3471  int ns, hh, mm, ss;
3472  int tns, thh, tmm, tss;
3473  tns = cur_stream->ic->duration / 1000000LL;
3474  thh = tns / 3600;
3475  tmm = (tns % 3600) / 60;
3476  tss = (tns % 60);
3477  frac = x / cur_stream->width;
3478  ns = frac * tns;
3479  hh = ns / 3600;
3480  mm = (ns % 3600) / 60;
3481  ss = (ns % 60);
3482  av_log(NULL, AV_LOG_INFO,
3483  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3484  hh, mm, ss, thh, tmm, tss);
3485  ts = frac * cur_stream->ic->duration;
3486  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3487  ts += cur_stream->ic->start_time;
3488  stream_seek(cur_stream, ts, 0, 0);
3489  }
3490  break;
3491  case SDL_WINDOWEVENT:
3492  switch (event.window.event) {
3493  case SDL_WINDOWEVENT_SIZE_CHANGED:
3494  screen_width = cur_stream->width = event.window.data1;
3495  screen_height = cur_stream->height = event.window.data2;
3496  if (cur_stream->vis_texture) {
3497  SDL_DestroyTexture(cur_stream->vis_texture);
3498  cur_stream->vis_texture = NULL;
3499  }
3500  if (vk_renderer)
3501  vk_renderer_resize(vk_renderer, cur_stream->width, cur_stream->height);
3502  case SDL_WINDOWEVENT_EXPOSED:
3503  cur_stream->force_refresh = 1;
3504  }
3505  break;
3506  case SDL_QUIT:
3507  case FF_QUIT_EVENT:
3508  do_exit(cur_stream);
3509  break;
3510  default:
3511  break;
3512  }
3513  }
3514 }
3515 
3516 static int opt_width(void *optctx, const char *opt, const char *arg)
3517 {
3518  double num;
3519  int ret = parse_number(opt, arg, OPT_INT64, 1, INT_MAX, &num);
3520  if (ret < 0)
3521  return ret;
3522 
3523  screen_width = num;
3524  return 0;
3525 }
3526 
3527 static int opt_height(void *optctx, const char *opt, const char *arg)
3528 {
3529  double num;
3530  int ret = parse_number(opt, arg, OPT_INT64, 1, INT_MAX, &num);
3531  if (ret < 0)
3532  return ret;
3533 
3534  screen_height = num;
3535  return 0;
3536 }
3537 
3538 static int opt_format(void *optctx, const char *opt, const char *arg)
3539 {
3540  file_iformat = av_find_input_format(arg);
3541  if (!file_iformat) {
3542  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3543  return AVERROR(EINVAL);
3544  }
3545  return 0;
3546 }
3547 
3548 static int opt_sync(void *optctx, const char *opt, const char *arg)
3549 {
3550  if (!strcmp(arg, "audio"))
3551  av_sync_type = AV_SYNC_AUDIO_MASTER;
3552  else if (!strcmp(arg, "video"))
3553  av_sync_type = AV_SYNC_VIDEO_MASTER;
3554  else if (!strcmp(arg, "ext"))
3555  av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3556  else {
3557  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3558  exit(1);
3559  }
3560  return 0;
3561 }
3562 
3563 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3564 {
3565  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3566  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3567  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT : SHOW_MODE_NONE;
3568 
3569  if (show_mode == SHOW_MODE_NONE) {
3570  double num;
3571  int ret = parse_number(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1, &num);
3572  if (ret < 0)
3573  return ret;
3574  show_mode = num;
3575  }
3576  return 0;
3577 }
3578 
3579 static int opt_input_file(void *optctx, const char *filename)
3580 {
3581  if (input_filename) {
3582  av_log(NULL, AV_LOG_FATAL,
3583  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3584  filename, input_filename);
3585  return AVERROR(EINVAL);
3586  }
3587  if (!strcmp(filename, "-"))
3588  filename = "fd:";
3589  input_filename = filename;
3590 
3591  return 0;
3592 }
3593 
3594 static int opt_codec(void *optctx, const char *opt, const char *arg)
3595 {
3596  const char *spec = strchr(opt, ':');
3597  if (!spec) {
3598  av_log(NULL, AV_LOG_ERROR,
3599  "No media specifier was specified in '%s' in option '%s'\n",
3600  arg, opt);
3601  return AVERROR(EINVAL);
3602  }
3603  spec++;
3604  switch (spec[0]) {
3605  case 'a' : audio_codec_name = arg; break;
3606  case 's' : subtitle_codec_name = arg; break;
3607  case 'v' : video_codec_name = arg; break;
3608  default:
3609  av_log(NULL, AV_LOG_ERROR,
3610  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3611  return AVERROR(EINVAL);
3612  }
3613  return 0;
3614 }
3615 
3616 static int dummy;
3617 
3618 static const OptionDef options[] = {
3619  CMDUTILS_COMMON_OPTIONS
3620  { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3621  { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3622  { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3623  { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3624  { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3625  { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3626  { "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3627  { "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3628  { "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3629  { "ss", HAS_ARG | OPT_TIME, { &start_time }, "seek to a given position in seconds", "pos" },
3630  { "t", HAS_ARG | OPT_TIME, { &duration }, "play \"duration\" seconds of audio/video", "duration" },
3631  { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3632  { "seek_interval", OPT_FLOAT | HAS_ARG, { &seek_interval }, "set seek interval for left/right keys, in seconds", "seconds" },
3633  { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3634  { "noborder", OPT_BOOL, { &borderless }, "borderless window" },
3635  { "alwaysontop", OPT_BOOL, { &alwaysontop }, "window always on top" },
3636  { "volume", OPT_INT | HAS_ARG, { &startup_volume}, "set startup volume 0=min 100=max", "volume" },
3637  { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3638  { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3639  { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3640  { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3641  { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3642  { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3643  { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3644  { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3645  { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3646  { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3647  { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3648  { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3649  { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3650  { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3651  { "left", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_left }, "set the x position for the left of the window", "x pos" },
3652  { "top", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_top }, "set the y position for the top of the window", "y pos" },
3653  { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3654  { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3655  { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3656  { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3657  { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3658  { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3659  { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3660  { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3661  { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3662  { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
3663  { "find_stream_info", OPT_BOOL | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
3664  "read and decode the streams to fill missing information with heuristics" },
3665  { "filter_threads", HAS_ARG | OPT_INT | OPT_EXPERT, { &filter_nbthreads }, "number of filter threads per graph" },
3666  { "enable_vulkan", OPT_BOOL, { &enable_vulkan }, "enable vulkan renderer" },
3667  { "vulkan_params", HAS_ARG | OPT_STRING | OPT_EXPERT, { &vulkan_params }, "vulkan configuration using a list of key=value pairs separated by ':'" },
3668  { "hwaccel", HAS_ARG | OPT_STRING | OPT_EXPERT, { &hwaccel }, "use HW accelerated decoding" },
3669  { NULL, },
3670 };
3671 
3672 static void show_usage(void)
3673 {
3674  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3675  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3676  av_log(NULL, AV_LOG_INFO, "\n");
3677 }
3678 
3679 void show_help_default(const char *opt, const char *arg)
3680 {
3681  av_log_set_callback(log_callback_help);
3682  show_usage();
3683  show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3684  show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3685  printf("\n");
3686  show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3687  show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3688  show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3689  printf("\nWhile playing:\n"
3690  "q, ESC quit\n"
3691  "f toggle full screen\n"
3692  "p, SPC pause\n"
3693  "m toggle mute\n"
3694  "9, 0 decrease and increase volume respectively\n"
3695  "/, * decrease and increase volume respectively\n"
3696  "a cycle audio channel in the current program\n"
3697  "v cycle video channel\n"
3698  "t cycle subtitle channel in the current program\n"
3699  "c cycle program\n"
3700  "w cycle video filters or show modes\n"
3701  "s activate frame-step mode\n"
3702  "left/right seek backward/forward 10 seconds or to custom interval if -seek_interval is set\n"
3703  "down/up seek backward/forward 1 minute\n"
3704  "page down/page up seek backward/forward 10 minutes\n"
3705  "right mouse click seek to percentage in file corresponding to fraction of width\n"
3706  "left double-click toggle full screen\n"
3707  );
3708 }
3709 
3710 /* Called from the main */
3711 int main(int argc, char **argv)
3712 {
3713  int flags, ret;
3714  VideoState *is;
3715 
3716  init_dynload();
3717 
3718  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3719  parse_loglevel(argc, argv, options);
3720 
3721  /* register all codecs, demux and protocols */
3722 #if CONFIG_AVDEVICE
3723  avdevice_register_all();
3724 #endif
3725  avformat_network_init();
3726 
3727  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3728  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3729 
3730  show_banner(argc, argv, options);
3731 
3732  ret = parse_options(NULL, argc, argv, options, opt_input_file);
3733  if (ret < 0)
3734  exit(ret == AVERROR_EXIT ? 0 : 1);
3735 
3736  if (!input_filename) {
3737  show_usage();
3738  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3740  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3741  exit(1);
3742  }
3743 
3744  if (display_disable) {
3745  video_disable = 1;
3746  }
3747  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3748  if (audio_disable)
3749  flags &= ~SDL_INIT_AUDIO;
3750  else {
3751  /* Try to work around an occasional ALSA buffer underflow issue when the
3752  * period size is NPOT due to ALSA resampling by forcing the buffer size. */
3753  if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
3754  SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1);
3755  }
3756  if (display_disable)
3757  flags &= ~SDL_INIT_VIDEO;
3758  if (SDL_Init (flags)) {
3759  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3760  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3761  exit(1);
3762  }
3763 
3764  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3765  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3766 
3767  if (!display_disable) {
3768  int flags = SDL_WINDOW_HIDDEN;
3769  if (alwaysontop)
3770 #if SDL_VERSION_ATLEAST(2,0,5)
3771  flags |= SDL_WINDOW_ALWAYS_ON_TOP;
3772 #else
3773  av_log(NULL, AV_LOG_WARNING, "Your SDL version doesn't support SDL_WINDOW_ALWAYS_ON_TOP. Feature will be inactive.\n");
3774 #endif
3775  if (borderless)
3776  flags |= SDL_WINDOW_BORDERLESS;
3777  else
3778  flags |= SDL_WINDOW_RESIZABLE;
3779 
3780 #ifdef SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR
3781  SDL_SetHint(SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR, "0");
3782 #endif
3783  if (hwaccel && !enable_vulkan) {
3784  av_log(NULL, AV_LOG_INFO, "Enable vulkan renderer to support hwaccel %s\n", hwaccel);
3785  enable_vulkan = 1;
3786  }
3787  if (enable_vulkan) {
3788  vk_renderer = vk_get_renderer();
3789  if (vk_renderer) {
3790 #if SDL_VERSION_ATLEAST(2, 0, 6)
3791  flags |= SDL_WINDOW_VULKAN;
3792 #endif
3793  } else {
3794  av_log(NULL, AV_LOG_WARNING, "Doesn't support vulkan renderer, fallback to SDL renderer\n");
3795  enable_vulkan = 0;
3796  }
3797  }
3798  window = SDL_CreateWindow(program_name, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, default_width, default_height, flags);
3799  SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
3800  if (!window) {
3801  av_log(NULL, AV_LOG_FATAL, "Failed to create window: %s", SDL_GetError());
3802  do_exit(NULL);
3803  }
3804 
3805  if (vk_renderer) {
3806  AVDictionary *dict = NULL;
3807 
3808  if (vulkan_params)
3809  av_dict_parse_string(&dict, vulkan_params, "=", ":", 0);
3810  ret = vk_renderer_create(vk_renderer, window, dict);
3811  av_dict_free(&dict);
3812  if (ret < 0) {
3813  av_log(NULL, AV_LOG_FATAL, "Failed to create vulkan renderer, %s\n", av_err2str(ret));
3814  do_exit(NULL);
3815  }
3816  } else {
3817  renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
3818  if (!renderer) {
3819  av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
3820  renderer = SDL_CreateRenderer(window, -1, 0);
3821  }
3822  if (renderer) {
3823  if (!SDL_GetRendererInfo(renderer, &renderer_info))
3824  av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", renderer_info.name);
3825  }
3826  if (!renderer || !renderer_info.num_texture_formats) {
3827  av_log(NULL, AV_LOG_FATAL, "Failed to create window or renderer: %s", SDL_GetError());
3828  do_exit(NULL);
3829  }
3830  }
3831  }
3832 
3833  is = stream_open(input_filename, file_iformat);
3834  if (!is) {
3835  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3836  do_exit(NULL);
3837  }
3838 
3839  event_loop(is);
3840 
3841  /* never returns */
3842 
3843  return 0;
3844 }
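
As a side note for readers following the SDL setup in main() above, the sketch below is a minimal, self-contained illustration (it is not taken from ffplay.c) of the same renderer-creation pattern: request an accelerated, vsync-enabled renderer first, then fall back to SDL's default renderer if that fails. The window title "demo" and the 640x480 size are placeholder values; the example only assumes SDL2 headers and linkage.

#include <SDL.h>
#include <stdio.h>

int main(void)
{
    if (SDL_Init(SDL_INIT_VIDEO)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        return 1;
    }

    /* Create the window hidden, as ffplay does, and show it once geometry is known. */
    SDL_Window *window = SDL_CreateWindow("demo", SDL_WINDOWPOS_UNDEFINED,
                                          SDL_WINDOWPOS_UNDEFINED, 640, 480,
                                          SDL_WINDOW_HIDDEN | SDL_WINDOW_RESIZABLE);
    if (!window) {
        fprintf(stderr, "Failed to create window: %s\n", SDL_GetError());
        SDL_Quit();
        return 1;
    }

    /* Prefer a hardware-accelerated, vsynced renderer; fall back to any renderer. */
    SDL_Renderer *renderer = SDL_CreateRenderer(window, -1,
                                                SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
    if (!renderer) {
        fprintf(stderr, "No accelerated renderer: %s\n", SDL_GetError());
        renderer = SDL_CreateRenderer(window, -1, 0);
    }

    /* Report which backend was picked; SDL_GetRendererInfo returns 0 on success. */
    SDL_RendererInfo info;
    if (renderer && !SDL_GetRendererInfo(renderer, &info))
        printf("Initialized %s renderer with %u texture formats.\n",
               info.name, info.num_texture_formats);

    if (renderer)
        SDL_DestroyRenderer(renderer);
    SDL_DestroyWindow(window);
    SDL_Quit();
    return 0;
}

On systems that ship sdl2-config, the sketch can be built with, for example, cc demo.c $(sdl2-config --cflags --libs).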