ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include <stdint.h>
32 
33 #include "libavutil/avstring.h"
34 #include "libavutil/eval.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/dict.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/samplefmt.h"
41 #include "libavutil/avassert.h"
42 #include "libavutil/time.h"
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswscale/swscale.h"
46 #include "libavutil/opt.h"
47 #include "libavcodec/avfft.h"
48 #include "libswresample/swresample.h"
49 
50 #if CONFIG_AVFILTER
51 # include "libavfilter/avfilter.h"
52 # include "libavfilter/buffersink.h"
53 # include "libavfilter/buffersrc.h"
54 #endif
55 
56 #include <SDL.h>
57 #include <SDL_thread.h>
58 
59 #include "cmdutils.h"
60 
61 #include <assert.h>
62 
63 const char program_name[] = "ffplay";
64 const int program_birth_year = 2003;
65 
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_FRAMES 25
68 #define EXTERNAL_CLOCK_MIN_FRAMES 2
69 #define EXTERNAL_CLOCK_MAX_FRAMES 10
70 
71 /* Minimum SDL audio buffer size, in samples. */
72 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
73 /* Calculate actual buffer size keeping in mind not to cause too frequent audio callbacks */
74 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
75 
76 /* Step size for volume control in dB */
77 #define SDL_VOLUME_STEP (0.75)
78 
79 /* no AV sync correction is done if below the minimum AV sync threshold */
80 #define AV_SYNC_THRESHOLD_MIN 0.04
81 /* AV sync correction is done if above the maximum AV sync threshold */
82 #define AV_SYNC_THRESHOLD_MAX 0.1
83 /* If a frame duration is longer than this, it will not be duplicated to compensate for AV sync */
84 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
85 /* no AV correction is done if too big error */
86 #define AV_NOSYNC_THRESHOLD 10.0
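/* How these thresholds are used (see compute_target_delay() below): the video clock is
 * compared against the master clock; differences smaller than the per-frame sync threshold
 * (clamped between AV_SYNC_THRESHOLD_MIN and AV_SYNC_THRESHOLD_MAX) are ignored, larger ones
 * shorten or lengthen the delay before the next frame, and differences beyond
 * AV_NOSYNC_THRESHOLD are treated as a timestamp discontinuity and left uncorrected. */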
87 
88 /* maximum audio speed change to get correct sync */
89 #define SAMPLE_CORRECTION_PERCENT_MAX 10
90 
91 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
92 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
93 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
94 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
95 
96 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
97 #define AUDIO_DIFF_AVG_NB 20
98 
99 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
100 #define REFRESH_RATE 0.01
101 
102 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
103 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
104 #define SAMPLE_ARRAY_SIZE (8 * 65536)
105 
106 #define CURSOR_HIDE_DELAY 1000000
107 
108 #define USE_ONEPASS_SUBTITLE_RENDER 1
109 
110 static unsigned sws_flags = SWS_BICUBIC;
111 
112 typedef struct MyAVPacketList {
113  AVPacket pkt;
114  struct MyAVPacketList *next;
115  int serial;
116 } MyAVPacketList;
117 
118 typedef struct PacketQueue {
119  MyAVPacketList *first_pkt, *last_pkt;
120  int nb_packets;
121  int size;
122  int64_t duration;
123  int abort_request;
124  int serial;
125  SDL_mutex *mutex;
126  SDL_cond *cond;
127 } PacketQueue;
128 
129 #define VIDEO_PICTURE_QUEUE_SIZE 3
130 #define SUBPICTURE_QUEUE_SIZE 16
131 #define SAMPLE_QUEUE_SIZE 9
132 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
133 
134 typedef struct AudioParams {
135  int freq;
136  int channels;
137  int64_t channel_layout;
138  enum AVSampleFormat fmt;
139  int frame_size;
140  int bytes_per_sec;
141 } AudioParams;
142 
143 typedef struct Clock {
144  double pts; /* clock base */
145  double pts_drift; /* clock base minus time at which we updated the clock */
146  double last_updated;
147  double speed;
148  int serial; /* clock is based on a packet with this serial */
149  int paused;
150  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
151 } Clock;
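/* The clock stores pts_drift = pts - last_updated rather than an absolute start time, so
 * get_clock() below can recover the current value as
 *     pts_drift + now - (now - last_updated) * (1.0 - speed)
 * which equals pts at the moment of the last update and then advances at 'speed' seconds of
 * media time per second of wall-clock time. */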
152 
153 /* Common struct for handling all types of decoded data and allocated render buffers. */
154 typedef struct Frame {
155  AVFrame *frame;
156  AVSubtitle sub;
157  int serial;
158  double pts; /* presentation timestamp for the frame */
159  double duration; /* estimated duration of the frame */
160  int64_t pos; /* byte position of the frame in the input file */
161  int width;
162  int height;
163  int format;
164  AVRational sar;
165  int uploaded;
166  int flip_v;
167 } Frame;
168 
169 typedef struct FrameQueue {
170  Frame queue[FRAME_QUEUE_SIZE];
171  int rindex;
172  int windex;
173  int size;
174  int max_size;
175  int keep_last;
176  int rindex_shown;
177  SDL_mutex *mutex;
178  SDL_cond *cond;
179  PacketQueue *pktq;
180 } FrameQueue;
181 
182 enum {
183  AV_SYNC_AUDIO_MASTER, /* default choice */
184  AV_SYNC_VIDEO_MASTER,
185  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
186 };
187 
188 typedef struct Decoder {
189  AVPacket pkt;
190  PacketQueue *queue;
191  AVCodecContext *avctx;
192  int pkt_serial;
193  int finished;
194  int packet_pending;
195  SDL_cond *empty_queue_cond;
196  int64_t start_pts;
197  AVRational start_pts_tb;
198  int64_t next_pts;
199  AVRational next_pts_tb;
200  SDL_Thread *decoder_tid;
201 } Decoder;
202 
203 typedef struct VideoState {
204  SDL_Thread *read_tid;
205  AVInputFormat *iformat;
206  int abort_request;
207  int force_refresh;
208  int paused;
209  int last_paused;
210  int queue_attachments_req;
211  int seek_req;
212  int seek_flags;
213  int64_t seek_pos;
214  int64_t seek_rel;
215  int read_pause_return;
216  AVFormatContext *ic;
217  int realtime;
218 
219  Clock audclk;
220  Clock vidclk;
221  Clock extclk;
222 
223  FrameQueue pictq;
224  FrameQueue subpq;
225  FrameQueue sampq;
226 
227  Decoder auddec;
228  Decoder viddec;
229  Decoder subdec;
230 
231  int audio_stream;
232 
233  int av_sync_type;
234 
235  double audio_clock;
236  int audio_clock_serial;
237  double audio_diff_cum; /* used for AV difference average computation */
238  double audio_diff_avg_coef;
239  double audio_diff_threshold;
240  int audio_diff_avg_count;
241  AVStream *audio_st;
242  PacketQueue audioq;
243  int audio_hw_buf_size;
244  uint8_t *audio_buf;
245  uint8_t *audio_buf1;
246  unsigned int audio_buf_size; /* in bytes */
247  unsigned int audio_buf1_size;
248  int audio_buf_index; /* in bytes */
249  int audio_write_buf_size;
250  int audio_volume;
251  int muted;
252  struct AudioParams audio_src;
253 #if CONFIG_AVFILTER
254  struct AudioParams audio_filter_src;
255 #endif
256  struct AudioParams audio_tgt;
257  struct SwrContext *swr_ctx;
258  int frame_drops_early;
259  int frame_drops_late;
260 
261  enum ShowMode {
262  SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
263  } show_mode;
264  int16_t sample_array[SAMPLE_ARRAY_SIZE];
265  int sample_array_index;
266  int last_i_start;
267  RDFTContext *rdft;
268  int rdft_bits;
269  FFTSample *rdft_data;
270  int xpos;
271  double last_vis_time;
272  SDL_Texture *vis_texture;
273  SDL_Texture *sub_texture;
274  SDL_Texture *vid_texture;
275 
276  int subtitle_stream;
277  AVStream *subtitle_st;
278  PacketQueue subtitleq;
279 
280  double frame_timer;
281  double frame_last_returned_time;
282  double frame_last_filter_delay;
283  int video_stream;
284  AVStream *video_st;
285  PacketQueue videoq;
286  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
287  struct SwsContext *img_convert_ctx;
288  struct SwsContext *sub_convert_ctx;
289  int eof;
290 
291  char *filename;
292  int width, height, xleft, ytop;
293  int step;
294 
295 #if CONFIG_AVFILTER
296  int vfilter_idx;
297  AVFilterContext *in_video_filter; // the first filter in the video chain
298  AVFilterContext *out_video_filter; // the last filter in the video chain
299  AVFilterContext *in_audio_filter; // the first filter in the audio chain
300  AVFilterContext *out_audio_filter; // the last filter in the audio chain
301  AVFilterGraph *agraph; // audio filter graph
302 #endif
303 
304  int last_video_stream, last_audio_stream, last_subtitle_stream;
305 
306  SDL_cond *continue_read_thread;
307 } VideoState;
308 
309 /* options specified by the user */
310 static AVInputFormat *file_iformat;
311 static const char *input_filename;
312 static const char *window_title;
313 static int default_width = 640;
314 static int default_height = 480;
315 static int screen_width = 0;
316 static int screen_height = 0;
317 static int screen_left = SDL_WINDOWPOS_CENTERED;
318 static int screen_top = SDL_WINDOWPOS_CENTERED;
319 static int audio_disable;
320 static int video_disable;
321 static int subtitle_disable;
322 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
323 static int seek_by_bytes = -1;
324 static float seek_interval = 10;
325 static int display_disable;
326 static int borderless;
327 static int startup_volume = 100;
328 static int show_status = 1;
329 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
330 static int64_t start_time = AV_NOPTS_VALUE;
331 static int64_t duration = AV_NOPTS_VALUE;
332 static int fast = 0;
333 static int genpts = 0;
334 static int lowres = 0;
335 static int decoder_reorder_pts = -1;
336 static int autoexit;
337 static int exit_on_keydown;
338 static int exit_on_mousedown;
339 static int loop = 1;
340 static int framedrop = -1;
341 static int infinite_buffer = -1;
342 static enum ShowMode show_mode = SHOW_MODE_NONE;
343 static const char *audio_codec_name;
344 static const char *subtitle_codec_name;
345 static const char *video_codec_name;
346 double rdftspeed = 0.02;
347 static int64_t cursor_last_shown;
348 static int cursor_hidden = 0;
349 #if CONFIG_AVFILTER
350 static const char **vfilters_list = NULL;
351 static int nb_vfilters = 0;
352 static char *afilters = NULL;
353 #endif
354 static int autorotate = 1;
355 static int find_stream_info = 1;
356 
357 /* current context */
358 static int is_full_screen;
359 static int64_t audio_callback_time;
360 
361 static AVPacket flush_pkt;
362 
363 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
364 
365 static SDL_Window *window;
366 static SDL_Renderer *renderer;
367 static SDL_RendererInfo renderer_info = {0};
368 static SDL_AudioDeviceID audio_dev;
369 
370 static const struct TextureFormatEntry {
371  enum AVPixelFormat format;
372  int texture_fmt;
373 } sdl_texture_format_map[] = {
374  { AV_PIX_FMT_RGB8, SDL_PIXELFORMAT_RGB332 },
375  { AV_PIX_FMT_RGB444, SDL_PIXELFORMAT_RGB444 },
376  { AV_PIX_FMT_RGB555, SDL_PIXELFORMAT_RGB555 },
377  { AV_PIX_FMT_BGR555, SDL_PIXELFORMAT_BGR555 },
378  { AV_PIX_FMT_RGB565, SDL_PIXELFORMAT_RGB565 },
379  { AV_PIX_FMT_BGR565, SDL_PIXELFORMAT_BGR565 },
380  { AV_PIX_FMT_RGB24, SDL_PIXELFORMAT_RGB24 },
381  { AV_PIX_FMT_BGR24, SDL_PIXELFORMAT_BGR24 },
382  { AV_PIX_FMT_0RGB32, SDL_PIXELFORMAT_RGB888 },
383  { AV_PIX_FMT_0BGR32, SDL_PIXELFORMAT_BGR888 },
384  { AV_PIX_FMT_NE(RGB0, 0BGR), SDL_PIXELFORMAT_RGBX8888 },
385  { AV_PIX_FMT_NE(BGR0, 0RGB), SDL_PIXELFORMAT_BGRX8888 },
386  { AV_PIX_FMT_RGB32, SDL_PIXELFORMAT_ARGB8888 },
387  { AV_PIX_FMT_RGB32_1, SDL_PIXELFORMAT_RGBA8888 },
388  { AV_PIX_FMT_BGR32, SDL_PIXELFORMAT_ABGR8888 },
389  { AV_PIX_FMT_BGR32_1, SDL_PIXELFORMAT_BGRA8888 },
390  { AV_PIX_FMT_YUV420P, SDL_PIXELFORMAT_IYUV },
391  { AV_PIX_FMT_YUYV422, SDL_PIXELFORMAT_YUY2 },
392  { AV_PIX_FMT_UYVY422, SDL_PIXELFORMAT_UYVY },
393  { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_UNKNOWN },
394 };
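/* Pixel formats with no direct SDL equivalent fall through to SDL_PIXELFORMAT_UNKNOWN;
 * upload_texture() below then allocates an ARGB8888 texture and converts the frame with
 * swscale (roughly sws_getCachedContext(..., frame->format, ..., AV_PIX_FMT_BGRA,
 * sws_flags, NULL, NULL, NULL)) instead of uploading it directly. */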
395 
396 #if CONFIG_AVFILTER
397 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
398 {
399  GROW_ARRAY(vfilters_list, nb_vfilters);
400  vfilters_list[nb_vfilters - 1] = arg;
401  return 0;
402 }
403 #endif
404 
405 static inline
406 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
407  enum AVSampleFormat fmt2, int64_t channel_count2)
408 {
409  /* If channel count == 1, planar and non-planar formats are the same */
410  if (channel_count1 == 1 && channel_count2 == 1)
411  return fmt1 != fmt2;
412  else
413  return channel_count1 != channel_count2 || fmt1 != fmt2;
414 }
415 
416 static inline
417 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
418 {
419  if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
420  return channel_layout;
421  else
422  return 0;
423 }
424 
425 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
426 {
427  MyAVPacketList *pkt1;
428 
429  if (q->abort_request)
430  return -1;
431 
432  pkt1 = av_malloc(sizeof(MyAVPacketList));
433  if (!pkt1)
434  return -1;
435  pkt1->pkt = *pkt;
436  pkt1->next = NULL;
437  if (pkt == &flush_pkt)
438  q->serial++;
439  pkt1->serial = q->serial;
440 
441  if (!q->last_pkt)
442  q->first_pkt = pkt1;
443  else
444  q->last_pkt->next = pkt1;
445  q->last_pkt = pkt1;
446  q->nb_packets++;
447  q->size += pkt1->pkt.size + sizeof(*pkt1);
448  q->duration += pkt1->pkt.duration;
449  /* XXX: should duplicate packet data in DV case */
450  SDL_CondSignal(q->cond);
451  return 0;
452 }
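/* Queue serials: pushing the special flush_pkt bumps q->serial, and every queued packet
 * remembers the serial it was added under. After a seek, readers compare a packet's serial
 * against the current queue serial and simply discard data belonging to the pre-seek
 * generation, so the queues never need to be drained synchronously. */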
453 
454 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
455 {
456  int ret;
457 
458  SDL_LockMutex(q->mutex);
459  ret = packet_queue_put_private(q, pkt);
460  SDL_UnlockMutex(q->mutex);
461 
462  if (pkt != &flush_pkt && ret < 0)
463  av_packet_unref(pkt);
464 
465  return ret;
466 }
467 
468 static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
469 {
470  AVPacket pkt1, *pkt = &pkt1;
471  av_init_packet(pkt);
472  pkt->data = NULL;
473  pkt->size = 0;
474  pkt->stream_index = stream_index;
475  return packet_queue_put(q, pkt);
476 }
477 
478 /* packet queue handling */
479 static int packet_queue_init(PacketQueue *q)
480 {
481  memset(q, 0, sizeof(PacketQueue));
482  q->mutex = SDL_CreateMutex();
483  if (!q->mutex) {
484  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
485  return AVERROR(ENOMEM);
486  }
487  q->cond = SDL_CreateCond();
488  if (!q->cond) {
489  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
490  return AVERROR(ENOMEM);
491  }
492  q->abort_request = 1;
493  return 0;
494 }
495 
496 static void packet_queue_flush(PacketQueue *q)
497 {
498  MyAVPacketList *pkt, *pkt1;
499 
500  SDL_LockMutex(q->mutex);
501  for (pkt = q->first_pkt; pkt; pkt = pkt1) {
502  pkt1 = pkt->next;
503  av_packet_unref(&pkt->pkt);
504  av_freep(&pkt);
505  }
506  q->last_pkt = NULL;
507  q->first_pkt = NULL;
508  q->nb_packets = 0;
509  q->size = 0;
510  q->duration = 0;
511  SDL_UnlockMutex(q->mutex);
512 }
513 
514 static void packet_queue_destroy(PacketQueue *q)
515 {
516  packet_queue_flush(q);
517  SDL_DestroyMutex(q->mutex);
518  SDL_DestroyCond(q->cond);
519 }
520 
521 static void packet_queue_abort(PacketQueue *q)
522 {
523  SDL_LockMutex(q->mutex);
524 
525  q->abort_request = 1;
526 
527  SDL_CondSignal(q->cond);
528 
529  SDL_UnlockMutex(q->mutex);
530 }
531 
532 static void packet_queue_start(PacketQueue *q)
533 {
534  SDL_LockMutex(q->mutex);
535  q->abort_request = 0;
536  packet_queue_put_private(q, &flush_pkt);
537  SDL_UnlockMutex(q->mutex);
538 }
539 
540 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
541 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
542 {
543  MyAVPacketList *pkt1;
544  int ret;
545 
546  SDL_LockMutex(q->mutex);
547 
548  for (;;) {
549  if (q->abort_request) {
550  ret = -1;
551  break;
552  }
553 
554  pkt1 = q->first_pkt;
555  if (pkt1) {
556  q->first_pkt = pkt1->next;
557  if (!q->first_pkt)
558  q->last_pkt = NULL;
559  q->nb_packets--;
560  q->size -= pkt1->pkt.size + sizeof(*pkt1);
561  q->duration -= pkt1->pkt.duration;
562  *pkt = pkt1->pkt;
563  if (serial)
564  *serial = pkt1->serial;
565  av_free(pkt1);
566  ret = 1;
567  break;
568  } else if (!block) {
569  ret = 0;
570  break;
571  } else {
572  SDL_CondWait(q->cond, q->mutex);
573  }
574  }
575  SDL_UnlockMutex(q->mutex);
576  return ret;
577 }
578 
579 static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
580  memset(d, 0, sizeof(Decoder));
581  d->avctx = avctx;
582  d->queue = queue;
583  d->empty_queue_cond = empty_queue_cond;
584  d->start_pts = AV_NOPTS_VALUE;
585  d->pkt_serial = -1;
586 }
587 
588 static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
589  int ret = AVERROR(EAGAIN);
590 
591  for (;;) {
592  AVPacket pkt;
593 
594  if (d->queue->serial == d->pkt_serial) {
595  do {
596  if (d->queue->abort_request)
597  return -1;
598 
599  switch (d->avctx->codec_type) {
600  case AVMEDIA_TYPE_VIDEO:
601  ret = avcodec_receive_frame(d->avctx, frame);
602  if (ret >= 0) {
603  if (decoder_reorder_pts == -1) {
604  frame->pts = frame->best_effort_timestamp;
605  } else if (!decoder_reorder_pts) {
606  frame->pts = frame->pkt_dts;
607  }
608  }
609  break;
610  case AVMEDIA_TYPE_AUDIO:
611  ret = avcodec_receive_frame(d->avctx, frame);
612  if (ret >= 0) {
613  AVRational tb = (AVRational){1, frame->sample_rate};
614  if (frame->pts != AV_NOPTS_VALUE)
615  frame->pts = av_rescale_q(frame->pts, d->avctx->pkt_timebase, tb);
616  else if (d->next_pts != AV_NOPTS_VALUE)
617  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
618  if (frame->pts != AV_NOPTS_VALUE) {
619  d->next_pts = frame->pts + frame->nb_samples;
620  d->next_pts_tb = tb;
621  }
622  }
623  break;
624  }
625  if (ret == AVERROR_EOF) {
626  d->finished = d->pkt_serial;
627  avcodec_flush_buffers(d->avctx);
628  return 0;
629  }
630  if (ret >= 0)
631  return 1;
632  } while (ret != AVERROR(EAGAIN));
633  }
634 
635  do {
636  if (d->queue->nb_packets == 0)
637  SDL_CondSignal(d->empty_queue_cond);
638  if (d->packet_pending) {
639  av_packet_move_ref(&pkt, &d->pkt);
640  d->packet_pending = 0;
641  } else {
642  if (packet_queue_get(d->queue, &pkt, 1, &d->pkt_serial) < 0)
643  return -1;
644  }
645  } while (d->queue->serial != d->pkt_serial);
646 
647  if (pkt.data == flush_pkt.data) {
648  avcodec_flush_buffers(d->avctx);
649  d->finished = 0;
650  d->next_pts = d->start_pts;
651  d->next_pts_tb = d->start_pts_tb;
652  } else {
653  if (d->avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
654  int got_frame = 0;
655  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &pkt);
656  if (ret < 0) {
657  ret = AVERROR(EAGAIN);
658  } else {
659  if (got_frame && !pkt.data) {
660  d->packet_pending = 1;
661  av_packet_move_ref(&d->pkt, &pkt);
662  }
663  ret = got_frame ? 0 : (pkt.data ? AVERROR(EAGAIN) : AVERROR_EOF);
664  }
665  } else {
666  if (avcodec_send_packet(d->avctx, &pkt) == AVERROR(EAGAIN)) {
667  av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
668  d->packet_pending = 1;
669  av_packet_move_ref(&d->pkt, &pkt);
670  }
671  }
672  av_packet_unref(&pkt);
673  }
674  }
675 }
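/* The loop above follows FFmpeg's send/receive decoding API: it drains
 * avcodec_receive_frame() until EAGAIN, then fetches and feeds one packet with
 * avcodec_send_packet(). Callers (get_video_frame(), audio_thread(), the subtitle thread)
 * treat a return of -1 as abort, 1 as a decoded frame/subtitle, and 0 as end of stream for
 * the current packet serial. Subtitles still go through avcodec_decode_subtitle2(). */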
676 
677 static void decoder_destroy(Decoder *d) {
678  av_packet_unref(&d->pkt);
679  avcodec_free_context(&d->avctx);
680 }
681 
682 static void frame_queue_unref_item(Frame *vp)
683 {
684  av_frame_unref(vp->frame);
685  avsubtitle_free(&vp->sub);
686 }
687 
688 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
689 {
690  int i;
691  memset(f, 0, sizeof(FrameQueue));
692  if (!(f->mutex = SDL_CreateMutex())) {
693  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
694  return AVERROR(ENOMEM);
695  }
696  if (!(f->cond = SDL_CreateCond())) {
697  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
698  return AVERROR(ENOMEM);
699  }
700  f->pktq = pktq;
701  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
702  f->keep_last = !!keep_last;
703  for (i = 0; i < f->max_size; i++)
704  if (!(f->queue[i].frame = av_frame_alloc()))
705  return AVERROR(ENOMEM);
706  return 0;
707 }
708 
709 static void frame_queue_destory(FrameQueue *f)
710 {
711  int i;
712  for (i = 0; i < f->max_size; i++) {
713  Frame *vp = &f->queue[i];
714  frame_queue_unref_item(vp);
715  av_frame_free(&vp->frame);
716  }
717  SDL_DestroyMutex(f->mutex);
718  SDL_DestroyCond(f->cond);
719 }
720 
721 static void frame_queue_signal(FrameQueue *f)
722 {
723  SDL_LockMutex(f->mutex);
724  SDL_CondSignal(f->cond);
725  SDL_UnlockMutex(f->mutex);
726 }
727 
728 static Frame *frame_queue_peek(FrameQueue *f)
729 {
730  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
731 }
732 
733 static Frame *frame_queue_peek_next(FrameQueue *f)
734 {
735  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
736 }
737 
738 static Frame *frame_queue_peek_last(FrameQueue *f)
739 {
740  return &f->queue[f->rindex];
741 }
742 
743 static Frame *frame_queue_peek_writable(FrameQueue *f)
744 {
745  /* wait until we have space to put a new frame */
746  SDL_LockMutex(f->mutex);
747  while (f->size >= f->max_size &&
748  !f->pktq->abort_request) {
749  SDL_CondWait(f->cond, f->mutex);
750  }
751  SDL_UnlockMutex(f->mutex);
752 
753  if (f->pktq->abort_request)
754  return NULL;
755 
756  return &f->queue[f->windex];
757 }
758 
759 static Frame *frame_queue_peek_readable(FrameQueue *f)
760 {
761  /* wait until we have a readable new frame */
762  SDL_LockMutex(f->mutex);
763  while (f->size - f->rindex_shown <= 0 &&
764  !f->pktq->abort_request) {
765  SDL_CondWait(f->cond, f->mutex);
766  }
767  SDL_UnlockMutex(f->mutex);
768 
769  if (f->pktq->abort_request)
770  return NULL;
771 
772  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
773 }
774 
775 static void frame_queue_push(FrameQueue *f)
776 {
777  if (++f->windex == f->max_size)
778  f->windex = 0;
779  SDL_LockMutex(f->mutex);
780  f->size++;
781  SDL_CondSignal(f->cond);
782  SDL_UnlockMutex(f->mutex);
783 }
784 
785 static void frame_queue_next(FrameQueue *f)
786 {
787  if (f->keep_last && !f->rindex_shown) {
788  f->rindex_shown = 1;
789  return;
790  }
791  frame_queue_unref_item(&f->queue[f->rindex]);
792  if (++f->rindex == f->max_size)
793  f->rindex = 0;
794  SDL_LockMutex(f->mutex);
795  f->size--;
796  SDL_CondSignal(f->cond);
797  SDL_UnlockMutex(f->mutex);
798 }
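/* keep_last / rindex_shown: for queues created with keep_last set, the frame currently on
 * screen stays at rindex so it can be redrawn at any time. frame_queue_peek_last() returns
 * that frame, frame_queue_peek() returns the next one to display, and the first call to
 * frame_queue_next() after a frame becomes visible only sets rindex_shown instead of
 * advancing rindex. */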
799 
800 /* return the number of undisplayed frames in the queue */
801 static int frame_queue_nb_remaining(FrameQueue *f)
802 {
803  return f->size - f->rindex_shown;
804 }
805 
806 /* return last shown position */
807 static int64_t frame_queue_last_pos(FrameQueue *f)
808 {
809  Frame *fp = &f->queue[f->rindex];
810  if (f->rindex_shown && fp->serial == f->pktq->serial)
811  return fp->pos;
812  else
813  return -1;
814 }
815 
816 static void decoder_abort(Decoder *d, FrameQueue *fq)
817 {
818  packet_queue_abort(d->queue);
819  frame_queue_signal(fq);
820  SDL_WaitThread(d->decoder_tid, NULL);
821  d->decoder_tid = NULL;
822  packet_queue_flush(d->queue);
823 }
824 
825 static inline void fill_rectangle(int x, int y, int w, int h)
826 {
827  SDL_Rect rect;
828  rect.x = x;
829  rect.y = y;
830  rect.w = w;
831  rect.h = h;
832  if (w && h)
833  SDL_RenderFillRect(renderer, &rect);
834 }
835 
836 static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
837 {
838  Uint32 format;
839  int access, w, h;
840  if (!*texture || SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
841  void *pixels;
842  int pitch;
843  if (*texture)
844  SDL_DestroyTexture(*texture);
845  if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
846  return -1;
847  if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
848  return -1;
849  if (init_texture) {
850  if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
851  return -1;
852  memset(pixels, 0, pitch * new_height);
853  SDL_UnlockTexture(*texture);
854  }
855  av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n", new_width, new_height, SDL_GetPixelFormatName(new_format));
856  }
857  return 0;
858 }
859 
860 static void calculate_display_rect(SDL_Rect *rect,
861  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
862  int pic_width, int pic_height, AVRational pic_sar)
863 {
864  AVRational aspect_ratio = pic_sar;
865  int64_t width, height, x, y;
866 
867  if (av_cmp_q(aspect_ratio, av_make_q(0, 1)) <= 0)
868  aspect_ratio = av_make_q(1, 1);
869 
870  aspect_ratio = av_mul_q(aspect_ratio, av_make_q(pic_width, pic_height));
871 
872  /* XXX: we suppose the screen has a 1.0 pixel ratio */
873  height = scr_height;
874  width = av_rescale(height, aspect_ratio.num, aspect_ratio.den) & ~1;
875  if (width > scr_width) {
876  width = scr_width;
877  height = av_rescale(width, aspect_ratio.den, aspect_ratio.num) & ~1;
878  }
879  x = (scr_width - width) / 2;
880  y = (scr_height - height) / 2;
881  rect->x = scr_xleft + x;
882  rect->y = scr_ytop + y;
883  rect->w = FFMAX((int)width, 1);
884  rect->h = FFMAX((int)height, 1);
885 }
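/* Worked example: a 1920x1080 picture with a 1:1 sample aspect ratio shown in a 1280x800
 * area gives aspect_ratio = 16/9; height = 800 would need a width of ~1422, which is wider
 * than the screen, so width is clamped to 1280 and height becomes 720, leaving 40-pixel
 * bars at the top and bottom. */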
886 
887 static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
888 {
889  int i;
890  *sdl_blendmode = SDL_BLENDMODE_NONE;
891  *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN;
892  if (format == AV_PIX_FMT_RGB32 ||
893  format == AV_PIX_FMT_RGB32_1 ||
894  format == AV_PIX_FMT_BGR32 ||
895  format == AV_PIX_FMT_BGR32_1)
896  *sdl_blendmode = SDL_BLENDMODE_BLEND;
897  for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; i++) {
898  if (format == sdl_texture_format_map[i].format) {
899  *sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt;
900  return;
901  }
902  }
903 }
904 
905 static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext **img_convert_ctx) {
906  int ret = 0;
907  Uint32 sdl_pix_fmt;
908  SDL_BlendMode sdl_blendmode;
909  get_sdl_pix_fmt_and_blendmode(frame->format, &sdl_pix_fmt, &sdl_blendmode);
910  if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
911  return -1;
912  switch (sdl_pix_fmt) {
913  case SDL_PIXELFORMAT_UNKNOWN:
914  /* This should only happen if we are not using avfilter... */
915  *img_convert_ctx = sws_getCachedContext(*img_convert_ctx,
916  frame->width, frame->height, frame->format, frame->width, frame->height,
917  AV_PIX_FMT_BGRA, sws_flags, NULL, NULL, NULL);
918  if (*img_convert_ctx != NULL) {
919  uint8_t *pixels[4];
920  int pitch[4];
921  if (!SDL_LockTexture(*tex, NULL, (void **)pixels, pitch)) {
922  sws_scale(*img_convert_ctx, (const uint8_t * const *)frame->data, frame->linesize,
923  0, frame->height, pixels, pitch);
924  SDL_UnlockTexture(*tex);
925  }
926  } else {
927  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
928  ret = -1;
929  }
930  break;
931  case SDL_PIXELFORMAT_IYUV:
932  if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
933  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0],
934  frame->data[1], frame->linesize[1],
935  frame->data[2], frame->linesize[2]);
936  } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
937  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0],
938  frame->data[1] + frame->linesize[1] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[1],
939  frame->data[2] + frame->linesize[2] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[2]);
940  } else {
941  av_log(NULL, AV_LOG_ERROR, "Mixed negative and positive linesizes are not supported.\n");
942  return -1;
943  }
944  break;
945  default:
946  if (frame->linesize[0] < 0) {
947  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
948  } else {
949  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0], frame->linesize[0]);
950  }
951  break;
952  }
953  return ret;
954 }
955 
956 static void set_sdl_yuv_conversion_mode(AVFrame *frame)
957 {
958 #if SDL_VERSION_ATLEAST(2,0,8)
959  SDL_YUV_CONVERSION_MODE mode = SDL_YUV_CONVERSION_AUTOMATIC;
960  if (frame && (frame->format == AV_PIX_FMT_YUV420P || frame->format == AV_PIX_FMT_YUYV422 || frame->format == AV_PIX_FMT_UYVY422)) {
961  if (frame->color_range == AVCOL_RANGE_JPEG)
962  mode = SDL_YUV_CONVERSION_JPEG;
963  else if (frame->colorspace == AVCOL_SPC_BT709)
964  mode = SDL_YUV_CONVERSION_BT709;
965  else if (frame->colorspace == AVCOL_SPC_BT470BG || frame->colorspace == AVCOL_SPC_SMPTE170M || frame->colorspace == AVCOL_SPC_SMPTE240M)
966  mode = SDL_YUV_CONVERSION_BT601;
967  }
968  SDL_SetYUVConversionMode(mode);
969 #endif
970 }
971 
972 static void video_image_display(VideoState *is)
973 {
974  Frame *vp;
975  Frame *sp = NULL;
976  SDL_Rect rect;
977 
978  vp = frame_queue_peek_last(&is->pictq);
979  if (is->subtitle_st) {
980  if (frame_queue_nb_remaining(&is->subpq) > 0) {
981  sp = frame_queue_peek(&is->subpq);
982 
983  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
984  if (!sp->uploaded) {
985  uint8_t* pixels[4];
986  int pitch[4];
987  int i;
988  if (!sp->width || !sp->height) {
989  sp->width = vp->width;
990  sp->height = vp->height;
991  }
992  if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
993  return;
994 
995  for (i = 0; i < sp->sub.num_rects; i++) {
996  AVSubtitleRect *sub_rect = sp->sub.rects[i];
997 
998  sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
999  sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
1000  sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x);
1001  sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
1002 
1003  is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
1004  sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8,
1005  sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA,
1006  0, NULL, NULL, NULL);
1007  if (!is->sub_convert_ctx) {
1008  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1009  return;
1010  }
1011  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) {
1012  sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
1013  0, sub_rect->h, pixels, pitch);
1014  SDL_UnlockTexture(is->sub_texture);
1015  }
1016  }
1017  sp->uploaded = 1;
1018  }
1019  } else
1020  sp = NULL;
1021  }
1022  }
1023 
1024  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
1025 
1026  if (!vp->uploaded) {
1027  if (upload_texture(&is->vid_texture, vp->frame, &is->img_convert_ctx) < 0)
1028  return;
1029  vp->uploaded = 1;
1030  vp->flip_v = vp->frame->linesize[0] < 0;
1031  }
1032 
1033  set_sdl_yuv_conversion_mode(vp->frame);
1034  SDL_RenderCopyEx(renderer, is->vid_texture, NULL, &rect, 0, NULL, vp->flip_v ? SDL_FLIP_VERTICAL : 0);
1035  set_sdl_yuv_conversion_mode(NULL);
1036  if (sp) {
1037 #if USE_ONEPASS_SUBTITLE_RENDER
1038  SDL_RenderCopy(renderer, is->sub_texture, NULL, &rect);
1039 #else
1040  int i;
1041  double xratio = (double)rect.w / (double)sp->width;
1042  double yratio = (double)rect.h / (double)sp->height;
1043  for (i = 0; i < sp->sub.num_rects; i++) {
1044  SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
1045  SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
1046  .y = rect.y + sub_rect->y * yratio,
1047  .w = sub_rect->w * xratio,
1048  .h = sub_rect->h * yratio};
1049  SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
1050  }
1051 #endif
1052  }
1053 }
1054 
1055 static inline int compute_mod(int a, int b)
1056 {
1057  return a < 0 ? a%b + b : a%b;
1058 }
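/* compute_mod() is a remainder that always lands in [0, b), e.g.
 * compute_mod(-3, SAMPLE_ARRAY_SIZE) gives SAMPLE_ARRAY_SIZE - 3; it is used below to step
 * backwards through the circular sample_array. */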
1059 
1060 static void video_audio_display(VideoState *s)
1061 {
1062  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1063  int ch, channels, h, h2;
1064  int64_t time_diff;
1065  int rdft_bits, nb_freq;
1066 
1067  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1068  ;
1069  nb_freq = 1 << (rdft_bits - 1);
1070 
1071  /* compute display index : center on currently output samples */
1072  channels = s->audio_tgt.channels;
1073  nb_display_channels = channels;
1074  if (!s->paused) {
1075  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1076  n = 2 * channels;
1077  delay = s->audio_write_buf_size;
1078  delay /= n;
1079 
1080  /* to be more precise, we take into account the time spent since
1081  the last buffer computation */
1082  if (audio_callback_time) {
1083  time_diff = av_gettime_relative() - audio_callback_time;
1084  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1085  }
1086 
1087  delay += 2 * data_used;
1088  if (delay < data_used)
1089  delay = data_used;
1090 
1091  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1092  if (s->show_mode == SHOW_MODE_WAVES) {
1093  h = INT_MIN;
1094  for (i = 0; i < 1000; i += channels) {
1095  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1096  int a = s->sample_array[idx];
1097  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1098  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1099  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1100  int score = a - d;
1101  if (h < score && (b ^ c) < 0) {
1102  h = score;
1103  i_start = idx;
1104  }
1105  }
1106  }
1107 
1108  s->last_i_start = i_start;
1109  } else {
1110  i_start = s->last_i_start;
1111  }
1112 
1113  if (s->show_mode == SHOW_MODE_WAVES) {
1114  SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
1115 
1116  /* total height for one channel */
1117  h = s->height / nb_display_channels;
1118  /* graph height / 2 */
1119  h2 = (h * 9) / 20;
1120  for (ch = 0; ch < nb_display_channels; ch++) {
1121  i = i_start + ch;
1122  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1123  for (x = 0; x < s->width; x++) {
1124  y = (s->sample_array[i] * h2) >> 15;
1125  if (y < 0) {
1126  y = -y;
1127  ys = y1 - y;
1128  } else {
1129  ys = y1;
1130  }
1131  fill_rectangle(s->xleft + x, ys, 1, y);
1132  i += channels;
1133  if (i >= SAMPLE_ARRAY_SIZE)
1134  i -= SAMPLE_ARRAY_SIZE;
1135  }
1136  }
1137 
1138  SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
1139 
1140  for (ch = 1; ch < nb_display_channels; ch++) {
1141  y = s->ytop + ch * h;
1142  fill_rectangle(s->xleft, y, s->width, 1);
1143  }
1144  } else {
1145  if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
1146  return;
1147 
1148  nb_display_channels= FFMIN(nb_display_channels, 2);
1149  if (rdft_bits != s->rdft_bits) {
1150  av_rdft_end(s->rdft);
1151  av_free(s->rdft_data);
1152  s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
1153  s->rdft_bits = rdft_bits;
1154  s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data));
1155  }
1156  if (!s->rdft || !s->rdft_data){
1157  av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1158  s->show_mode = SHOW_MODE_WAVES;
1159  } else {
1160  FFTSample *data[2];
1161  SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
1162  uint32_t *pixels;
1163  int pitch;
1164  for (ch = 0; ch < nb_display_channels; ch++) {
1165  data[ch] = s->rdft_data + 2 * nb_freq * ch;
1166  i = i_start + ch;
1167  for (x = 0; x < 2 * nb_freq; x++) {
1168  double w = (x-nb_freq) * (1.0 / nb_freq);
1169  data[ch][x] = s->sample_array[i] * (1.0 - w * w);
1170  i += channels;
1171  if (i >= SAMPLE_ARRAY_SIZE)
1172  i -= SAMPLE_ARRAY_SIZE;
1173  }
1174  av_rdft_calc(s->rdft, data[ch]);
1175  }
1176  /* Least efficient way to do this, we should of course
1177  * directly access it but it is more than fast enough. */
1178  if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
1179  pitch >>= 2;
1180  pixels += pitch * s->height;
1181  for (y = 0; y < s->height; y++) {
1182  double w = 1 / sqrt(nb_freq);
1183  int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
1184  int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][2 * y + 0], data[1][2 * y + 1]))
1185  : a;
1186  a = FFMIN(a, 255);
1187  b = FFMIN(b, 255);
1188  pixels -= pitch;
1189  *pixels = (a << 16) + (b << 8) + ((a+b) >> 1);
1190  }
1191  SDL_UnlockTexture(s->vis_texture);
1192  }
1193  SDL_RenderCopy(renderer, s->vis_texture, NULL, NULL);
1194  }
1195  if (!s->paused)
1196  s->xpos++;
1197  if (s->xpos >= s->width)
1198  s->xpos= s->xleft;
1199  }
1200 }
1201 
1202 static void stream_component_close(VideoState *is, int stream_index)
1203 {
1204  AVFormatContext *ic = is->ic;
1205  AVCodecParameters *codecpar;
1206 
1207  if (stream_index < 0 || stream_index >= ic->nb_streams)
1208  return;
1209  codecpar = ic->streams[stream_index]->codecpar;
1210 
1211  switch (codecpar->codec_type) {
1212  case AVMEDIA_TYPE_AUDIO:
1213  decoder_abort(&is->auddec, &is->sampq);
1214  SDL_CloseAudioDevice(audio_dev);
1215  decoder_destroy(&is->auddec);
1216  swr_free(&is->swr_ctx);
1217  av_freep(&is->audio_buf1);
1218  is->audio_buf1_size = 0;
1219  is->audio_buf = NULL;
1220 
1221  if (is->rdft) {
1222  av_rdft_end(is->rdft);
1223  av_freep(&is->rdft_data);
1224  is->rdft = NULL;
1225  is->rdft_bits = 0;
1226  }
1227  break;
1228  case AVMEDIA_TYPE_VIDEO:
1229  decoder_abort(&is->viddec, &is->pictq);
1230  decoder_destroy(&is->viddec);
1231  break;
1232  case AVMEDIA_TYPE_SUBTITLE:
1233  decoder_abort(&is->subdec, &is->subpq);
1234  decoder_destroy(&is->subdec);
1235  break;
1236  default:
1237  break;
1238  }
1239 
1240  ic->streams[stream_index]->discard = AVDISCARD_ALL;
1241  switch (codecpar->codec_type) {
1242  case AVMEDIA_TYPE_AUDIO:
1243  is->audio_st = NULL;
1244  is->audio_stream = -1;
1245  break;
1246  case AVMEDIA_TYPE_VIDEO:
1247  is->video_st = NULL;
1248  is->video_stream = -1;
1249  break;
1250  case AVMEDIA_TYPE_SUBTITLE:
1251  is->subtitle_st = NULL;
1252  is->subtitle_stream = -1;
1253  break;
1254  default:
1255  break;
1256  }
1257 }
1258 
1259 static void stream_close(VideoState *is)
1260 {
1261  /* XXX: use a special url_shutdown call to abort parse cleanly */
1262  is->abort_request = 1;
1263  SDL_WaitThread(is->read_tid, NULL);
1264 
1265  /* close each stream */
1266  if (is->audio_stream >= 0)
1267  stream_component_close(is, is->audio_stream);
1268  if (is->video_stream >= 0)
1269  stream_component_close(is, is->video_stream);
1270  if (is->subtitle_stream >= 0)
1271  stream_component_close(is, is->subtitle_stream);
1272 
1273  avformat_close_input(&is->ic);
1274 
1275  packet_queue_destroy(&is->videoq);
1276  packet_queue_destroy(&is->audioq);
1277  packet_queue_destroy(&is->subtitleq);
1278 
1279  /* free all pictures */
1280  frame_queue_destory(&is->pictq);
1281  frame_queue_destory(&is->sampq);
1282  frame_queue_destory(&is->subpq);
1283  SDL_DestroyCond(is->continue_read_thread);
1284  sws_freeContext(is->img_convert_ctx);
1285  sws_freeContext(is->sub_convert_ctx);
1286  av_free(is->filename);
1287  if (is->vis_texture)
1288  SDL_DestroyTexture(is->vis_texture);
1289  if (is->vid_texture)
1290  SDL_DestroyTexture(is->vid_texture);
1291  if (is->sub_texture)
1292  SDL_DestroyTexture(is->sub_texture);
1293  av_free(is);
1294 }
1295 
1296 static void do_exit(VideoState *is)
1297 {
1298  if (is) {
1299  stream_close(is);
1300  }
1301  if (renderer)
1302  SDL_DestroyRenderer(renderer);
1303  if (window)
1304  SDL_DestroyWindow(window);
1305  uninit_opts();
1306 #if CONFIG_AVFILTER
1307  av_freep(&vfilters_list);
1308 #endif
1309  avformat_network_deinit();
1310  if (show_status)
1311  printf("\n");
1312  SDL_Quit();
1313  av_log(NULL, AV_LOG_QUIET, "%s", "");
1314  exit(0);
1315 }
1316 
1317 static void sigterm_handler(int sig)
1318 {
1319  exit(123);
1320 }
1321 
1322 static void set_default_window_size(int width, int height, AVRational sar)
1323 {
1324  SDL_Rect rect;
1325  int max_width = screen_width ? screen_width : INT_MAX;
1326  int max_height = screen_height ? screen_height : INT_MAX;
1327  if (max_width == INT_MAX && max_height == INT_MAX)
1328  max_height = height;
1329  calculate_display_rect(&rect, 0, 0, max_width, max_height, width, height, sar);
1330  default_width = rect.w;
1331  default_height = rect.h;
1332 }
1333 
1334 static int video_open(VideoState *is)
1335 {
1336  int w,h;
1337 
1338  w = screen_width ? screen_width : default_width;
1339  h = screen_height ? screen_height : default_height;
1340 
1341  if (!window_title)
1342  window_title = input_filename;
1343  SDL_SetWindowTitle(window, window_title);
1344 
1345  SDL_SetWindowSize(window, w, h);
1346  SDL_SetWindowPosition(window, screen_left, screen_top);
1347  if (is_full_screen)
1348  SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP);
1349  SDL_ShowWindow(window);
1350 
1351  is->width = w;
1352  is->height = h;
1353 
1354  return 0;
1355 }
1356 
1357 /* display the current picture, if any */
1358 static void video_display(VideoState *is)
1359 {
1360  if (!is->width)
1361  video_open(is);
1362 
1363  SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1364  SDL_RenderClear(renderer);
1365  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1366  video_audio_display(is);
1367  else if (is->video_st)
1368  video_image_display(is);
1369  SDL_RenderPresent(renderer);
1370 }
1371 
1372 static double get_clock(Clock *c)
1373 {
1374  if (*c->queue_serial != c->serial)
1375  return NAN;
1376  if (c->paused) {
1377  return c->pts;
1378  } else {
1379  double time = av_gettime_relative() / 1000000.0;
1380  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1381  }
1382 }
1383 
1384 static void set_clock_at(Clock *c, double pts, int serial, double time)
1385 {
1386  c->pts = pts;
1387  c->last_updated = time;
1388  c->pts_drift = c->pts - time;
1389  c->serial = serial;
1390 }
1391 
1392 static void set_clock(Clock *c, double pts, int serial)
1393 {
1394  double time = av_gettime_relative() / 1000000.0;
1395  set_clock_at(c, pts, serial, time);
1396 }
1397 
1398 static void set_clock_speed(Clock *c, double speed)
1399 {
1400  set_clock(c, get_clock(c), c->serial);
1401  c->speed = speed;
1402 }
1403 
1404 static void init_clock(Clock *c, int *queue_serial)
1405 {
1406  c->speed = 1.0;
1407  c->paused = 0;
1408  c->queue_serial = queue_serial;
1409  set_clock(c, NAN, -1);
1410 }
1411 
1412 static void sync_clock_to_slave(Clock *c, Clock *slave)
1413 {
1414  double clock = get_clock(c);
1415  double slave_clock = get_clock(slave);
1416  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1417  set_clock(c, slave_clock, slave->serial);
1418 }
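/* The external clock is snapped to the audio/video clock only when it is unset or has
 * drifted more than AV_NOSYNC_THRESHOLD (10 s) away; smaller drifts are tolerated, and for
 * realtime sources the external clock speed is additionally nudged by
 * check_external_clock_speed() below based on how full the packet queues are. */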
1419 
1420 static int get_master_sync_type(VideoState *is) {
1421  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1422  if (is->video_st)
1423  return AV_SYNC_VIDEO_MASTER;
1424  else
1425  return AV_SYNC_AUDIO_MASTER;
1426  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1427  if (is->audio_st)
1428  return AV_SYNC_AUDIO_MASTER;
1429  else
1430  return AV_SYNC_EXTERNAL_CLOCK;
1431  } else {
1432  return AV_SYNC_EXTERNAL_CLOCK;
1433  }
1434 }
1435 
1436 /* get the current master clock value */
1437 static double get_master_clock(VideoState *is)
1438 {
1439  double val;
1440 
1441  switch (get_master_sync_type(is)) {
1442  case AV_SYNC_VIDEO_MASTER:
1443  val = get_clock(&is->vidclk);
1444  break;
1445  case AV_SYNC_AUDIO_MASTER:
1446  val = get_clock(&is->audclk);
1447  break;
1448  default:
1449  val = get_clock(&is->extclk);
1450  break;
1451  }
1452  return val;
1453 }
1454 
1455 static void check_external_clock_speed(VideoState *is) {
1456  if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES ||
1457  is->audio_stream >= 0 && is->audioq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES) {
1458  set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
1459  } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) &&
1460  (is->audio_stream < 0 || is->audioq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES)) {
1461  set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
1462  } else {
1463  double speed = is->extclk.speed;
1464  if (speed != 1.0)
1465  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1466  }
1467 }
1468 
1469 /* seek in the stream */
1470 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1471 {
1472  if (!is->seek_req) {
1473  is->seek_pos = pos;
1474  is->seek_rel = rel;
1475  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1476  if (seek_by_bytes)
1477  is->seek_flags |= AVSEEK_FLAG_BYTE;
1478  is->seek_req = 1;
1479  SDL_CondSignal(is->continue_read_thread);
1480  }
1481 }
1482 
1483 /* pause or resume the video */
1484 static void stream_toggle_pause(VideoState *is)
1485 {
1486  if (is->paused) {
1487  is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1488  if (is->read_pause_return != AVERROR(ENOSYS)) {
1489  is->vidclk.paused = 0;
1490  }
1491  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1492  }
1493  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1494  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1495 }
1496 
1497 static void toggle_pause(VideoState *is)
1498 {
1499  stream_toggle_pause(is);
1500  is->step = 0;
1501 }
1502 
1503 static void toggle_mute(VideoState *is)
1504 {
1505  is->muted = !is->muted;
1506 }
1507 
1508 static void update_volume(VideoState *is, int sign, double step)
1509 {
1510  double volume_level = is->audio_volume ? (20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)) : -1000.0;
1511  int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (volume_level + sign * step) / 20.0));
1512  is->audio_volume = av_clip(is->audio_volume == new_volume ? (is->audio_volume + sign) : new_volume, 0, SDL_MIX_MAXVOLUME);
1513 }
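/* Volume steps are applied in the dB domain: with SDL_VOLUME_STEP = 0.75 dB and
 * SDL_MIX_MAXVOLUME = 128 (SDL2's maximum), one step down from full volume is roughly
 * 128 * 10^(-0.75/20) ~= 117, so repeated key presses produce an even perceived
 * (logarithmic) ramp rather than a linear one. */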
1514 
1515 static void step_to_next_frame(VideoState *is)
1516 {
1517  /* if the stream is paused unpause it, then step */
1518  if (is->paused)
1519  stream_toggle_pause(is);
1520  is->step = 1;
1521 }
1522 
1523 static double compute_target_delay(double delay, VideoState *is)
1524 {
1525  double sync_threshold, diff = 0;
1526 
1527  /* update delay to follow master synchronisation source */
1528  if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1529  /* if video is slave, we try to correct big delays by
1530  duplicating or deleting a frame */
1531  diff = get_clock(&is->vidclk) - get_master_clock(is);
1532 
1533  /* skip or repeat frame. We take into account the
1534  delay to compute the threshold. I still don't know
1535  if it is the best guess */
1536  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1537  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1538  if (diff <= -sync_threshold)
1539  delay = FFMAX(0, delay + diff);
1540  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1541  delay = delay + diff;
1542  else if (diff >= sync_threshold)
1543  delay = 2 * delay;
1544  }
1545  }
1546 
1547  av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
1548  delay, -diff);
1549 
1550  return delay;
1551 }
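/* Example: with a nominal frame delay of 40 ms the sync threshold clamps to
 * AV_SYNC_THRESHOLD_MIN (40 ms). If the video clock is 80 ms behind the master
 * (diff = -0.08), delay becomes FFMAX(0, 0.04 - 0.08) = 0 and the next frame is shown
 * immediately; if video is ahead by the same amount, the 40 ms delay is below
 * AV_SYNC_FRAMEDUP_THRESHOLD so it is doubled to 80 ms, letting the master catch up. */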
1552 
1553 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1554  if (vp->serial == nextvp->serial) {
1555  double duration = nextvp->pts - vp->pts;
1556  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1557  return vp->duration;
1558  else
1559  return duration;
1560  } else {
1561  return 0.0;
1562  }
1563 }
1564 
1565 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1566  /* update current video pts */
1567  set_clock(&is->vidclk, pts, serial);
1568  sync_clock_to_slave(&is->extclk, &is->vidclk);
1569 }
1570 
1571 /* called to display each frame */
1572 static void video_refresh(void *opaque, double *remaining_time)
1573 {
1574  VideoState *is = opaque;
1575  double time;
1576 
1577  Frame *sp, *sp2;
1578 
1579  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1580  check_external_clock_speed(is);
1581 
1582  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1583  time = av_gettime_relative() / 1000000.0;
1584  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1585  video_display(is);
1586  is->last_vis_time = time;
1587  }
1588  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1589  }
1590 
1591  if (is->video_st) {
1592 retry:
1593  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1594  // nothing to do, no picture to display in the queue
1595  } else {
1596  double last_duration, duration, delay;
1597  Frame *vp, *lastvp;
1598 
1599  /* dequeue the picture */
1600  lastvp = frame_queue_peek_last(&is->pictq);
1601  vp = frame_queue_peek(&is->pictq);
1602 
1603  if (vp->serial != is->videoq.serial) {
1604  frame_queue_next(&is->pictq);
1605  goto retry;
1606  }
1607 
1608  if (lastvp->serial != vp->serial)
1609  is->frame_timer = av_gettime_relative() / 1000000.0;
1610 
1611  if (is->paused)
1612  goto display;
1613 
1614  /* compute nominal last_duration */
1615  last_duration = vp_duration(is, lastvp, vp);
1616  delay = compute_target_delay(last_duration, is);
1617 
1618  time= av_gettime_relative()/1000000.0;
1619  if (time < is->frame_timer + delay) {
1620  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1621  goto display;
1622  }
1623 
1624  is->frame_timer += delay;
1625  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1626  is->frame_timer = time;
1627 
1628  SDL_LockMutex(is->pictq.mutex);
1629  if (!isnan(vp->pts))
1630  update_video_pts(is, vp->pts, vp->pos, vp->serial);
1631  SDL_UnlockMutex(is->pictq.mutex);
1632 
1633  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1634  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1635  duration = vp_duration(is, vp, nextvp);
1636  if(!is->step && (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1637  is->frame_drops_late++;
1638  frame_queue_next(&is->pictq);
1639  goto retry;
1640  }
1641  }
1642 
1643  if (is->subtitle_st) {
1644  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1645  sp = frame_queue_peek(&is->subpq);
1646 
1647  if (frame_queue_nb_remaining(&is->subpq) > 1)
1648  sp2 = frame_queue_peek_next(&is->subpq);
1649  else
1650  sp2 = NULL;
1651 
1652  if (sp->serial != is->subtitleq.serial
1653  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1654  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1655  {
1656  if (sp->uploaded) {
1657  int i;
1658  for (i = 0; i < sp->sub.num_rects; i++) {
1659  AVSubtitleRect *sub_rect = sp->sub.rects[i];
1660  uint8_t *pixels;
1661  int pitch, j;
1662 
1663  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
1664  for (j = 0; j < sub_rect->h; j++, pixels += pitch)
1665  memset(pixels, 0, sub_rect->w << 2);
1666  SDL_UnlockTexture(is->sub_texture);
1667  }
1668  }
1669  }
1670  frame_queue_next(&is->subpq);
1671  } else {
1672  break;
1673  }
1674  }
1675  }
1676 
1677  frame_queue_next(&is->pictq);
1678  is->force_refresh = 1;
1679 
1680  if (is->step && !is->paused)
1681  stream_toggle_pause(is);
1682  }
1683 display:
1684  /* display picture */
1685  if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
1686  video_display(is);
1687  }
1688  is->force_refresh = 0;
1689  if (show_status) {
1690  static int64_t last_time;
1691  int64_t cur_time;
1692  int aqsize, vqsize, sqsize;
1693  double av_diff;
1694 
1695  cur_time = av_gettime_relative();
1696  if (!last_time || (cur_time - last_time) >= 30000) {
1697  aqsize = 0;
1698  vqsize = 0;
1699  sqsize = 0;
1700  if (is->audio_st)
1701  aqsize = is->audioq.size;
1702  if (is->video_st)
1703  vqsize = is->videoq.size;
1704  if (is->subtitle_st)
1705  sqsize = is->subtitleq.size;
1706  av_diff = 0;
1707  if (is->audio_st && is->video_st)
1708  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1709  else if (is->video_st)
1710  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1711  else if (is->audio_st)
1712  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1713  av_log(NULL, AV_LOG_INFO,
1714  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1715  get_master_clock(is),
1716  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1717  av_diff,
1718  is->frame_drops_early + is->frame_drops_late,
1719  aqsize / 1024,
1720  vqsize / 1024,
1721  sqsize,
1722  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_dts : 0,
1723  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_pts : 0);
1724  fflush(stdout);
1725  last_time = cur_time;
1726  }
1727  }
1728 }
1729 
1730 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1731 {
1732  Frame *vp;
1733 
1734 #if defined(DEBUG_SYNC)
1735  printf("frame_type=%c pts=%0.3f\n",
1736  av_get_picture_type_char(src_frame->pict_type), pts);
1737 #endif
1738 
1739  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1740  return -1;
1741 
1742  vp->sar = src_frame->sample_aspect_ratio;
1743  vp->uploaded = 0;
1744 
1745  vp->width = src_frame->width;
1746  vp->height = src_frame->height;
1747  vp->format = src_frame->format;
1748 
1749  vp->pts = pts;
1750  vp->duration = duration;
1751  vp->pos = pos;
1752  vp->serial = serial;
1753 
1754  set_default_window_size(vp->width, vp->height, vp->sar);
1755 
1756  av_frame_move_ref(vp->frame, src_frame);
1757  frame_queue_push(&is->pictq);
1758  return 0;
1759 }
1760 
1761 static int get_video_frame(VideoState *is, AVFrame *frame)
1762 {
1763  int got_picture;
1764 
1765  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1766  return -1;
1767 
1768  if (got_picture) {
1769  double dpts = NAN;
1770 
1771  if (frame->pts != AV_NOPTS_VALUE)
1772  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1773 
1774  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1775 
1776  if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1777  if (frame->pts != AV_NOPTS_VALUE) {
1778  double diff = dpts - get_master_clock(is);
1779  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1780  diff - is->frame_last_filter_delay < 0 &&
1781  is->viddec.pkt_serial == is->vidclk.serial &&
1782  is->videoq.nb_packets) {
1783  is->frame_drops_early++;
1784  av_frame_unref(frame);
1785  got_picture = 0;
1786  }
1787  }
1788  }
1789  }
1790 
1791  return got_picture;
1792 }
1793 
1794 #if CONFIG_AVFILTER
1795 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1796  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1797 {
1798  int ret, i;
1799  int nb_filters = graph->nb_filters;
1800  AVFilterInOut *outputs = NULL, *inputs = NULL;
1801 
1802  if (filtergraph) {
1803  outputs = avfilter_inout_alloc();
1804  inputs = avfilter_inout_alloc();
1805  if (!outputs || !inputs) {
1806  ret = AVERROR(ENOMEM);
1807  goto fail;
1808  }
1809 
1810  outputs->name = av_strdup("in");
1811  outputs->filter_ctx = source_ctx;
1812  outputs->pad_idx = 0;
1813  outputs->next = NULL;
1814 
1815  inputs->name = av_strdup("out");
1816  inputs->filter_ctx = sink_ctx;
1817  inputs->pad_idx = 0;
1818  inputs->next = NULL;
1819 
1820  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1821  goto fail;
1822  } else {
1823  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1824  goto fail;
1825  }
1826 
1827  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1828  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1829  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1830 
1831  ret = avfilter_graph_config(graph, NULL);
1832 fail:
1833  avfilter_inout_free(&outputs);
1834  avfilter_inout_free(&inputs);
1835  return ret;
1836 }
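/* Note the inverted naming: the AVFilterInOut labelled "in" describes the free output pad of
 * source_ctx (what the user's filter string sees as its input) and "out" describes the free
 * input pad of sink_ctx, which is the labelling avfilter_graph_parse_ptr() expects for
 * "-vf"/"-af" style chains. */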
1837 
1838 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1839 {
1840  enum AVPixelFormat pix_fmts[FF_ARRAY_ELEMS(sdl_texture_format_map)];
1841  char sws_flags_str[512] = "";
1842  char buffersrc_args[256];
1843  int ret;
1844  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1845  AVCodecParameters *codecpar = is->video_st->codecpar;
1846  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1847  AVDictionaryEntry *e = NULL;
1848  int nb_pix_fmts = 0;
1849  int i, j;
1850 
1851  for (i = 0; i < renderer_info.num_texture_formats; i++) {
1852  for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; j++) {
1853  if (renderer_info.texture_formats[i] == sdl_texture_format_map[j].texture_fmt) {
1854  pix_fmts[nb_pix_fmts++] = sdl_texture_format_map[j].format;
1855  break;
1856  }
1857  }
1858  }
1859  pix_fmts[nb_pix_fmts] = AV_PIX_FMT_NONE;
1860 
1861  while ((e = av_dict_get(sws_dict, "", e, AV_DICT_IGNORE_SUFFIX))) {
1862  if (!strcmp(e->key, "sws_flags")) {
1863  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1864  } else
1865  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value);
1866  }
1867  if (strlen(sws_flags_str))
1868  sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1869 
1870  graph->scale_sws_opts = av_strdup(sws_flags_str);
1871 
1872  snprintf(buffersrc_args, sizeof(buffersrc_args),
1873  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1874  frame->width, frame->height, frame->format,
1875  is->video_st->time_base.num, is->video_st->time_base.den,
1876  codecpar->sample_aspect_ratio.num, FFMAX(codecpar->sample_aspect_ratio.den, 1));
1877  if (fr.num && fr.den)
1878  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1879 
1880  if ((ret = avfilter_graph_create_filter(&filt_src,
1881  avfilter_get_by_name("buffer"),
1882  "ffplay_buffer", buffersrc_args, NULL,
1883  graph)) < 0)
1884  goto fail;
1885 
1886  ret = avfilter_graph_create_filter(&filt_out,
1887  avfilter_get_by_name("buffersink"),
1888  "ffplay_buffersink", NULL, NULL, graph);
1889  if (ret < 0)
1890  goto fail;
1891 
1892  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1893  goto fail;
1894 
1895  last_filter = filt_out;
1896 
1897 /* Note: this macro adds a filter before the lastly added filter, so the
1898  * processing order of the filters is in reverse */
1899 #define INSERT_FILT(name, arg) do { \
1900  AVFilterContext *filt_ctx; \
1901  \
1902  ret = avfilter_graph_create_filter(&filt_ctx, \
1903  avfilter_get_by_name(name), \
1904  "ffplay_" name, arg, NULL, graph); \
1905  if (ret < 0) \
1906  goto fail; \
1907  \
1908  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1909  if (ret < 0) \
1910  goto fail; \
1911  \
1912  last_filter = filt_ctx; \
1913 } while (0)
1914 
1915  if (autorotate) {
1916  double theta = get_rotation(is->video_st);
1917 
1918  if (fabs(theta - 90) < 1.0) {
1919  INSERT_FILT("transpose", "clock");
1920  } else if (fabs(theta - 180) < 1.0) {
1921  INSERT_FILT("hflip", NULL);
1922  INSERT_FILT("vflip", NULL);
1923  } else if (fabs(theta - 270) < 1.0) {
1924  INSERT_FILT("transpose", "cclock");
1925  } else if (fabs(theta) > 1.0) {
1926  char rotate_buf[64];
1927  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1928  INSERT_FILT("rotate", rotate_buf);
1929  }
1930  }
1931 
1932  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
1933  goto fail;
1934 
1935  is->in_video_filter = filt_src;
1936  is->out_video_filter = filt_out;
1937 
1938 fail:
1939  return ret;
1940 }
1941 
1942 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1943 {
1944  static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
1945  int sample_rates[2] = { 0, -1 };
1946  int64_t channel_layouts[2] = { 0, -1 };
1947  int channels[2] = { 0, -1 };
1948  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1949  char aresample_swr_opts[512] = "";
1950  AVDictionaryEntry *e = NULL;
1951  char asrc_args[256];
1952  int ret;
1953 
1954  avfilter_graph_free(&is->agraph);
1955  if (!(is->agraph = avfilter_graph_alloc()))
1956  return AVERROR(ENOMEM);
1957 
1958  while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
1959  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
1960  if (strlen(aresample_swr_opts))
1961  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
1962  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
1963 
1964  ret = snprintf(asrc_args, sizeof(asrc_args),
1965  "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
1966  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
1967  is->audio_filter_src.channels,
1968  1, is->audio_filter_src.freq);
1969  if (is->audio_filter_src.channel_layout)
1970  snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
1971  ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
1972 
1973  ret = avfilter_graph_create_filter(&filt_asrc,
1974  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1975  asrc_args, NULL, is->agraph);
1976  if (ret < 0)
1977  goto end;
1978 
1979 
1980  ret = avfilter_graph_create_filter(&filt_asink,
1981  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
1982  NULL, NULL, is->agraph);
1983  if (ret < 0)
1984  goto end;
1985 
1986  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1987  goto end;
1988  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1989  goto end;
1990 
1991  if (force_output_format) {
1992  channel_layouts[0] = is->audio_tgt.channel_layout;
1993  channels [0] = is->audio_tgt.channels;
1994  sample_rates [0] = is->audio_tgt.freq;
1995  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
1996  goto end;
1997  if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1998  goto end;
1999  if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2000  goto end;
2001  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2002  goto end;
2003  }
2004 
2005 
2006  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
2007  goto end;
2008 
2009  is->in_audio_filter = filt_asrc;
2010  is->out_audio_filter = filt_asink;
2011 
2012 end:
2013  if (ret < 0)
2014  avfilter_graph_free(&is->agraph);
2015  return ret;
2016 }
2017 #endif /* CONFIG_AVFILTER */
2018 
2019 static int audio_thread(void *arg)
2020 {
2021  VideoState *is = arg;
2022  AVFrame *frame = av_frame_alloc();
2023  Frame *af;
2024 #if CONFIG_AVFILTER
2025  int last_serial = -1;
2026  int64_t dec_channel_layout;
2027  int reconfigure;
2028 #endif
2029  int got_frame = 0;
2030  AVRational tb;
2031  int ret = 0;
2032 
2033  if (!frame)
2034  return AVERROR(ENOMEM);
2035 
2036  do {
2037  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2038  goto the_end;
2039 
2040  if (got_frame) {
2041  tb = (AVRational){1, frame->sample_rate};
2042 
2043 #if CONFIG_AVFILTER
2044  dec_channel_layout = get_valid_channel_layout(frame->channel_layout, frame->channels);
2045 
2046  reconfigure =
2047  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2048  frame->format, frame->channels) ||
2049  is->audio_filter_src.channel_layout != dec_channel_layout ||
2050  is->audio_filter_src.freq != frame->sample_rate ||
2051  is->auddec.pkt_serial != last_serial;
2052 
2053  if (reconfigure) {
2054  char buf1[1024], buf2[1024];
2055  av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2056  av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2057  av_log(NULL, AV_LOG_DEBUG,
2058  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2059  is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2060  frame->sample_rate, frame->channels, av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2061 
2062  is->audio_filter_src.fmt = frame->format;
2063  is->audio_filter_src.channels = frame->channels;
2064  is->audio_filter_src.channel_layout = dec_channel_layout;
2065  is->audio_filter_src.freq = frame->sample_rate;
2066  last_serial = is->auddec.pkt_serial;
2067 
2068  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2069  goto the_end;
2070  }
2071 
2072  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2073  goto the_end;
2074 
2075  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2076  tb = av_buffersink_get_time_base(is->out_audio_filter);
2077 #endif
2078  if (!(af = frame_queue_peek_writable(&is->sampq)))
2079  goto the_end;
2080 
2081  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2082  af->pos = frame->pkt_pos;
2083  af->serial = is->auddec.pkt_serial;
2084  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2085 
2086  av_frame_move_ref(af->frame, frame);
2087  frame_queue_push(&is->sampq);
2088 
2089 #if CONFIG_AVFILTER
2090  if (is->audioq.serial != is->auddec.pkt_serial)
2091  break;
2092  }
2093  if (ret == AVERROR_EOF)
2094  is->auddec.finished = is->auddec.pkt_serial;
2095 #endif
2096  }
2097  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2098  the_end:
2099 #if CONFIG_AVFILTER
2100  avfilter_graph_free(&is->agraph);
2101 #endif
2102  av_frame_free(&frame);
2103  return ret;
2104 }
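/* Frames pushed into sampq above carry the decoder's pkt_serial; after a seek
 * the packet queue serial is bumped, and audio_decode_frame() further below
 * skips any queued frame whose serial no longer matches is->audioq.serial. */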
2105 
2106 static int decoder_start(Decoder *d, int (*fn)(void *), void *arg)
2107 {
2108  packet_queue_start(d->queue);
2109  d->decoder_tid = SDL_CreateThread(fn, "decoder", arg);
2110  if (!d->decoder_tid) {
2111  av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
2112  return AVERROR(ENOMEM);
2113  }
2114  return 0;
2115 }
2116 
2117 static int video_thread(void *arg)
2118 {
2119  VideoState *is = arg;
2120  AVFrame *frame = av_frame_alloc();
2121  double pts;
2122  double duration;
2123  int ret;
2124  AVRational tb = is->video_st->time_base;
2125  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2126 
2127 #if CONFIG_AVFILTER
2128  AVFilterGraph *graph = avfilter_graph_alloc();
2129  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2130  int last_w = 0;
2131  int last_h = 0;
2132  enum AVPixelFormat last_format = -2;
2133  int last_serial = -1;
2134  int last_vfilter_idx = 0;
2135  if (!graph) {
2136  av_frame_free(&frame);
2137  return AVERROR(ENOMEM);
2138  }
2139 
2140 #endif
2141 
2142  if (!frame) {
2143 #if CONFIG_AVFILTER
2144  avfilter_graph_free(&graph);
2145 #endif
2146  return AVERROR(ENOMEM);
2147  }
2148 
2149  for (;;) {
2150  ret = get_video_frame(is, frame);
2151  if (ret < 0)
2152  goto the_end;
2153  if (!ret)
2154  continue;
2155 
2156 #if CONFIG_AVFILTER
2157  if ( last_w != frame->width
2158  || last_h != frame->height
2159  || last_format != frame->format
2160  || last_serial != is->viddec.pkt_serial
2161  || last_vfilter_idx != is->vfilter_idx) {
2162  av_log(NULL, AV_LOG_DEBUG,
2163  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2164  last_w, last_h,
2165  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2166  frame->width, frame->height,
2167  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2168  avfilter_graph_free(&graph);
2169  graph = avfilter_graph_alloc();
2170  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2171  SDL_Event event;
2172  event.type = FF_QUIT_EVENT;
2173  event.user.data1 = is;
2174  SDL_PushEvent(&event);
2175  goto the_end;
2176  }
2177  filt_in = is->in_video_filter;
2178  filt_out = is->out_video_filter;
2179  last_w = frame->width;
2180  last_h = frame->height;
2181  last_format = frame->format;
2182  last_serial = is->viddec.pkt_serial;
2183  last_vfilter_idx = is->vfilter_idx;
2184  frame_rate = av_buffersink_get_frame_rate(filt_out);
2185  }
2186 
2187  ret = av_buffersrc_add_frame(filt_in, frame);
2188  if (ret < 0)
2189  goto the_end;
2190 
2191  while (ret >= 0) {
2192  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2193 
2194  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2195  if (ret < 0) {
2196  if (ret == AVERROR_EOF)
2197  is->viddec.finished = is->viddec.pkt_serial;
2198  ret = 0;
2199  break;
2200  }
2201 
2202  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2203  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2204  is->frame_last_filter_delay = 0;
2205  tb = av_buffersink_get_time_base(filt_out);
2206 #endif
2207  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2208  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2209  ret = queue_picture(is, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial);
2210  av_frame_unref(frame);
2211 #if CONFIG_AVFILTER
2212  if (is->videoq.serial != is->viddec.pkt_serial)
2213  break;
2214  }
2215 #endif
2216 
2217  if (ret < 0)
2218  goto the_end;
2219  }
2220  the_end:
2221 #if CONFIG_AVFILTER
2222  avfilter_graph_free(&graph);
2223 #endif
2224  av_frame_free(&frame);
2225  return 0;
2226 }
2227 
2228 static int subtitle_thread(void *arg)
2229 {
2230  VideoState *is = arg;
2231  Frame *sp;
2232  int got_subtitle;
2233  double pts;
2234 
2235  for (;;) {
2236  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2237  return 0;
2238 
2239  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2240  break;
2241 
2242  pts = 0;
2243 
2244  if (got_subtitle && sp->sub.format == 0) {
2245  if (sp->sub.pts != AV_NOPTS_VALUE)
2246  pts = sp->sub.pts / (double)AV_TIME_BASE;
2247  sp->pts = pts;
2248  sp->serial = is->subdec.pkt_serial;
2249  sp->width = is->subdec.avctx->width;
2250  sp->height = is->subdec.avctx->height;
2251  sp->uploaded = 0;
2252 
2253  /* now we can update the picture count */
2254  frame_queue_push(&is->subpq);
2255  } else if (got_subtitle) {
2256  avsubtitle_free(&sp->sub);
2257  }
2258  }
2259  return 0;
2260 }
2261 
2262 /* copy samples for viewing in editor window */
2263 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2264 {
2265  int size, len;
2266 
2267  size = samples_size / sizeof(short);
2268  while (size > 0) {
2269  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2270  if (len > size)
2271  len = size;
2272  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2273  samples += len;
2274  is->sample_array_index += len;
2275  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2276  is->sample_array_index = 0;
2277  size -= len;
2278  }
2279 }
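/* sample_array is used as a circular buffer: the write index wraps back to 0
 * once it reaches SAMPLE_ARRAY_SIZE, so the waveform/RDFT display always
 * works on the most recently played samples. */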
2280 
2281 /* return the wanted number of samples to get better sync if sync_type is video
2282  * or external master clock */
2283 static int synchronize_audio(VideoState *is, int nb_samples)
2284 {
2285  int wanted_nb_samples = nb_samples;
2286 
2287  /* if not master, then we try to remove or add samples to correct the clock */
2288  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2289  double diff, avg_diff;
2290  int min_nb_samples, max_nb_samples;
2291 
2292  diff = get_clock(&is->audclk) - get_master_clock(is);
2293 
2294  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2295  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2296  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2297  /* not enough measures to have a correct estimate */
2298  is->audio_diff_avg_count++;
2299  } else {
2300  /* estimate the A-V difference */
2301  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2302 
2303  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2304  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2305  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2306  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2307  wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2308  }
2309  av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2310  diff, avg_diff, wanted_nb_samples - nb_samples,
2311  is->audio_clock, is->audio_diff_threshold);
2312  }
2313  } else {
2314  /* too big difference : may be initial PTS errors, so
2315  reset A-V filter */
2316  is->audio_diff_avg_count = 0;
2317  is->audio_diff_cum = 0;
2318  }
2319  }
2320 
2321  return wanted_nb_samples;
2322 }
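/* Illustrative example (values not taken from the source): with
 * audio_src.freq = 48000 and a sustained clock error diff = +0.010 s, the
 * code above requests nb_samples + 480 samples; av_clip() then limits the
 * request to within SAMPLE_CORRECTION_PERCENT_MAX (10%) of nb_samples, so a
 * 1024-sample frame is stretched to at most 1126 samples. */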
2323 
2324 /**
2325  * Decode one audio frame and return its uncompressed size.
2326  *
2327  * The processed audio frame is decoded, converted if required, and
2328  * stored in is->audio_buf, with size in bytes given by the return
2329  * value.
2330  */
2331 static int audio_decode_frame(VideoState *is)
2332 {
2333  int data_size, resampled_data_size;
2334  int64_t dec_channel_layout;
2335  av_unused double audio_clock0;
2336  int wanted_nb_samples;
2337  Frame *af;
2338 
2339  if (is->paused)
2340  return -1;
2341 
2342  do {
2343 #if defined(_WIN32)
2344  while (frame_queue_nb_remaining(&is->sampq) == 0) {
2345  if ((av_gettime_relative() - audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
2346  return -1;
2347  av_usleep (1000);
2348  }
2349 #endif
2350  if (!(af = frame_queue_peek_readable(&is->sampq)))
2351  return -1;
2352  frame_queue_next(&is->sampq);
2353  } while (af->serial != is->audioq.serial);
2354 
2355  data_size = av_samples_get_buffer_size(NULL, af->frame->channels,
2356  af->frame->nb_samples,
2357  af->frame->format, 1);
2358 
2359  dec_channel_layout =
2360  (af->frame->channel_layout && af->frame->channels == av_get_channel_layout_nb_channels(af->frame->channel_layout)) ?
2361  af->frame->channel_layout : av_get_default_channel_layout(af->frame->channels);
2362  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2363 
2364  if (af->frame->format != is->audio_src.fmt ||
2365  dec_channel_layout != is->audio_src.channel_layout ||
2366  af->frame->sample_rate != is->audio_src.freq ||
2367  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2368  swr_free(&is->swr_ctx);
2369  is->swr_ctx = swr_alloc_set_opts(NULL,
2370  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2371  dec_channel_layout, af->frame->format, af->frame->sample_rate,
2372  0, NULL);
2373  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2374  av_log(NULL, AV_LOG_ERROR,
2375  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2376  af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), af->frame->channels,
2377  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2378  swr_free(&is->swr_ctx);
2379  return -1;
2380  }
2381  is->audio_src.channel_layout = dec_channel_layout;
2382  is->audio_src.channels = af->frame->channels;
2383  is->audio_src.freq = af->frame->sample_rate;
2384  is->audio_src.fmt = af->frame->format;
2385  }
2386 
2387  if (is->swr_ctx) {
2388  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2389  uint8_t **out = &is->audio_buf1;
2390  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2391  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2392  int len2;
2393  if (out_size < 0) {
2394  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2395  return -1;
2396  }
2397  if (wanted_nb_samples != af->frame->nb_samples) {
2398  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2399  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2400  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2401  return -1;
2402  }
2403  }
2404  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2405  if (!is->audio_buf1)
2406  return AVERROR(ENOMEM);
2407  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2408  if (len2 < 0) {
2409  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2410  return -1;
2411  }
2412  if (len2 == out_count) {
2413  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2414  if (swr_init(is->swr_ctx) < 0)
2415  swr_free(&is->swr_ctx);
2416  }
2417  is->audio_buf = is->audio_buf1;
2418  resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2419  } else {
2420  is->audio_buf = af->frame->data[0];
2421  resampled_data_size = data_size;
2422  }
2423 
2424  audio_clock0 = is->audio_clock;
2425  /* update the audio clock with the pts */
2426  if (!isnan(af->pts))
2427  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2428  else
2429  is->audio_clock = NAN;
2430  is->audio_clock_serial = af->serial;
2431 #ifdef DEBUG
2432  {
2433  static double last_clock;
2434  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2435  is->audio_clock - last_clock,
2436  is->audio_clock, audio_clock0);
2437  last_clock = is->audio_clock;
2438  }
2439 #endif
2440  return resampled_data_size;
2441 }
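/* The returned size is in bytes: len2 converted samples per channel times
 * audio_tgt.channels times the bytes per sample of audio_tgt.fmt, e.g.
 * 1024 * 2 * 2 = 4096 bytes for 1024 stereo S16 samples. */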
2442 
2443 /* prepare a new audio buffer */
2444 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2445 {
2446  VideoState *is = opaque;
2447  int audio_size, len1;
2448 
2449  audio_callback_time = av_gettime_relative();
2450 
2451  while (len > 0) {
2452  if (is->audio_buf_index >= is->audio_buf_size) {
2453  audio_size = audio_decode_frame(is);
2454  if (audio_size < 0) {
2455  /* if error, just output silence */
2456  is->audio_buf = NULL;
2457  is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2458  } else {
2459  if (is->show_mode != SHOW_MODE_VIDEO)
2460  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2461  is->audio_buf_size = audio_size;
2462  }
2463  is->audio_buf_index = 0;
2464  }
2465  len1 = is->audio_buf_size - is->audio_buf_index;
2466  if (len1 > len)
2467  len1 = len;
2468  if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
2469  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2470  else {
2471  memset(stream, 0, len1);
2472  if (!is->muted && is->audio_buf)
2473  SDL_MixAudioFormat(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, is->audio_volume);
2474  }
2475  len -= len1;
2476  stream += len1;
2477  is->audio_buf_index += len1;
2478  }
2479  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2480  /* Let's assume the audio driver that is used by SDL has two periods. */
2481  if (!isnan(is->audio_clock)) {
2482  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2483  sync_clock_to_slave(&is->extclk, &is->audclk);
2484  }
2485 }
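/* The audio clock is anchored at audio_callback_time and backed off by the
 * data that has not reached the DAC yet: the unwritten part of audio_buf plus
 * (assuming the SDL driver keeps two hardware periods, per the comment above)
 * two hardware buffers, converted to seconds via audio_tgt.bytes_per_sec. */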
2486 
2487 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2488 {
2489  SDL_AudioSpec wanted_spec, spec;
2490  const char *env;
2491  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2492  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2493  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2494 
2495  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2496  if (env) {
2497  wanted_nb_channels = atoi(env);
2498  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2499  }
2500  if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2501  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2502  wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2503  }
2504  wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2505  wanted_spec.channels = wanted_nb_channels;
2506  wanted_spec.freq = wanted_sample_rate;
2507  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2508  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2509  return -1;
2510  }
2511  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2512  next_sample_rate_idx--;
2513  wanted_spec.format = AUDIO_S16SYS;
2514  wanted_spec.silence = 0;
2515  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
2516  wanted_spec.callback = sdl_audio_callback;
2517  wanted_spec.userdata = opaque;
2518  while (!(audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE))) {
2519  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2520  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2521  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2522  if (!wanted_spec.channels) {
2523  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2524  wanted_spec.channels = wanted_nb_channels;
2525  if (!wanted_spec.freq) {
2526  av_log(NULL, AV_LOG_ERROR,
2527  "No more combinations to try, audio open failed\n");
2528  return -1;
2529  }
2530  }
2531  wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2532  }
2533  if (spec.format != AUDIO_S16SYS) {
2534  av_log(NULL, AV_LOG_ERROR,
2535  "SDL advised audio format %d is not supported!\n", spec.format);
2536  return -1;
2537  }
2538  if (spec.channels != wanted_spec.channels) {
2539  wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2540  if (!wanted_channel_layout) {
2541  av_log(NULL, AV_LOG_ERROR,
2542  "SDL advised channel count %d is not supported!\n", spec.channels);
2543  return -1;
2544  }
2545  }
2546 
2547  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2548  audio_hw_params->freq = spec.freq;
2549  audio_hw_params->channel_layout = wanted_channel_layout;
2550  audio_hw_params->channels = spec.channels;
2551  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
2552  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2553  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2554  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2555  return -1;
2556  }
2557  return spec.size;
2558 }
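/* If SDL rejects the requested configuration, the open loop above degrades
 * gracefully: the channel count is stepped down through next_nb_channels[]
 * (e.g. 6 -> 4 -> 2 -> 1) and, once channel counts are exhausted, the sample
 * rate is lowered through next_sample_rates[] before giving up. */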
2559 
2560 /* open a given stream. Return 0 if OK */
2561 static int stream_component_open(VideoState *is, int stream_index)
2562 {
2563  AVFormatContext *ic = is->ic;
2564  AVCodecContext *avctx;
2565  AVCodec *codec;
2566  const char *forced_codec_name = NULL;
2567  AVDictionary *opts = NULL;
2568  AVDictionaryEntry *t = NULL;
2569  int sample_rate, nb_channels;
2570  int64_t channel_layout;
2571  int ret = 0;
2572  int stream_lowres = lowres;
2573 
2574  if (stream_index < 0 || stream_index >= ic->nb_streams)
2575  return -1;
2576 
2577  avctx = avcodec_alloc_context3(NULL);
2578  if (!avctx)
2579  return AVERROR(ENOMEM);
2580 
2581  ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
2582  if (ret < 0)
2583  goto fail;
2584  avctx->pkt_timebase = ic->streams[stream_index]->time_base;
2585 
2586  codec = avcodec_find_decoder(avctx->codec_id);
2587 
2588  switch(avctx->codec_type){
2589  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2590  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2591  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2592  }
2593  if (forced_codec_name)
2594  codec = avcodec_find_decoder_by_name(forced_codec_name);
2595  if (!codec) {
2596  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2597  "No codec could be found with name '%s'\n", forced_codec_name);
2598  else av_log(NULL, AV_LOG_WARNING,
2599  "No decoder could be found for codec %s\n", avcodec_get_name(avctx->codec_id));
2600  ret = AVERROR(EINVAL);
2601  goto fail;
2602  }
2603 
2604  avctx->codec_id = codec->id;
2605  if (stream_lowres > codec->max_lowres) {
2606  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2607  codec->max_lowres);
2608  stream_lowres = codec->max_lowres;
2609  }
2610  avctx->lowres = stream_lowres;
2611 
2612  if (fast)
2613  avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2614 
2615  opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2616  if (!av_dict_get(opts, "threads", NULL, 0))
2617  av_dict_set(&opts, "threads", "auto", 0);
2618  if (stream_lowres)
2619  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2620  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2621  av_dict_set(&opts, "refcounted_frames", "1", 0);
2622  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2623  goto fail;
2624  }
2625  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2626  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2627  ret = AVERROR_OPTION_NOT_FOUND;
2628  goto fail;
2629  }
2630 
2631  is->eof = 0;
2632  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2633  switch (avctx->codec_type) {
2634  case AVMEDIA_TYPE_AUDIO:
2635 #if CONFIG_AVFILTER
2636  {
2637  AVFilterContext *sink;
2638 
2639  is->audio_filter_src.freq = avctx->sample_rate;
2640  is->audio_filter_src.channels = avctx->channels;
2641  is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2642  is->audio_filter_src.fmt = avctx->sample_fmt;
2643  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2644  goto fail;
2645  sink = is->out_audio_filter;
2646  sample_rate = av_buffersink_get_sample_rate(sink);
2647  nb_channels = av_buffersink_get_channels(sink);
2648  channel_layout = av_buffersink_get_channel_layout(sink);
2649  }
2650 #else
2651  sample_rate = avctx->sample_rate;
2652  nb_channels = avctx->channels;
2653  channel_layout = avctx->channel_layout;
2654 #endif
2655 
2656  /* prepare audio output */
2657  if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2658  goto fail;
2659  is->audio_hw_buf_size = ret;
2660  is->audio_src = is->audio_tgt;
2661  is->audio_buf_size = 0;
2662  is->audio_buf_index = 0;
2663 
2664  /* init averaging filter */
2665  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2666  is->audio_diff_avg_count = 0;
2667  /* since we do not have a precise enough audio FIFO fullness,
2668  we correct audio sync only if larger than this threshold */
2669  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2670 
2671  is->audio_stream = stream_index;
2672  is->audio_st = ic->streams[stream_index];
2673 
2674  decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
2675  if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
2676  is->auddec.start_pts = is->audio_st->start_time;
2677  is->auddec.start_pts_tb = is->audio_st->time_base;
2678  }
2679  if ((ret = decoder_start(&is->auddec, audio_thread, is)) < 0)
2680  goto out;
2681  SDL_PauseAudioDevice(audio_dev, 0);
2682  break;
2683  case AVMEDIA_TYPE_VIDEO:
2684  is->video_stream = stream_index;
2685  is->video_st = ic->streams[stream_index];
2686 
2687  decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
2688  if ((ret = decoder_start(&is->viddec, video_thread, is)) < 0)
2689  goto out;
2690  is->queue_attachments_req = 1;
2691  break;
2692  case AVMEDIA_TYPE_SUBTITLE:
2693  is->subtitle_stream = stream_index;
2694  is->subtitle_st = ic->streams[stream_index];
2695 
2696  decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread);
2697  if ((ret = decoder_start(&is->subdec, subtitle_thread, is)) < 0)
2698  goto out;
2699  break;
2700  default:
2701  break;
2702  }
2703  goto out;
2704 
2705 fail:
2706  avcodec_free_context(&avctx);
2707 out:
2708  av_dict_free(&opts);
2709 
2710  return ret;
2711 }
2712 
2713 static int decode_interrupt_cb(void *ctx)
2714 {
2715  VideoState *is = ctx;
2716  return is->abort_request;
2717 }
2718 
2719 static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue) {
2720  return stream_id < 0 ||
2721  queue->abort_request ||
2722  (st->disposition & AV_DISPOSITION_ATTACHED_PIC) ||
2723  queue->nb_packets > MIN_FRAMES && (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0);
2724 }
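/* A queue counts as "full enough" once it holds more than MIN_FRAMES packets
 * and, when packet durations are known, more than one second of buffered
 * data; aborted queues and attached-picture streams are always treated as
 * having enough. */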
2725 
2726 static int is_realtime(AVFormatContext *s)
2727 {
2728  if( !strcmp(s->iformat->name, "rtp")
2729  || !strcmp(s->iformat->name, "rtsp")
2730  || !strcmp(s->iformat->name, "sdp")
2731  )
2732  return 1;
2733 
2734  if(s->pb && ( !strncmp(s->url, "rtp:", 4)
2735  || !strncmp(s->url, "udp:", 4)
2736  )
2737  )
2738  return 1;
2739  return 0;
2740 }
2741 
2742 /* this thread gets the stream from the disk or the network */
2743 static int read_thread(void *arg)
2744 {
2745  VideoState *is = arg;
2746  AVFormatContext *ic = NULL;
2747  int err, i, ret;
2748  int st_index[AVMEDIA_TYPE_NB];
2749  AVPacket pkt1, *pkt = &pkt1;
2750  int64_t stream_start_time;
2751  int pkt_in_play_range = 0;
2752  AVDictionaryEntry *t;
2753  SDL_mutex *wait_mutex = SDL_CreateMutex();
2754  int scan_all_pmts_set = 0;
2755  int64_t pkt_ts;
2756 
2757  if (!wait_mutex) {
2758  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
2759  ret = AVERROR(ENOMEM);
2760  goto fail;
2761  }
2762 
2763  memset(st_index, -1, sizeof(st_index));
2764  is->last_video_stream = is->video_stream = -1;
2765  is->last_audio_stream = is->audio_stream = -1;
2766  is->last_subtitle_stream = is->subtitle_stream = -1;
2767  is->eof = 0;
2768 
2769  ic = avformat_alloc_context();
2770  if (!ic) {
2771  av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2772  ret = AVERROR(ENOMEM);
2773  goto fail;
2774  }
2775  ic->interrupt_callback.callback = decode_interrupt_cb;
2776  ic->interrupt_callback.opaque = is;
2777  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2778  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2779  scan_all_pmts_set = 1;
2780  }
2781  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2782  if (err < 0) {
2783  print_error(is->filename, err);
2784  ret = -1;
2785  goto fail;
2786  }
2787  if (scan_all_pmts_set)
2788  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2789 
2790  if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2791  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2792  ret = AVERROR_OPTION_NOT_FOUND;
2793  goto fail;
2794  }
2795  is->ic = ic;
2796 
2797  if (genpts)
2798  ic->flags |= AVFMT_FLAG_GENPTS;
2799 
2800  av_format_inject_global_side_data(ic);
2801 
2802  if (find_stream_info) {
2803  AVDictionary **opts = setup_find_stream_info_opts(ic, codec_opts);
2804  int orig_nb_streams = ic->nb_streams;
2805 
2806  err = avformat_find_stream_info(ic, opts);
2807 
2808  for (i = 0; i < orig_nb_streams; i++)
2809  av_dict_free(&opts[i]);
2810  av_freep(&opts);
2811 
2812  if (err < 0) {
2813  av_log(NULL, AV_LOG_WARNING,
2814  "%s: could not find codec parameters\n", is->filename);
2815  ret = -1;
2816  goto fail;
2817  }
2818  }
2819 
2820  if (ic->pb)
2821  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2822 
2823  if (seek_by_bytes < 0)
2824  seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
2825 
2826  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2827 
2828  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2829  window_title = av_asprintf("%s - %s", t->value, input_filename);
2830 
2831  /* if seeking requested, we execute it */
2832  if (start_time != AV_NOPTS_VALUE) {
2833  int64_t timestamp;
2834 
2835  timestamp = start_time;
2836  /* add the stream start time */
2837  if (ic->start_time != AV_NOPTS_VALUE)
2838  timestamp += ic->start_time;
2839  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2840  if (ret < 0) {
2841  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2842  is->filename, (double)timestamp / AV_TIME_BASE);
2843  }
2844  }
2845 
2846  is->realtime = is_realtime(ic);
2847 
2848  if (show_status)
2849  av_dump_format(ic, 0, is->filename, 0);
2850 
2851  for (i = 0; i < ic->nb_streams; i++) {
2852  AVStream *st = ic->streams[i];
2853  enum AVMediaType type = st->codecpar->codec_type;
2854  st->discard = AVDISCARD_ALL;
2855  if (type >= 0 && wanted_stream_spec[type] && st_index[type] == -1)
2856  if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
2857  st_index[type] = i;
2858  }
2859  for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
2860  if (wanted_stream_spec[i] && st_index[i] == -1) {
2861  av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
2862  st_index[i] = INT_MAX;
2863  }
2864  }
2865 
2866  if (!video_disable)
2867  st_index[AVMEDIA_TYPE_VIDEO] =
2868  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2869  st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2870  if (!audio_disable)
2871  st_index[AVMEDIA_TYPE_AUDIO] =
2872  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2873  st_index[AVMEDIA_TYPE_AUDIO],
2874  st_index[AVMEDIA_TYPE_VIDEO],
2875  NULL, 0);
2876  if (!video_disable && !subtitle_disable)
2877  st_index[AVMEDIA_TYPE_SUBTITLE] =
2878  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2879  st_index[AVMEDIA_TYPE_SUBTITLE],
2880  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2881  st_index[AVMEDIA_TYPE_AUDIO] :
2882  st_index[AVMEDIA_TYPE_VIDEO]),
2883  NULL, 0);
2884 
2885  is->show_mode = show_mode;
2886  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2887  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2888  AVCodecParameters *codecpar = st->codecpar;
2889  AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
2890  if (codecpar->width)
2891  set_default_window_size(codecpar->width, codecpar->height, sar);
2892  }
2893 
2894  /* open the streams */
2895  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2896  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2897  }
2898 
2899  ret = -1;
2900  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2901  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2902  }
2903  if (is->show_mode == SHOW_MODE_NONE)
2904  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2905 
2906  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2907  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2908  }
2909 
2910  if (is->video_stream < 0 && is->audio_stream < 0) {
2911  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
2912  is->filename);
2913  ret = -1;
2914  goto fail;
2915  }
2916 
2917  if (infinite_buffer < 0 && is->realtime)
2918  infinite_buffer = 1;
2919 
2920  for (;;) {
2921  if (is->abort_request)
2922  break;
2923  if (is->paused != is->last_paused) {
2924  is->last_paused = is->paused;
2925  if (is->paused)
2926  is->read_pause_return = av_read_pause(ic);
2927  else
2928  av_read_play(ic);
2929  }
2930 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2931  if (is->paused &&
2932  (!strcmp(ic->iformat->name, "rtsp") ||
2933  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2934  /* wait 10 ms to avoid trying to get another packet */
2935  /* XXX: horrible */
2936  SDL_Delay(10);
2937  continue;
2938  }
2939 #endif
2940  if (is->seek_req) {
2941  int64_t seek_target = is->seek_pos;
2942  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2943  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2944 // FIXME the +-2 is due to rounding not being done in the correct direction in the generation
2945 // of the seek_pos/seek_rel variables
2946 
2947  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2948  if (ret < 0) {
2949  av_log(NULL, AV_LOG_ERROR,
2950  "%s: error while seeking\n", is->ic->url);
2951  } else {
2952  if (is->audio_stream >= 0) {
2953  packet_queue_flush(&is->audioq);
2954  packet_queue_put(&is->audioq, &flush_pkt);
2955  }
2956  if (is->subtitle_stream >= 0) {
2957  packet_queue_flush(&is->subtitleq);
2958  packet_queue_put(&is->subtitleq, &flush_pkt);
2959  }
2960  if (is->video_stream >= 0) {
2961  packet_queue_flush(&is->videoq);
2962  packet_queue_put(&is->videoq, &flush_pkt);
2963  }
2964  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
2965  set_clock(&is->extclk, NAN, 0);
2966  } else {
2967  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
2968  }
2969  }
2970  is->seek_req = 0;
2971  is->queue_attachments_req = 1;
2972  is->eof = 0;
2973  if (is->paused)
2974  step_to_next_frame(is);
2975  }
2976  if (is->queue_attachments_req) {
2977  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
2978  AVPacket copy = { 0 };
2979  if ((ret = av_packet_ref(&copy, &is->video_st->attached_pic)) < 0)
2980  goto fail;
2981  packet_queue_put(&is->videoq, &copy);
2982  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
2983  }
2984  is->queue_attachments_req = 0;
2985  }
2986 
2987  /* if the queue are full, no need to read more */
2988  if (infinite_buffer<1 &&
2989  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2990  || (stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq) &&
2991  stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq) &&
2992  stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq)))) {
2993  /* wait 10 ms */
2994  SDL_LockMutex(wait_mutex);
2995  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2996  SDL_UnlockMutex(wait_mutex);
2997  continue;
2998  }
2999  if (!is->paused &&
3000  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
3001  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
3002  if (loop != 1 && (!loop || --loop)) {
3003  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
3004  } else if (autoexit) {
3005  ret = AVERROR_EOF;
3006  goto fail;
3007  }
3008  }
3009  ret = av_read_frame(ic, pkt);
3010  if (ret < 0) {
3011  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
3012  if (is->video_stream >= 0)
3013  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
3014  if (is->audio_stream >= 0)
3015  packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
3016  if (is->subtitle_stream >= 0)
3017  packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
3018  is->eof = 1;
3019  }
3020  if (ic->pb && ic->pb->error)
3021  break;
3022  SDL_LockMutex(wait_mutex);
3023  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3024  SDL_UnlockMutex(wait_mutex);
3025  continue;
3026  } else {
3027  is->eof = 0;
3028  }
3029  /* check if packet is in play range specified by user, then queue, otherwise discard */
3030  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3031  pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
3032  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3033  (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3034  av_q2d(ic->streams[pkt->stream_index]->time_base) -
3035  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3036  <= ((double)duration / 1000000);
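 /* Illustrative example: with -ss 30 -t 10, start_time and duration are
  * 30*AV_TIME_BASE and 10*AV_TIME_BASE; a packet stays in play range while
  * its timestamp, taken relative to the stream start and minus the 30 s
  * start offset, does not exceed the 10 s duration, and later packets are
  * discarded below. */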
3037  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3038  packet_queue_put(&is->audioq, pkt);
3039  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3040  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3041  packet_queue_put(&is->videoq, pkt);
3042  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3043  packet_queue_put(&is->subtitleq, pkt);
3044  } else {
3045  av_packet_unref(pkt);
3046  }
3047  }
3048 
3049  ret = 0;
3050  fail:
3051  if (ic && !is->ic)
3052  avformat_close_input(&ic);
3053 
3054  if (ret != 0) {
3055  SDL_Event event;
3056 
3057  event.type = FF_QUIT_EVENT;
3058  event.user.data1 = is;
3059  SDL_PushEvent(&event);
3060  }
3061  SDL_DestroyMutex(wait_mutex);
3062  return 0;
3063 }
3064 
3065 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
3066 {
3067  VideoState *is;
3068 
3069  is = av_mallocz(sizeof(VideoState));
3070  if (!is)
3071  return NULL;
3072  is->filename = av_strdup(filename);
3073  if (!is->filename)
3074  goto fail;
3075  is->iformat = iformat;
3076  is->ytop = 0;
3077  is->xleft = 0;
3078 
3079  /* start video display */
3080  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3081  goto fail;
3082  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3083  goto fail;
3084  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3085  goto fail;
3086 
3087  if (packet_queue_init(&is->videoq) < 0 ||
3088  packet_queue_init(&is->audioq) < 0 ||
3089  packet_queue_init(&is->subtitleq) < 0)
3090  goto fail;
3091 
3092  if (!(is->continue_read_thread = SDL_CreateCond())) {
3093  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
3094  goto fail;
3095  }
3096 
3097  init_clock(&is->vidclk, &is->videoq.serial);
3098  init_clock(&is->audclk, &is->audioq.serial);
3099  init_clock(&is->extclk, &is->extclk.serial);
3100  is->audio_clock_serial = -1;
3101  if (startup_volume < 0)
3102  av_log(NULL, AV_LOG_WARNING, "-volume=%d < 0, setting to 0\n", startup_volume);
3103  if (startup_volume > 100)
3104  av_log(NULL, AV_LOG_WARNING, "-volume=%d > 100, setting to 100\n", startup_volume);
3105  startup_volume = av_clip(startup_volume, 0, 100);
3106  startup_volume = av_clip(SDL_MIX_MAXVOLUME * startup_volume / 100, 0, SDL_MIX_MAXVOLUME);
3107  is->audio_volume = startup_volume;
3108  is->muted = 0;
3109  is->av_sync_type = av_sync_type;
3110  is->read_tid = SDL_CreateThread(read_thread, "read_thread", is);
3111  if (!is->read_tid) {
3112  av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
3113 fail:
3114  stream_close(is);
3115  return NULL;
3116  }
3117  return is;
3118 }
3119 
3120 static void stream_cycle_channel(VideoState *is, int codec_type)
3121 {
3122  AVFormatContext *ic = is->ic;
3123  int start_index, stream_index;
3124  int old_index;
3125  AVStream *st;
3126  AVProgram *p = NULL;
3127  int nb_streams = is->ic->nb_streams;
3128 
3129  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3130  start_index = is->last_video_stream;
3131  old_index = is->video_stream;
3132  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3133  start_index = is->last_audio_stream;
3134  old_index = is->audio_stream;
3135  } else {
3136  start_index = is->last_subtitle_stream;
3137  old_index = is->subtitle_stream;
3138  }
3139  stream_index = start_index;
3140 
3141  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3142  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3143  if (p) {
3144  nb_streams = p->nb_stream_indexes;
3145  for (start_index = 0; start_index < nb_streams; start_index++)
3146  if (p->stream_index[start_index] == stream_index)
3147  break;
3148  if (start_index == nb_streams)
3149  start_index = -1;
3150  stream_index = start_index;
3151  }
3152  }
3153 
3154  for (;;) {
3155  if (++stream_index >= nb_streams)
3156  {
3157  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3158  {
3159  stream_index = -1;
3160  is->last_subtitle_stream = -1;
3161  goto the_end;
3162  }
3163  if (start_index == -1)
3164  return;
3165  stream_index = 0;
3166  }
3167  if (stream_index == start_index)
3168  return;
3169  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3170  if (st->codecpar->codec_type == codec_type) {
3171  /* check that parameters are OK */
3172  switch (codec_type) {
3173  case AVMEDIA_TYPE_AUDIO:
3174  if (st->codecpar->sample_rate != 0 &&
3175  st->codecpar->channels != 0)
3176  goto the_end;
3177  break;
3178  case AVMEDIA_TYPE_VIDEO:
3179  case AVMEDIA_TYPE_SUBTITLE:
3180  goto the_end;
3181  default:
3182  break;
3183  }
3184  }
3185  }
3186  the_end:
3187  if (p && stream_index != -1)
3188  stream_index = p->stream_index[stream_index];
3189  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3190  av_get_media_type_string(codec_type),
3191  old_index,
3192  stream_index);
3193 
3194  stream_component_close(is, old_index);
3195  stream_component_open(is, stream_index);
3196 }
3197 
3198 
3199 static void toggle_full_screen(VideoState *is)
3200 {
3201  is_full_screen = !is_full_screen;
3202  SDL_SetWindowFullscreen(window, is_full_screen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0);
3203 }
3204 
3205 static void toggle_audio_display(VideoState *is)
3206 {
3207  int next = is->show_mode;
3208  do {
3209  next = (next + 1) % SHOW_MODE_NB;
3210  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3211  if (is->show_mode != next) {
3212  is->force_refresh = 1;
3213  is->show_mode = next;
3214  }
3215 }
3216 
3217 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3218  double remaining_time = 0.0;
3219  SDL_PumpEvents();
3220  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
3221  if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3222  SDL_ShowCursor(0);
3223  cursor_hidden = 1;
3224  }
3225  if (remaining_time > 0.0)
3226  av_usleep((int64_t)(remaining_time * 1000000.0));
3227  remaining_time = REFRESH_RATE;
3228  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3229  video_refresh(is, &remaining_time);
3230  SDL_PumpEvents();
3231  }
3232 }
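/* The wait is capped at REFRESH_RATE seconds, so video_refresh() keeps being
 * called to draw frames and the status line even when no SDL events arrive;
 * the mouse cursor is hidden after CURSOR_HIDE_DELAY of inactivity. */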
3233 
3234 static void seek_chapter(VideoState *is, int incr)
3235 {
3236  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3237  int i;
3238 
3239  if (!is->ic->nb_chapters)
3240  return;
3241 
3242  /* find the current chapter */
3243  for (i = 0; i < is->ic->nb_chapters; i++) {
3244  AVChapter *ch = is->ic->chapters[i];
3245  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3246  i--;
3247  break;
3248  }
3249  }
3250 
3251  i += incr;
3252  i = FFMAX(i, 0);
3253  if (i >= is->ic->nb_chapters)
3254  return;
3255 
3256  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3257  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3258  AV_TIME_BASE_Q), 0, 0);
3259 }
3260 
3261 /* handle an event sent by the GUI */
3262 static void event_loop(VideoState *cur_stream)
3263 {
3264  SDL_Event event;
3265  double incr, pos, frac;
3266 
3267  for (;;) {
3268  double x;
3269  refresh_loop_wait_event(cur_stream, &event);
3270  switch (event.type) {
3271  case SDL_KEYDOWN:
3272  if (exit_on_keydown || event.key.keysym.sym == SDLK_ESCAPE || event.key.keysym.sym == SDLK_q) {
3273  do_exit(cur_stream);
3274  break;
3275  }
3276  // If we don't yet have a window, skip all key events, because read_thread might still be initializing...
3277  if (!cur_stream->width)
3278  continue;
3279  switch (event.key.keysym.sym) {
3280  case SDLK_f:
3281  toggle_full_screen(cur_stream);
3282  cur_stream->force_refresh = 1;
3283  break;
3284  case SDLK_p:
3285  case SDLK_SPACE:
3286  toggle_pause(cur_stream);
3287  break;
3288  case SDLK_m:
3289  toggle_mute(cur_stream);
3290  break;
3291  case SDLK_KP_MULTIPLY:
3292  case SDLK_0:
3293  update_volume(cur_stream, 1, SDL_VOLUME_STEP);
3294  break;
3295  case SDLK_KP_DIVIDE:
3296  case SDLK_9:
3297  update_volume(cur_stream, -1, SDL_VOLUME_STEP);
3298  break;
3299  case SDLK_s: // S: Step to next frame
3300  step_to_next_frame(cur_stream);
3301  break;
3302  case SDLK_a:
3303  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3304  break;
3305  case SDLK_v:
3306  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3307  break;
3308  case SDLK_c:
3309  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3310  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3311  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3312  break;
3313  case SDLK_t:
3314  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3315  break;
3316  case SDLK_w:
3317 #if CONFIG_AVFILTER
3318  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3319  if (++cur_stream->vfilter_idx >= nb_vfilters)
3320  cur_stream->vfilter_idx = 0;
3321  } else {
3322  cur_stream->vfilter_idx = 0;
3323  toggle_audio_display(cur_stream);
3324  }
3325 #else
3326  toggle_audio_display(cur_stream);
3327 #endif
3328  break;
3329  case SDLK_PAGEUP:
3330  if (cur_stream->ic->nb_chapters <= 1) {
3331  incr = 600.0;
3332  goto do_seek;
3333  }
3334  seek_chapter(cur_stream, 1);
3335  break;
3336  case SDLK_PAGEDOWN:
3337  if (cur_stream->ic->nb_chapters <= 1) {
3338  incr = -600.0;
3339  goto do_seek;
3340  }
3341  seek_chapter(cur_stream, -1);
3342  break;
3343  case SDLK_LEFT:
3344  incr = seek_interval ? -seek_interval : -10.0;
3345  goto do_seek;
3346  case SDLK_RIGHT:
3347  incr = seek_interval ? seek_interval : 10.0;
3348  goto do_seek;
3349  case SDLK_UP:
3350  incr = 60.0;
3351  goto do_seek;
3352  case SDLK_DOWN:
3353  incr = -60.0;
3354  do_seek:
3355  if (seek_by_bytes) {
3356  pos = -1;
3357  if (pos < 0 && cur_stream->video_stream >= 0)
3358  pos = frame_queue_last_pos(&cur_stream->pictq);
3359  if (pos < 0 && cur_stream->audio_stream >= 0)
3360  pos = frame_queue_last_pos(&cur_stream->sampq);
3361  if (pos < 0)
3362  pos = avio_tell(cur_stream->ic->pb);
3363  if (cur_stream->ic->bit_rate)
3364  incr *= cur_stream->ic->bit_rate / 8.0;
3365  else
3366  incr *= 180000.0;
3367  pos += incr;
3368  stream_seek(cur_stream, pos, incr, 1);
3369  } else {
3370  pos = get_master_clock(cur_stream);
3371  if (isnan(pos))
3372  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3373  pos += incr;
3374  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3375  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3376  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3377  }
3378  break;
3379  default:
3380  break;
3381  }
3382  break;
3383  case SDL_MOUSEBUTTONDOWN:
3384  if (exit_on_mousedown) {
3385  do_exit(cur_stream);
3386  break;
3387  }
3388  if (event.button.button == SDL_BUTTON_LEFT) {
3389  static int64_t last_mouse_left_click = 0;
3390  if (av_gettime_relative() - last_mouse_left_click <= 500000) {
3391  toggle_full_screen(cur_stream);
3392  cur_stream->force_refresh = 1;
3393  last_mouse_left_click = 0;
3394  } else {
3395  last_mouse_left_click = av_gettime_relative();
3396  }
3397  }
3398  case SDL_MOUSEMOTION:
3399  if (cursor_hidden) {
3400  SDL_ShowCursor(1);
3401  cursor_hidden = 0;
3402  }
3403  cursor_last_shown = av_gettime_relative();
3404  if (event.type == SDL_MOUSEBUTTONDOWN) {
3405  if (event.button.button != SDL_BUTTON_RIGHT)
3406  break;
3407  x = event.button.x;
3408  } else {
3409  if (!(event.motion.state & SDL_BUTTON_RMASK))
3410  break;
3411  x = event.motion.x;
3412  }
3413  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3414  uint64_t size = avio_size(cur_stream->ic->pb);
3415  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3416  } else {
3417  int64_t ts;
3418  int ns, hh, mm, ss;
3419  int tns, thh, tmm, tss;
3420  tns = cur_stream->ic->duration / 1000000LL;
3421  thh = tns / 3600;
3422  tmm = (tns % 3600) / 60;
3423  tss = (tns % 60);
3424  frac = x / cur_stream->width;
3425  ns = frac * tns;
3426  hh = ns / 3600;
3427  mm = (ns % 3600) / 60;
3428  ss = (ns % 60);
3429  av_log(NULL, AV_LOG_INFO,
3430  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3431  hh, mm, ss, thh, tmm, tss);
3432  ts = frac * cur_stream->ic->duration;
3433  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3434  ts += cur_stream->ic->start_time;
3435  stream_seek(cur_stream, ts, 0, 0);
3436  }
3437  break;
3438  case SDL_WINDOWEVENT:
3439  switch (event.window.event) {
3440  case SDL_WINDOWEVENT_RESIZED:
3441  screen_width = cur_stream->width = event.window.data1;
3442  screen_height = cur_stream->height = event.window.data2;
3443  if (cur_stream->vis_texture) {
3444  SDL_DestroyTexture(cur_stream->vis_texture);
3445  cur_stream->vis_texture = NULL;
3446  }
3447  case SDL_WINDOWEVENT_EXPOSED:
3448  cur_stream->force_refresh = 1;
3449  }
3450  break;
3451  case SDL_QUIT:
3452  case FF_QUIT_EVENT:
3453  do_exit(cur_stream);
3454  break;
3455  default:
3456  break;
3457  }
3458  }
3459 }
3460 
3461 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3462 {
3463  av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3464  return opt_default(NULL, "video_size", arg);
3465 }
3466 
3467 static int opt_width(void *optctx, const char *opt, const char *arg)
3468 {
3469  screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3470  return 0;
3471 }
3472 
3473 static int opt_height(void *optctx, const char *opt, const char *arg)
3474 {
3475  screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3476  return 0;
3477 }
3478 
3479 static int opt_format(void *optctx, const char *opt, const char *arg)
3480 {
3481  file_iformat = av_find_input_format(arg);
3482  if (!file_iformat) {
3483  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3484  return AVERROR(EINVAL);
3485  }
3486  return 0;
3487 }
3488 
3489 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3490 {
3491  av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3492  return opt_default(NULL, "pixel_format", arg);
3493 }
3494 
3495 static int opt_sync(void *optctx, const char *opt, const char *arg)
3496 {
3497  if (!strcmp(arg, "audio"))
3498  av_sync_type = AV_SYNC_AUDIO_MASTER;
3499  else if (!strcmp(arg, "video"))
3500  av_sync_type = AV_SYNC_VIDEO_MASTER;
3501  else if (!strcmp(arg, "ext"))
3502  av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3503  else {
3504  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3505  exit(1);
3506  }
3507  return 0;
3508 }
3509 
3510 static int opt_seek(void *optctx, const char *opt, const char *arg)
3511 {
3512  start_time = parse_time_or_die(opt, arg, 1);
3513  return 0;
3514 }
3515 
3516 static int opt_duration(void *optctx, const char *opt, const char *arg)
3517 {
3518  duration = parse_time_or_die(opt, arg, 1);
3519  return 0;
3520 }
3521 
3522 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3523 {
3524  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3525  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3526  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3527  parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3528  return 0;
3529 }
3530 
3531 static void opt_input_file(void *optctx, const char *filename)
3532 {
3533  if (input_filename) {
3534  av_log(NULL, AV_LOG_FATAL,
3535  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3536  filename, input_filename);
3537  exit(1);
3538  }
3539  if (!strcmp(filename, "-"))
3540  filename = "pipe:";
3541  input_filename = filename;
3542 }
3543 
3544 static int opt_codec(void *optctx, const char *opt, const char *arg)
3545 {
3546  const char *spec = strchr(opt, ':');
3547  if (!spec) {
3548  av_log(NULL, AV_LOG_ERROR,
3549  "No media specifier was specified in '%s' in option '%s'\n",
3550  arg, opt);
3551  return AVERROR(EINVAL);
3552  }
3553  spec++;
3554  switch (spec[0]) {
3555  case 'a' : audio_codec_name = arg; break;
3556  case 's' : subtitle_codec_name = arg; break;
3557  case 'v' : video_codec_name = arg; break;
3558  default:
3559  av_log(NULL, AV_LOG_ERROR,
3560  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3561  return AVERROR(EINVAL);
3562  }
3563  return 0;
3564 }
3565 
3566 static int dummy;
3567 
3568 static const OptionDef options[] = {
3569  CMDUTILS_COMMON_OPTIONS
3570  { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3571  { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3572  { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3573  { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3574  { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3575  { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3576  { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3577  { "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3578  { "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3579  { "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3580  { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3581  { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3582  { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3583  { "seek_interval", OPT_FLOAT | HAS_ARG, { &seek_interval }, "set seek interval for left/right keys, in seconds", "seconds" },
3584  { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3585  { "noborder", OPT_BOOL, { &borderless }, "borderless window" },
3586  { "volume", OPT_INT | HAS_ARG, { &startup_volume}, "set startup volume 0=min 100=max", "volume" },
3587  { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3588  { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3589  { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3590  { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3591  { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3592  { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3593  { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3594  { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3595  { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3596  { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3597  { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3598  { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3599  { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3600  { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3601  { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3602  { "left", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_left }, "set the x position for the left of the window", "x pos" },
3603  { "top", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_top }, "set the y position for the top of the window", "y pos" },
3604 #if CONFIG_AVFILTER
3605  { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3606  { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3607 #endif
3608  { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3609  { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3610  { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3611  { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3612  { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3613  { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3614  { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3615  { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3616  { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
3617  { "find_stream_info", OPT_BOOL | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
3618  "read and decode the streams to fill missing information with heuristics" },
3619  { NULL, },
3620 };
3621 
3622 static void show_usage(void)
3623 {
3624  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3625  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3626  av_log(NULL, AV_LOG_INFO, "\n");
3627 }
3628 
3629 void show_help_default(const char *opt, const char *arg)
3630 {
3631  av_log_set_callback(log_callback_help);
3632  show_usage();
3633  show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3634  show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3635  printf("\n");
3636  show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3637  show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3638 #if !CONFIG_AVFILTER
3639  show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3640 #else
3641  show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3642 #endif
3643  printf("\nWhile playing:\n"
3644  "q, ESC quit\n"
3645  "f toggle full screen\n"
3646  "p, SPC pause\n"
3647  "m toggle mute\n"
3648  "9, 0 decrease and increase volume respectively\n"
3649  "/, * decrease and increase volume respectively\n"
3650  "a cycle audio channel in the current program\n"
3651  "v cycle video channel\n"
3652  "t cycle subtitle channel in the current program\n"
3653  "c cycle program\n"
3654  "w cycle video filters or show modes\n"
3655  "s activate frame-step mode\n"
3656  "left/right seek backward/forward 10 seconds or to custom interval if -seek_interval is set\n"
3657  "down/up seek backward/forward 1 minute\n"
3658  "page down/page up seek backward/forward 10 minutes\n"
3659  "right mouse click seek to percentage in file corresponding to fraction of width\n"
3660  "left double-click toggle full screen\n"
3661  );
3662 }
3663 
3664 /* Program entry point */
3665 int main(int argc, char **argv)
3666 {
3667  int flags;
3668  VideoState *is;
3669 
3670  init_dynload();
3671 
3672  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3673  parse_loglevel(argc, argv, options);
3674 
3675  /* register all codecs, demuxers and protocols */
3676 #if CONFIG_AVDEVICE
3677  avdevice_register_all();
3678 #endif
3679  avformat_network_init();
3680 
3681  init_opts();
3682 
3683  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3684  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3685 
3686  show_banner(argc, argv, options);
3687 
3688  parse_options(NULL, argc, argv, options, opt_input_file);
3689 
3690  if (!input_filename) {
3691  show_usage();
3692  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3693  av_log(NULL, AV_LOG_FATAL,
3694  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3695  exit(1);
3696  }
3697 
3698  if (display_disable) {
3699  video_disable = 1;
3700  }
3701  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3702  if (audio_disable)
3703  flags &= ~SDL_INIT_AUDIO;
3704  else {
3705  /* Try to work around an occasional ALSA buffer underflow issue when the
3706  * period size is NPOT due to ALSA resampling by forcing the buffer size. */
3707  if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
3708  SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1);
3709  }
3710  if (display_disable)
3711  flags &= ~SDL_INIT_VIDEO;
3712  if (SDL_Init (flags)) {
3713  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3714  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3715  exit(1);
3716  }
3717 
3718  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3719  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3720 
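 /* flush_pkt is a sentinel packet: pointing its data field at its own address
  * lets the packet queues and the decoder threads recognize it (for example
  * after a seek) and flush their buffered state before continuing. */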
3721  av_init_packet(&flush_pkt);
3722  flush_pkt.data = (uint8_t *)&flush_pkt;
3723 
3724  if (!display_disable) {
3725  int flags = SDL_WINDOW_HIDDEN;
3726  if (borderless)
3727  flags |= SDL_WINDOW_BORDERLESS;
3728  else
3729  flags |= SDL_WINDOW_RESIZABLE;
3730  window = SDL_CreateWindow(program_name, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, default_width, default_height, flags);
3731  SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
3732  if (window) {
3733  renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
3734  if (!renderer) {
3735  av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
3736  renderer = SDL_CreateRenderer(window, -1, 0);
3737  }
3738  if (renderer) {
3739  if (!SDL_GetRendererInfo(renderer, &renderer_info))
3740  av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", renderer_info.name);
3741  }
3742  }
3743  if (!window || !renderer || !renderer_info.num_texture_formats) {
3744  av_log(NULL, AV_LOG_FATAL, "Failed to create window or renderer: %s", SDL_GetError());
3745  do_exit(NULL);
3746  }
3747  }
3748 
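 /* stream_open() allocates the VideoState, sets up the packet/frame queues and
  * clocks, and spawns read_thread() to demux the input and feed the decoders;
  * event_loop() below then runs the SDL event/refresh loop until the user quits. */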
3749  is = stream_open(input_filename, file_iformat);
3750  if (!is) {
3751  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3752  do_exit(NULL);
3753  }
3754 
3755  event_loop(is);
3756 
3757  /* never returns */
3758 
3759  return 0;
3760 }