ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include <stdint.h>
32 
33 #include "libavutil/avstring.h"
34 #include "libavutil/eval.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/dict.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/samplefmt.h"
41 #include "libavutil/avassert.h"
42 #include "libavutil/time.h"
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswscale/swscale.h"
46 #include "libavutil/opt.h"
47 #include "libavcodec/avfft.h"
49 
50 #if CONFIG_AVFILTER
51 # include "libavfilter/avfilter.h"
52 # include "libavfilter/buffersink.h"
53 # include "libavfilter/buffersrc.h"
54 #endif
55 
56 #include <SDL.h>
57 #include <SDL_thread.h>
58 
59 #include "cmdutils.h"
60 
61 #include <assert.h>
62 
63 const char program_name[] = "ffplay";
64 const int program_birth_year = 2003;
65 
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_FRAMES 25
68 #define EXTERNAL_CLOCK_MIN_FRAMES 2
69 #define EXTERNAL_CLOCK_MAX_FRAMES 10
70 
71 /* Minimum SDL audio buffer size, in samples. */
72 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
73 /* Calculate the actual buffer size, keeping in mind not to cause too frequent audio callbacks */
74 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
75 
76 /* Step size for volume control in dB */
77 #define SDL_VOLUME_STEP (0.75)
78 
79 /* no AV sync correction is done if below the minimum AV sync threshold */
80 #define AV_SYNC_THRESHOLD_MIN 0.04
81 /* AV sync correction is done if above the maximum AV sync threshold */
82 #define AV_SYNC_THRESHOLD_MAX 0.1
83 /* If a frame duration is longer than this, it will not be duplicated to compensate for AV sync */
84 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
85 /* no AV correction is done if the error is too big */
86 #define AV_NOSYNC_THRESHOLD 10.0
87 
88 /* maximum audio speed change to get correct sync */
89 #define SAMPLE_CORRECTION_PERCENT_MAX 10
90 
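/* [Editor's note, not part of ffplay.c] How the sync constants above are used:
 * compute_target_delay() (further down) clamps the per-frame tolerance between
 * AV_SYNC_THRESHOLD_MIN and AV_SYNC_THRESHOLD_MAX, shortens the delay when the
 * video clock is late by more than that, and lengthens it when the video is early,
 * duplicating a frame only if its duration is below AV_SYNC_FRAMEDUP_THRESHOLD.
 * Differences larger than AV_NOSYNC_THRESHOLD are treated as timestamp
 * discontinuities and are not corrected gradually. SAMPLE_CORRECTION_PERCENT_MAX
 * bounds how much the audio path may stretch or shrink the resampled sample count. */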
91 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
92 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
93 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
94 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
95 
96 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
97 #define AUDIO_DIFF_AVG_NB 20
98 
99 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
100 #define REFRESH_RATE 0.01
101 
102 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
103 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
104 #define SAMPLE_ARRAY_SIZE (8 * 65536)
105 
106 #define CURSOR_HIDE_DELAY 1000000
107 
108 #define USE_ONEPASS_SUBTITLE_RENDER 1
109 
110 static unsigned sws_flags = SWS_BICUBIC;
111 
112 typedef struct MyAVPacketList {
113  AVPacket pkt;
114  struct MyAVPacketList *next;
115  int serial;
116 } MyAVPacketList;
117 
118 typedef struct PacketQueue {
119  MyAVPacketList *first_pkt, *last_pkt;
120  int nb_packets;
121  int size;
122  int64_t duration;
123  int abort_request;
124  int serial;
125  SDL_mutex *mutex;
126  SDL_cond *cond;
127 } PacketQueue;
128 
129 #define VIDEO_PICTURE_QUEUE_SIZE 3
130 #define SUBPICTURE_QUEUE_SIZE 16
131 #define SAMPLE_QUEUE_SIZE 9
132 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
133 
134 typedef struct AudioParams {
135  int freq;
136  int channels;
137  int64_t channel_layout;
138  enum AVSampleFormat fmt;
139  int frame_size;
140  int bytes_per_sec;
141 } AudioParams;
142 
143 typedef struct Clock {
144  double pts; /* clock base */
145  double pts_drift; /* clock base minus time at which we updated the clock */
146  double last_updated;
147  double speed;
148  int serial; /* clock is based on a packet with this serial */
149  int paused;
150  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
151 } Clock;
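/* [Editor's note, not part of ffplay.c] The Clock is stored as an offset from the
 * system clock: set_clock_at() records pts_drift = pts - av_gettime_relative()/1e6,
 * so get_clock() can reconstruct the current position without being updated on
 * every frame. queue_serial points at the owning PacketQueue's serial; after a
 * seek the serials no longer match and get_clock() reports NAN until the clock is
 * set again from fresh data. */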
152 
153 /* Common struct for handling all types of decoded data and allocated render buffers. */
154 typedef struct Frame {
155  AVFrame *frame;
156  AVSubtitle sub;
157  int serial;
158  double pts; /* presentation timestamp for the frame */
159  double duration; /* estimated duration of the frame */
160  int64_t pos; /* byte position of the frame in the input file */
161  int width;
162  int height;
163  int format;
164  AVRational sar;
165  int uploaded;
166  int flip_v;
167 } Frame;
168 
169 typedef struct FrameQueue {
170  Frame queue[FRAME_QUEUE_SIZE];
171  int rindex;
172  int windex;
173  int size;
174  int max_size;
175  int keep_last;
176  int rindex_shown;
177  SDL_mutex *mutex;
178  SDL_cond *cond;
179  PacketQueue *pktq;
180 } FrameQueue;
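/* [Editor's note, not part of ffplay.c] FrameQueue is a fixed-size ring buffer of
 * decoded Frames layered on top of a PacketQueue (pktq) so that aborts propagate.
 * The decoder thread writes at windex via frame_queue_peek_writable()/push(); the
 * display thread reads at rindex via frame_queue_peek()/next(). With keep_last set,
 * the most recently displayed frame stays readable (rindex_shown == 1) so the
 * picture can be repainted while paused, which is why frame_queue_nb_remaining()
 * returns size - rindex_shown. */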
181 
182 enum {
183  AV_SYNC_AUDIO_MASTER, /* default choice */
184  AV_SYNC_VIDEO_MASTER,
185  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
186 };
187 
188 typedef struct Decoder {
189  AVPacket pkt;
190  PacketQueue *queue;
191  AVCodecContext *avctx;
192  int pkt_serial;
193  int finished;
194  int packet_pending;
195  SDL_cond *empty_queue_cond;
196  int64_t start_pts;
197  AVRational start_pts_tb;
198  int64_t next_pts;
199  AVRational next_pts_tb;
200  SDL_Thread *decoder_tid;
201 } Decoder;
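/* [Editor's note, not part of ffplay.c] The "serial" fields tie packets, frames and
 * clocks to one generation of the stream. Every seek pushes flush_pkt into the
 * PacketQueue, which bumps queue->serial; packets queued afterwards carry the new
 * serial. The decoder remembers the serial of the packet it is consuming
 * (pkt_serial) and discards data from an older generation, and video_refresh()
 * likewise drops queued pictures whose serial no longer matches the packet queue. */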
202 
203 typedef struct VideoState {
204  SDL_Thread *read_tid;
208  int paused;
211  int seek_req;
213  int64_t seek_pos;
214  int64_t seek_rel;
217  int realtime;
218 
222 
226 
230 
232 
234 
235  double audio_clock;
237  double audio_diff_cum; /* used for AV difference average computation */
246  unsigned int audio_buf_size; /* in bytes */
247  unsigned int audio_buf1_size;
248  int audio_buf_index; /* in bytes */
251  int muted;
253 #if CONFIG_AVFILTER
254  struct AudioParams audio_filter_src;
255 #endif
260 
261  enum ShowMode {
262  SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
263  } show_mode;
270  int xpos;
272  SDL_Texture *vis_texture;
273  SDL_Texture *sub_texture;
274  SDL_Texture *vid_texture;
275 
279 
280  double frame_timer;
286  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
289  int eof;
290 
291  char *filename;
293  int step;
294 
295 #if CONFIG_AVFILTER
296  int vfilter_idx;
297  AVFilterContext *in_video_filter; // the first filter in the video chain
298  AVFilterContext *out_video_filter; // the last filter in the video chain
299  AVFilterContext *in_audio_filter; // the first filter in the audio chain
300  AVFilterContext *out_audio_filter; // the last filter in the audio chain
301  AVFilterGraph *agraph; // audio filter graph
302 #endif
303 
305 
307 } VideoState;
308 
309 /* options specified by the user */
311 static const char *input_filename;
312 static const char *window_title;
313 static int default_width = 640;
314 static int default_height = 480;
315 static int screen_width = 0;
316 static int screen_height = 0;
317 static int audio_disable;
318 static int video_disable;
319 static int subtitle_disable;
320 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
321 static int seek_by_bytes = -1;
322 static float seek_interval = 10;
323 static int display_disable;
324 static int borderless;
325 static int startup_volume = 100;
326 static int show_status = 1;
327 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
328 static int64_t start_time = AV_NOPTS_VALUE;
329 static int64_t duration = AV_NOPTS_VALUE;
330 static int fast = 0;
331 static int genpts = 0;
332 static int lowres = 0;
333 static int decoder_reorder_pts = -1;
334 static int autoexit;
335 static int exit_on_keydown;
336 static int exit_on_mousedown;
337 static int loop = 1;
338 static int framedrop = -1;
339 static int infinite_buffer = -1;
340 static enum ShowMode show_mode = SHOW_MODE_NONE;
341 static const char *audio_codec_name;
342 static const char *subtitle_codec_name;
343 static const char *video_codec_name;
344 double rdftspeed = 0.02;
345 static int64_t cursor_last_shown;
346 static int cursor_hidden = 0;
347 #if CONFIG_AVFILTER
348 static const char **vfilters_list = NULL;
349 static int nb_vfilters = 0;
350 static char *afilters = NULL;
351 #endif
352 static int autorotate = 1;
353 static int find_stream_info = 1;
354 
355 /* current context */
356 static int is_full_screen;
357 static int64_t audio_callback_time;
358 
359 static AVPacket flush_pkt;
360 
361 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
362 
363 static SDL_Window *window;
364 static SDL_Renderer *renderer;
365 static SDL_RendererInfo renderer_info = {0};
366 static SDL_AudioDeviceID audio_dev;
367 
368 static const struct TextureFormatEntry {
369  enum AVPixelFormat format;
370  int texture_fmt;
371 } sdl_texture_format_map[] = {
372  { AV_PIX_FMT_RGB8, SDL_PIXELFORMAT_RGB332 },
373  { AV_PIX_FMT_RGB444, SDL_PIXELFORMAT_RGB444 },
374  { AV_PIX_FMT_RGB555, SDL_PIXELFORMAT_RGB555 },
375  { AV_PIX_FMT_BGR555, SDL_PIXELFORMAT_BGR555 },
376  { AV_PIX_FMT_RGB565, SDL_PIXELFORMAT_RGB565 },
377  { AV_PIX_FMT_BGR565, SDL_PIXELFORMAT_BGR565 },
378  { AV_PIX_FMT_RGB24, SDL_PIXELFORMAT_RGB24 },
379  { AV_PIX_FMT_BGR24, SDL_PIXELFORMAT_BGR24 },
380  { AV_PIX_FMT_0RGB32, SDL_PIXELFORMAT_RGB888 },
381  { AV_PIX_FMT_0BGR32, SDL_PIXELFORMAT_BGR888 },
382  { AV_PIX_FMT_NE(RGB0, 0BGR), SDL_PIXELFORMAT_RGBX8888 },
383  { AV_PIX_FMT_NE(BGR0, 0RGB), SDL_PIXELFORMAT_BGRX8888 },
384  { AV_PIX_FMT_RGB32, SDL_PIXELFORMAT_ARGB8888 },
385  { AV_PIX_FMT_RGB32_1, SDL_PIXELFORMAT_RGBA8888 },
386  { AV_PIX_FMT_BGR32, SDL_PIXELFORMAT_ABGR8888 },
387  { AV_PIX_FMT_BGR32_1, SDL_PIXELFORMAT_BGRA8888 },
388  { AV_PIX_FMT_YUV420P, SDL_PIXELFORMAT_IYUV },
389  { AV_PIX_FMT_YUYV422, SDL_PIXELFORMAT_YUY2 },
390  { AV_PIX_FMT_UYVY422, SDL_PIXELFORMAT_UYVY },
391  { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_UNKNOWN },
392 };
393 
394 #if CONFIG_AVFILTER
395 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
396 {
397  GROW_ARRAY(vfilters_list, nb_vfilters);
398  vfilters_list[nb_vfilters - 1] = arg;
399  return 0;
400 }
401 #endif
402 
403 static inline
404 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
405  enum AVSampleFormat fmt2, int64_t channel_count2)
406 {
407  /* If channel count == 1, planar and non-planar formats are the same */
408  if (channel_count1 == 1 && channel_count2 == 1)
409  return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
410  else
411  return channel_count1 != channel_count2 || fmt1 != fmt2;
412 }
413 
414 static inline
415 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
416 {
417  if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
418  return channel_layout;
419  else
420  return 0;
421 }
422 
423 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
424 {
425  MyAVPacketList *pkt1;
426 
427  if (q->abort_request)
428  return -1;
429 
430  pkt1 = av_malloc(sizeof(MyAVPacketList));
431  if (!pkt1)
432  return -1;
433  pkt1->pkt = *pkt;
434  pkt1->next = NULL;
435  if (pkt == &flush_pkt)
436  q->serial++;
437  pkt1->serial = q->serial;
438 
439  if (!q->last_pkt)
440  q->first_pkt = pkt1;
441  else
442  q->last_pkt->next = pkt1;
443  q->last_pkt = pkt1;
444  q->nb_packets++;
445  q->size += pkt1->pkt.size + sizeof(*pkt1);
446  q->duration += pkt1->pkt.duration;
447  /* XXX: should duplicate packet data in DV case */
448  SDL_CondSignal(q->cond);
449  return 0;
450 }
451 
452 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
453 {
454  int ret;
455 
456  SDL_LockMutex(q->mutex);
457  ret = packet_queue_put_private(q, pkt);
458  SDL_UnlockMutex(q->mutex);
459 
460  if (pkt != &flush_pkt && ret < 0)
461  av_packet_unref(pkt);
462 
463  return ret;
464 }
465 
466 static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
467 {
468  AVPacket pkt1, *pkt = &pkt1;
469  av_init_packet(pkt);
470  pkt->data = NULL;
471  pkt->size = 0;
472  pkt->stream_index = stream_index;
473  return packet_queue_put(q, pkt);
474 }
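/* [Editor's note, not part of ffplay.c] A "null packet" (data == NULL, size == 0) is
 * queued by the read thread when the demuxer reaches end of file. Feeding it to
 * avcodec_send_packet() puts the codec into draining mode, so frames still buffered
 * inside the decoder are flushed out before decoder_decode_frame() reports EOF. */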
475 
476 /* packet queue handling */
477 static int packet_queue_init(PacketQueue *q)
478 {
479  memset(q, 0, sizeof(PacketQueue));
480  q->mutex = SDL_CreateMutex();
481  if (!q->mutex) {
482  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
483  return AVERROR(ENOMEM);
484  }
485  q->cond = SDL_CreateCond();
486  if (!q->cond) {
487  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
488  return AVERROR(ENOMEM);
489  }
490  q->abort_request = 1;
491  return 0;
492 }
493 
494 static void packet_queue_flush(PacketQueue *q)
495 {
496  MyAVPacketList *pkt, *pkt1;
497 
498  SDL_LockMutex(q->mutex);
499  for (pkt = q->first_pkt; pkt; pkt = pkt1) {
500  pkt1 = pkt->next;
501  av_packet_unref(&pkt->pkt);
502  av_freep(&pkt);
503  }
504  q->last_pkt = NULL;
505  q->first_pkt = NULL;
506  q->nb_packets = 0;
507  q->size = 0;
508  q->duration = 0;
509  SDL_UnlockMutex(q->mutex);
510 }
511 
512 static void packet_queue_destroy(PacketQueue *q)
513 {
514  packet_queue_flush(q);
515  SDL_DestroyMutex(q->mutex);
516  SDL_DestroyCond(q->cond);
517 }
518 
519 static void packet_queue_abort(PacketQueue *q)
520 {
521  SDL_LockMutex(q->mutex);
522 
523  q->abort_request = 1;
524 
525  SDL_CondSignal(q->cond);
526 
527  SDL_UnlockMutex(q->mutex);
528 }
529 
530 static void packet_queue_start(PacketQueue *q)
531 {
532  SDL_LockMutex(q->mutex);
533  q->abort_request = 0;
534  packet_queue_put_private(q, &flush_pkt);
535  SDL_UnlockMutex(q->mutex);
536 }
537 
538 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
539 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
540 {
541  MyAVPacketList *pkt1;
542  int ret;
543 
544  SDL_LockMutex(q->mutex);
545 
546  for (;;) {
547  if (q->abort_request) {
548  ret = -1;
549  break;
550  }
551 
552  pkt1 = q->first_pkt;
553  if (pkt1) {
554  q->first_pkt = pkt1->next;
555  if (!q->first_pkt)
556  q->last_pkt = NULL;
557  q->nb_packets--;
558  q->size -= pkt1->pkt.size + sizeof(*pkt1);
559  q->duration -= pkt1->pkt.duration;
560  *pkt = pkt1->pkt;
561  if (serial)
562  *serial = pkt1->serial;
563  av_free(pkt1);
564  ret = 1;
565  break;
566  } else if (!block) {
567  ret = 0;
568  break;
569  } else {
570  SDL_CondWait(q->cond, q->mutex);
571  }
572  }
573  SDL_UnlockMutex(q->mutex);
574  return ret;
575 }
576 
577 static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
578  memset(d, 0, sizeof(Decoder));
579  d->avctx = avctx;
580  d->queue = queue;
581  d->empty_queue_cond = empty_queue_cond;
583  d->pkt_serial = -1;
584 }
585 
586 static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
587  int ret = AVERROR(EAGAIN);
588 
589  for (;;) {
590  AVPacket pkt;
591 
592  if (d->queue->serial == d->pkt_serial) {
593  do {
594  if (d->queue->abort_request)
595  return -1;
596 
597  switch (d->avctx->codec_type) {
598  case AVMEDIA_TYPE_VIDEO:
599  ret = avcodec_receive_frame(d->avctx, frame);
600  if (ret >= 0) {
601  if (decoder_reorder_pts == -1) {
602  frame->pts = frame->best_effort_timestamp;
603  } else if (!decoder_reorder_pts) {
604  frame->pts = frame->pkt_dts;
605  }
606  }
607  break;
608  case AVMEDIA_TYPE_AUDIO:
609  ret = avcodec_receive_frame(d->avctx, frame);
610  if (ret >= 0) {
611  AVRational tb = (AVRational){1, frame->sample_rate};
612  if (frame->pts != AV_NOPTS_VALUE)
613  frame->pts = av_rescale_q(frame->pts, d->avctx->pkt_timebase, tb);
614  else if (d->next_pts != AV_NOPTS_VALUE)
615  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
616  if (frame->pts != AV_NOPTS_VALUE) {
617  d->next_pts = frame->pts + frame->nb_samples;
618  d->next_pts_tb = tb;
619  }
620  }
621  break;
622  }
623  if (ret == AVERROR_EOF) {
624  d->finished = d->pkt_serial;
625  avcodec_flush_buffers(d->avctx);
626  return 0;
627  }
628  if (ret >= 0)
629  return 1;
630  } while (ret != AVERROR(EAGAIN));
631  }
632 
633  do {
634  if (d->queue->nb_packets == 0)
635  SDL_CondSignal(d->empty_queue_cond);
636  if (d->packet_pending) {
637  av_packet_move_ref(&pkt, &d->pkt);
638  d->packet_pending = 0;
639  } else {
640  if (packet_queue_get(d->queue, &pkt, 1, &d->pkt_serial) < 0)
641  return -1;
642  }
643  } while (d->queue->serial != d->pkt_serial);
644 
645  if (pkt.data == flush_pkt.data) {
646  avcodec_flush_buffers(d->avctx);
647  d->finished = 0;
648  d->next_pts = d->start_pts;
649  d->next_pts_tb = d->start_pts_tb;
650  } else {
651  if (d->avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
652  int got_frame = 0;
653  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &pkt);
654  if (ret < 0) {
655  ret = AVERROR(EAGAIN);
656  } else {
657  if (got_frame && !pkt.data) {
658  d->packet_pending = 1;
659  av_packet_move_ref(&d->pkt, &pkt);
660  }
661  ret = got_frame ? 0 : (pkt.data ? AVERROR(EAGAIN) : AVERROR_EOF);
662  }
663  } else {
664  if (avcodec_send_packet(d->avctx, &pkt) == AVERROR(EAGAIN)) {
665  av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
666  d->packet_pending = 1;
667  av_packet_move_ref(&d->pkt, &pkt);
668  }
669  }
670  av_packet_unref(&pkt);
671  }
672  }
673 }
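/* [Editor's note, not part of ffplay.c] decoder_decode_frame() above is built around
 * the send/receive API: it first drains avcodec_receive_frame() while the packet
 * serial is current (returning 1 per frame, 0 at EOF, -1 on abort), then pulls the
 * next packet from the queue, skipping packets from stale serials. flush_pkt maps to
 * avcodec_flush_buffers(); subtitles still go through avcodec_decode_subtitle2().
 * A minimal sketch of the same send/receive pattern, with hypothetical names and
 * assuming an already opened AVCodecContext *ctx: */
#if 0   /* illustrative sketch only -- not part of ffplay.c */
static int sketch_decode_one(AVCodecContext *ctx, AVPacket *pkt, AVFrame *out)
{
    int ret = avcodec_send_packet(ctx, pkt);   /* pkt == NULL starts draining */
    if (ret < 0 && ret != AVERROR(EAGAIN))
        return ret;
    ret = avcodec_receive_frame(ctx, out);     /* AVERROR(EAGAIN): feed more input */
    return ret;                                /* 0: got a frame, AVERROR_EOF: drained */
}
#endif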
674 
675 static void decoder_destroy(Decoder *d) {
676  av_packet_unref(&d->pkt);
677  avcodec_free_context(&d->avctx);
678 }
679 
680 static void frame_queue_unref_item(Frame *vp)
681 {
682  av_frame_unref(vp->frame);
683  avsubtitle_free(&vp->sub);
684 }
685 
686 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
687 {
688  int i;
689  memset(f, 0, sizeof(FrameQueue));
690  if (!(f->mutex = SDL_CreateMutex())) {
691  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
692  return AVERROR(ENOMEM);
693  }
694  if (!(f->cond = SDL_CreateCond())) {
695  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
696  return AVERROR(ENOMEM);
697  }
698  f->pktq = pktq;
699  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
700  f->keep_last = !!keep_last;
701  for (i = 0; i < f->max_size; i++)
702  if (!(f->queue[i].frame = av_frame_alloc()))
703  return AVERROR(ENOMEM);
704  return 0;
705 }
706 
707 static void frame_queue_destory(FrameQueue *f)
708 {
709  int i;
710  for (i = 0; i < f->max_size; i++) {
711  Frame *vp = &f->queue[i];
712  frame_queue_unref_item(vp);
713  av_frame_free(&vp->frame);
714  }
715  SDL_DestroyMutex(f->mutex);
716  SDL_DestroyCond(f->cond);
717 }
718 
719 static void frame_queue_signal(FrameQueue *f)
720 {
721  SDL_LockMutex(f->mutex);
722  SDL_CondSignal(f->cond);
723  SDL_UnlockMutex(f->mutex);
724 }
725 
726 static Frame *frame_queue_peek(FrameQueue *f)
727 {
728  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
729 }
730 
731 static Frame *frame_queue_peek_next(FrameQueue *f)
732 {
733  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
734 }
735 
736 static Frame *frame_queue_peek_last(FrameQueue *f)
737 {
738  return &f->queue[f->rindex];
739 }
740 
741 static Frame *frame_queue_peek_writable(FrameQueue *f)
742 {
743  /* wait until we have space to put a new frame */
744  SDL_LockMutex(f->mutex);
745  while (f->size >= f->max_size &&
746  !f->pktq->abort_request) {
747  SDL_CondWait(f->cond, f->mutex);
748  }
749  SDL_UnlockMutex(f->mutex);
750 
751  if (f->pktq->abort_request)
752  return NULL;
753 
754  return &f->queue[f->windex];
755 }
756 
757 static Frame *frame_queue_peek_readable(FrameQueue *f)
758 {
759  /* wait until we have a readable new frame */
760  SDL_LockMutex(f->mutex);
761  while (f->size - f->rindex_shown <= 0 &&
762  !f->pktq->abort_request) {
763  SDL_CondWait(f->cond, f->mutex);
764  }
765  SDL_UnlockMutex(f->mutex);
766 
767  if (f->pktq->abort_request)
768  return NULL;
769 
770  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
771 }
772 
773 static void frame_queue_push(FrameQueue *f)
774 {
775  if (++f->windex == f->max_size)
776  f->windex = 0;
777  SDL_LockMutex(f->mutex);
778  f->size++;
779  SDL_CondSignal(f->cond);
780  SDL_UnlockMutex(f->mutex);
781 }
782 
783 static void frame_queue_next(FrameQueue *f)
784 {
785  if (f->keep_last && !f->rindex_shown) {
786  f->rindex_shown = 1;
787  return;
788  }
789  frame_queue_unref_item(&f->queue[f->rindex]);
790  if (++f->rindex == f->max_size)
791  f->rindex = 0;
792  SDL_LockMutex(f->mutex);
793  f->size--;
794  SDL_CondSignal(f->cond);
795  SDL_UnlockMutex(f->mutex);
796 }
797 
798 /* return the number of undisplayed frames in the queue */
799 static int frame_queue_nb_remaining(FrameQueue *f)
800 {
801  return f->size - f->rindex_shown;
802 }
803 
804 /* return last shown position */
805 static int64_t frame_queue_last_pos(FrameQueue *f)
806 {
807  Frame *fp = &f->queue[f->rindex];
808  if (f->rindex_shown && fp->serial == f->pktq->serial)
809  return fp->pos;
810  else
811  return -1;
812 }
813 
814 static void decoder_abort(Decoder *d, FrameQueue *fq)
815 {
816  packet_queue_abort(d->queue);
817  frame_queue_signal(fq);
818  SDL_WaitThread(d->decoder_tid, NULL);
819  d->decoder_tid = NULL;
820  packet_queue_flush(d->queue);
821 }
822 
823 static inline void fill_rectangle(int x, int y, int w, int h)
824 {
825  SDL_Rect rect;
826  rect.x = x;
827  rect.y = y;
828  rect.w = w;
829  rect.h = h;
830  if (w && h)
831  SDL_RenderFillRect(renderer, &rect);
832 }
833 
834 static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
835 {
836  Uint32 format;
837  int access, w, h;
838  if (!*texture || SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
839  void *pixels;
840  int pitch;
841  if (*texture)
842  SDL_DestroyTexture(*texture);
843  if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
844  return -1;
845  if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
846  return -1;
847  if (init_texture) {
848  if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
849  return -1;
850  memset(pixels, 0, pitch * new_height);
851  SDL_UnlockTexture(*texture);
852  }
853  av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n", new_width, new_height, SDL_GetPixelFormatName(new_format));
854  }
855  return 0;
856 }
857 
858 static void calculate_display_rect(SDL_Rect *rect,
859  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
860  int pic_width, int pic_height, AVRational pic_sar)
861 {
862  float aspect_ratio;
863  int width, height, x, y;
864 
865  if (pic_sar.num == 0)
866  aspect_ratio = 0;
867  else
868  aspect_ratio = av_q2d(pic_sar);
869 
870  if (aspect_ratio <= 0.0)
871  aspect_ratio = 1.0;
872  aspect_ratio *= (float)pic_width / (float)pic_height;
873 
874  /* XXX: we suppose the screen has a 1.0 pixel ratio */
875  height = scr_height;
876  width = lrint(height * aspect_ratio) & ~1;
877  if (width > scr_width) {
878  width = scr_width;
879  height = lrint(width / aspect_ratio) & ~1;
880  }
881  x = (scr_width - width) / 2;
882  y = (scr_height - height) / 2;
883  rect->x = scr_xleft + x;
884  rect->y = scr_ytop + y;
885  rect->w = FFMAX(width, 1);
886  rect->h = FFMAX(height, 1);
887 }
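/* [Editor's note, not part of ffplay.c] Worked example for calculate_display_rect():
 * a 1920x1080 picture with a 1:1 sample aspect ratio shown in a 1280x1024 area gives
 * aspect_ratio = 1920/1080 ~= 1.778. Trying height = 1024 yields width = 1820, which
 * is too wide, so width is clamped to 1280 and height becomes lrint(1280/1.778) & ~1
 * = 720; the picture is then centered at x = 0, y = (1024 - 720) / 2 = 152. The & ~1
 * keeps both dimensions even, as chroma-subsampled formats require. */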
888 
889 static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
890 {
891  int i;
892  *sdl_blendmode = SDL_BLENDMODE_NONE;
893  *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN;
894  if (format == AV_PIX_FMT_RGB32 ||
895  format == AV_PIX_FMT_RGB32_1 ||
896  format == AV_PIX_FMT_BGR32 ||
897  format == AV_PIX_FMT_BGR32_1)
898  *sdl_blendmode = SDL_BLENDMODE_BLEND;
899  for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; i++) {
900  if (format == sdl_texture_format_map[i].format) {
901  *sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt;
902  return;
903  }
904  }
905 }
906 
907 static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext **img_convert_ctx) {
908  int ret = 0;
909  Uint32 sdl_pix_fmt;
910  SDL_BlendMode sdl_blendmode;
911  get_sdl_pix_fmt_and_blendmode(frame->format, &sdl_pix_fmt, &sdl_blendmode);
912  if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
913  return -1;
914  switch (sdl_pix_fmt) {
915  case SDL_PIXELFORMAT_UNKNOWN:
916  /* This should only happen if we are not using avfilter... */
917  *img_convert_ctx = sws_getCachedContext(*img_convert_ctx,
918  frame->width, frame->height, frame->format, frame->width, frame->height,
919  AV_PIX_FMT_BGRA, sws_flags, NULL, NULL, NULL);
920  if (*img_convert_ctx != NULL) {
921  uint8_t *pixels[4];
922  int pitch[4];
923  if (!SDL_LockTexture(*tex, NULL, (void **)pixels, pitch)) {
924  sws_scale(*img_convert_ctx, (const uint8_t * const *)frame->data, frame->linesize,
925  0, frame->height, pixels, pitch);
926  SDL_UnlockTexture(*tex);
927  }
928  } else {
929  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
930  ret = -1;
931  }
932  break;
933  case SDL_PIXELFORMAT_IYUV:
934  if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
935  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0],
936  frame->data[1], frame->linesize[1],
937  frame->data[2], frame->linesize[2]);
938  } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
939  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0],
940  frame->data[1] + frame->linesize[1] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[1],
941  frame->data[2] + frame->linesize[2] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[2]);
942  } else {
943  av_log(NULL, AV_LOG_ERROR, "Mixed negative and positive linesizes are not supported.\n");
944  return -1;
945  }
946  break;
947  default:
948  if (frame->linesize[0] < 0) {
949  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
950  } else {
951  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0], frame->linesize[0]);
952  }
953  break;
954  }
955  return ret;
956 }
957 
958 static void set_sdl_yuv_conversion_mode(AVFrame *frame)
959 {
960 #if SDL_VERSION_ATLEAST(2,0,8)
961  SDL_YUV_CONVERSION_MODE mode = SDL_YUV_CONVERSION_AUTOMATIC;
962  if (frame && (frame->format == AV_PIX_FMT_YUV420P || frame->format == AV_PIX_FMT_YUYV422 || frame->format == AV_PIX_FMT_UYVY422)) {
963  if (frame->color_range == AVCOL_RANGE_JPEG)
964  mode = SDL_YUV_CONVERSION_JPEG;
965  else if (frame->colorspace == AVCOL_SPC_BT709)
966  mode = SDL_YUV_CONVERSION_BT709;
967  else if (frame->colorspace == AVCOL_SPC_BT470BG || frame->colorspace == AVCOL_SPC_SMPTE170M || frame->colorspace == AVCOL_SPC_SMPTE240M)
968  mode = SDL_YUV_CONVERSION_BT601;
969  }
970  SDL_SetYUVConversionMode(mode);
971 #endif
972 }
973 
974 static void video_image_display(VideoState *is)
975 {
976  Frame *vp;
977  Frame *sp = NULL;
978  SDL_Rect rect;
979 
980  vp = frame_queue_peek_last(&is->pictq);
981  if (is->subtitle_st) {
982  if (frame_queue_nb_remaining(&is->subpq) > 0) {
983  sp = frame_queue_peek(&is->subpq);
984 
985  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
986  if (!sp->uploaded) {
987  uint8_t* pixels[4];
988  int pitch[4];
989  int i;
990  if (!sp->width || !sp->height) {
991  sp->width = vp->width;
992  sp->height = vp->height;
993  }
994  if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
995  return;
996 
997  for (i = 0; i < sp->sub.num_rects; i++) {
998  AVSubtitleRect *sub_rect = sp->sub.rects[i];
999 
1000  sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
1001  sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
1002  sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x);
1003  sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
1004 
1005  is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
1006  sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8,
1007  sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA,
1008  0, NULL, NULL, NULL);
1009  if (!is->sub_convert_ctx) {
1010  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1011  return;
1012  }
1013  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) {
1014  sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
1015  0, sub_rect->h, pixels, pitch);
1016  SDL_UnlockTexture(is->sub_texture);
1017  }
1018  }
1019  sp->uploaded = 1;
1020  }
1021  } else
1022  sp = NULL;
1023  }
1024  }
1025 
1026  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
1027 
1028  if (!vp->uploaded) {
1029  if (upload_texture(&is->vid_texture, vp->frame, &is->img_convert_ctx) < 0)
1030  return;
1031  vp->uploaded = 1;
1032  vp->flip_v = vp->frame->linesize[0] < 0;
1033  }
1034 
1036  SDL_RenderCopyEx(renderer, is->vid_texture, NULL, &rect, 0, NULL, vp->flip_v ? SDL_FLIP_VERTICAL : 0);
1038  if (sp) {
1039 #if USE_ONEPASS_SUBTITLE_RENDER
1040  SDL_RenderCopy(renderer, is->sub_texture, NULL, &rect);
1041 #else
1042  int i;
1043  double xratio = (double)rect.w / (double)sp->width;
1044  double yratio = (double)rect.h / (double)sp->height;
1045  for (i = 0; i < sp->sub.num_rects; i++) {
1046  SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
1047  SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
1048  .y = rect.y + sub_rect->y * yratio,
1049  .w = sub_rect->w * xratio,
1050  .h = sub_rect->h * yratio};
1051  SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
1052  }
1053 #endif
1054  }
1055 }
1056 
1057 static inline int compute_mod(int a, int b)
1058 {
1059  return a < 0 ? a%b + b : a%b;
1060 }
1061 
1062 static void video_audio_display(VideoState *s)
1063 {
1064  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1065  int ch, channels, h, h2;
1066  int64_t time_diff;
1067  int rdft_bits, nb_freq;
1068 
1069  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1070  ;
1071  nb_freq = 1 << (rdft_bits - 1);
1072 
1073  /* compute display index : center on currently output samples */
1074  channels = s->audio_tgt.channels;
1075  nb_display_channels = channels;
1076  if (!s->paused) {
1077  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1078  n = 2 * channels;
1079  delay = s->audio_write_buf_size;
1080  delay /= n;
1081 
1082  /* to be more precise, we take into account the time spent since
1083  the last buffer computation */
1084  if (audio_callback_time) {
1085  time_diff = av_gettime_relative() - audio_callback_time;
1086  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1087  }
1088 
1089  delay += 2 * data_used;
1090  if (delay < data_used)
1091  delay = data_used;
1092 
1093  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1094  if (s->show_mode == SHOW_MODE_WAVES) {
1095  h = INT_MIN;
1096  for (i = 0; i < 1000; i += channels) {
1097  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1098  int a = s->sample_array[idx];
1099  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1100  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1101  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1102  int score = a - d;
1103  if (h < score && (b ^ c) < 0) {
1104  h = score;
1105  i_start = idx;
1106  }
1107  }
1108  }
1109 
1110  s->last_i_start = i_start;
1111  } else {
1112  i_start = s->last_i_start;
1113  }
1114 
1115  if (s->show_mode == SHOW_MODE_WAVES) {
1116  SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
1117 
1118  /* total height for one channel */
1119  h = s->height / nb_display_channels;
1120  /* graph height / 2 */
1121  h2 = (h * 9) / 20;
1122  for (ch = 0; ch < nb_display_channels; ch++) {
1123  i = i_start + ch;
1124  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1125  for (x = 0; x < s->width; x++) {
1126  y = (s->sample_array[i] * h2) >> 15;
1127  if (y < 0) {
1128  y = -y;
1129  ys = y1 - y;
1130  } else {
1131  ys = y1;
1132  }
1133  fill_rectangle(s->xleft + x, ys, 1, y);
1134  i += channels;
1135  if (i >= SAMPLE_ARRAY_SIZE)
1136  i -= SAMPLE_ARRAY_SIZE;
1137  }
1138  }
1139 
1140  SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
1141 
1142  for (ch = 1; ch < nb_display_channels; ch++) {
1143  y = s->ytop + ch * h;
1144  fill_rectangle(s->xleft, y, s->width, 1);
1145  }
1146  } else {
1147  if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
1148  return;
1149 
1150  nb_display_channels= FFMIN(nb_display_channels, 2);
1151  if (rdft_bits != s->rdft_bits) {
1152  av_rdft_end(s->rdft);
1153  av_free(s->rdft_data);
1154  s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
1155  s->rdft_bits = rdft_bits;
1156  s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data));
1157  }
1158  if (!s->rdft || !s->rdft_data){
1159  av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1160  s->show_mode = SHOW_MODE_WAVES;
1161  } else {
1162  FFTSample *data[2];
1163  SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
1164  uint32_t *pixels;
1165  int pitch;
1166  for (ch = 0; ch < nb_display_channels; ch++) {
1167  data[ch] = s->rdft_data + 2 * nb_freq * ch;
1168  i = i_start + ch;
1169  for (x = 0; x < 2 * nb_freq; x++) {
1170  double w = (x-nb_freq) * (1.0 / nb_freq);
1171  data[ch][x] = s->sample_array[i] * (1.0 - w * w);
1172  i += channels;
1173  if (i >= SAMPLE_ARRAY_SIZE)
1174  i -= SAMPLE_ARRAY_SIZE;
1175  }
1176  av_rdft_calc(s->rdft, data[ch]);
1177  }
1178  /* Least efficient way to do this, we should of course
1179  * directly access it but it is more than fast enough. */
1180  if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
1181  pitch >>= 2;
1182  pixels += pitch * s->height;
1183  for (y = 0; y < s->height; y++) {
1184  double w = 1 / sqrt(nb_freq);
1185  int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
1186  int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][2 * y + 0], data[1][2 * y + 1]))
1187  : a;
1188  a = FFMIN(a, 255);
1189  b = FFMIN(b, 255);
1190  pixels -= pitch;
1191  *pixels = (a << 16) + (b << 8) + ((a+b) >> 1);
1192  }
1193  SDL_UnlockTexture(s->vis_texture);
1194  }
1195  SDL_RenderCopy(renderer, s->vis_texture, NULL, NULL);
1196  }
1197  if (!s->paused)
1198  s->xpos++;
1199  if (s->xpos >= s->width)
1200  s->xpos= s->xleft;
1201  }
1202 }
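/* [Editor's note, not part of ffplay.c] video_audio_display() has two modes:
 * SHOW_MODE_WAVES draws the raw PCM samples from sample_array as one waveform per
 * channel, while the RDFT mode runs a real DFT of size 1 << rdft_bits (chosen so
 * that 2^rdft_bits >= 2*height) over a windowed slice of the samples and paints a
 * single spectrum column per refresh at xpos, scrolling across the window. Pixel
 * intensity comes from the bin magnitude sqrt(re^2 + im^2), compressed again with
 * sqrt() to tame the dynamic range. */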
1203 
1204 static void stream_component_close(VideoState *is, int stream_index)
1205 {
1206  AVFormatContext *ic = is->ic;
1207  AVCodecParameters *codecpar;
1208 
1209  if (stream_index < 0 || stream_index >= ic->nb_streams)
1210  return;
1211  codecpar = ic->streams[stream_index]->codecpar;
1212 
1213  switch (codecpar->codec_type) {
1214  case AVMEDIA_TYPE_AUDIO:
1215  decoder_abort(&is->auddec, &is->sampq);
1216  SDL_CloseAudioDevice(audio_dev);
1217  decoder_destroy(&is->auddec);
1218  swr_free(&is->swr_ctx);
1219  av_freep(&is->audio_buf1);
1220  is->audio_buf1_size = 0;
1221  is->audio_buf = NULL;
1222 
1223  if (is->rdft) {
1224  av_rdft_end(is->rdft);
1225  av_freep(&is->rdft_data);
1226  is->rdft = NULL;
1227  is->rdft_bits = 0;
1228  }
1229  break;
1230  case AVMEDIA_TYPE_VIDEO:
1231  decoder_abort(&is->viddec, &is->pictq);
1232  decoder_destroy(&is->viddec);
1233  break;
1234  case AVMEDIA_TYPE_SUBTITLE:
1235  decoder_abort(&is->subdec, &is->subpq);
1236  decoder_destroy(&is->subdec);
1237  break;
1238  default:
1239  break;
1240  }
1241 
1242  ic->streams[stream_index]->discard = AVDISCARD_ALL;
1243  switch (codecpar->codec_type) {
1244  case AVMEDIA_TYPE_AUDIO:
1245  is->audio_st = NULL;
1246  is->audio_stream = -1;
1247  break;
1248  case AVMEDIA_TYPE_VIDEO:
1249  is->video_st = NULL;
1250  is->video_stream = -1;
1251  break;
1252  case AVMEDIA_TYPE_SUBTITLE:
1253  is->subtitle_st = NULL;
1254  is->subtitle_stream = -1;
1255  break;
1256  default:
1257  break;
1258  }
1259 }
1260 
1261 static void stream_close(VideoState *is)
1262 {
1263  /* XXX: use a special url_shutdown call to abort parse cleanly */
1264  is->abort_request = 1;
1265  SDL_WaitThread(is->read_tid, NULL);
1266 
1267  /* close each stream */
1268  if (is->audio_stream >= 0)
1269  stream_component_close(is, is->audio_stream);
1270  if (is->video_stream >= 0)
1271  stream_component_close(is, is->video_stream);
1272  if (is->subtitle_stream >= 0)
1273  stream_component_close(is, is->subtitle_stream);
1274 
1275  avformat_close_input(&is->ic);
1276 
1280 
1281  /* free all pictures */
1282  frame_queue_destory(&is->pictq);
1283  frame_queue_destory(&is->sampq);
1284  frame_queue_destory(&is->subpq);
1285  SDL_DestroyCond(is->continue_read_thread);
1288  av_free(is->filename);
1289  if (is->vis_texture)
1290  SDL_DestroyTexture(is->vis_texture);
1291  if (is->vid_texture)
1292  SDL_DestroyTexture(is->vid_texture);
1293  if (is->sub_texture)
1294  SDL_DestroyTexture(is->sub_texture);
1295  av_free(is);
1296 }
1297 
1298 static void do_exit(VideoState *is)
1299 {
1300  if (is) {
1301  stream_close(is);
1302  }
1303  if (renderer)
1304  SDL_DestroyRenderer(renderer);
1305  if (window)
1306  SDL_DestroyWindow(window);
1307  uninit_opts();
1308 #if CONFIG_AVFILTER
1309  av_freep(&vfilters_list);
1310 #endif
1312  if (show_status)
1313  printf("\n");
1314  SDL_Quit();
1315  av_log(NULL, AV_LOG_QUIET, "%s", "");
1316  exit(0);
1317 }
1318 
1319 static void sigterm_handler(int sig)
1320 {
1321  exit(123);
1322 }
1323 
1324 static void set_default_window_size(int width, int height, AVRational sar)
1325 {
1326  SDL_Rect rect;
1327  calculate_display_rect(&rect, 0, 0, INT_MAX, height, width, height, sar);
1328  default_width = rect.w;
1329  default_height = rect.h;
1330 }
1331 
1332 static int video_open(VideoState *is)
1333 {
1334  int w,h;
1335 
1336  if (screen_width) {
1337  w = screen_width;
1338  h = screen_height;
1339  } else {
1340  w = default_width;
1341  h = default_height;
1342  }
1343 
1344  if (!window_title)
1346  SDL_SetWindowTitle(window, window_title);
1347 
1348  SDL_SetWindowSize(window, w, h);
1349  SDL_SetWindowPosition(window, SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED);
1350  if (is_full_screen)
1351  SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP);
1352  SDL_ShowWindow(window);
1353 
1354  is->width = w;
1355  is->height = h;
1356 
1357  return 0;
1358 }
1359 
1360 /* display the current picture, if any */
1361 static void video_display(VideoState *is)
1362 {
1363  if (!is->width)
1364  video_open(is);
1365 
1366  SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1367  SDL_RenderClear(renderer);
1368  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1369  video_audio_display(is);
1370  else if (is->video_st)
1371  video_image_display(is);
1372  SDL_RenderPresent(renderer);
1373 }
1374 
1375 static double get_clock(Clock *c)
1376 {
1377  if (*c->queue_serial != c->serial)
1378  return NAN;
1379  if (c->paused) {
1380  return c->pts;
1381  } else {
1382  double time = av_gettime_relative() / 1000000.0;
1383  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1384  }
1385 }
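/* [Editor's note, not part of ffplay.c] get_clock() math: with pts_drift = pts - t_set,
 * the unpaused value is pts_drift + now - (now - last_updated) * (1 - speed), which at
 * speed == 1.0 reduces to pts + (now - t_set), i.e. the stored timestamp advanced by
 * the wall-clock time elapsed since it was set. For speed != 1.0 (external-clock
 * tuning) the elapsed time is scaled accordingly without touching the stored pts. */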
1386 
1387 static void set_clock_at(Clock *c, double pts, int serial, double time)
1388 {
1389  c->pts = pts;
1390  c->last_updated = time;
1391  c->pts_drift = c->pts - time;
1392  c->serial = serial;
1393 }
1394 
1395 static void set_clock(Clock *c, double pts, int serial)
1396 {
1397  double time = av_gettime_relative() / 1000000.0;
1398  set_clock_at(c, pts, serial, time);
1399 }
1400 
1401 static void set_clock_speed(Clock *c, double speed)
1402 {
1403  set_clock(c, get_clock(c), c->serial);
1404  c->speed = speed;
1405 }
1406 
1407 static void init_clock(Clock *c, int *queue_serial)
1408 {
1409  c->speed = 1.0;
1410  c->paused = 0;
1411  c->queue_serial = queue_serial;
1412  set_clock(c, NAN, -1);
1413 }
1414 
1415 static void sync_clock_to_slave(Clock *c, Clock *slave)
1416 {
1417  double clock = get_clock(c);
1418  double slave_clock = get_clock(slave);
1419  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1420  set_clock(c, slave_clock, slave->serial);
1421 }
1422 
1423 static int get_master_sync_type(VideoState *is) {
1424  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1425  if (is->video_st)
1426  return AV_SYNC_VIDEO_MASTER;
1427  else
1428  return AV_SYNC_AUDIO_MASTER;
1429  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1430  if (is->audio_st)
1431  return AV_SYNC_AUDIO_MASTER;
1432  else
1433  return AV_SYNC_EXTERNAL_CLOCK;
1434  } else {
1435  return AV_SYNC_EXTERNAL_CLOCK;
1436  }
1437 }
1438 
1439 /* get the current master clock value */
1440 static double get_master_clock(VideoState *is)
1441 {
1442  double val;
1443 
1444  switch (get_master_sync_type(is)) {
1445  case AV_SYNC_VIDEO_MASTER:
1446  val = get_clock(&is->vidclk);
1447  break;
1448  case AV_SYNC_AUDIO_MASTER:
1449  val = get_clock(&is->audclk);
1450  break;
1451  default:
1452  val = get_clock(&is->extclk);
1453  break;
1454  }
1455  return val;
1456 }
1457 
1458 static void check_external_clock_speed(VideoState *is) {
1459  if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES ||
1460  is->audio_stream >= 0 && is->audioq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES) {
1461  set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
1462  } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) &&
1463  (is->audio_stream < 0 || is->audioq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES)) {
1464  set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
1465  } else {
1466  double speed = is->extclk.speed;
1467  if (speed != 1.0)
1468  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1469  }
1470 }
1471 
1472 /* seek in the stream */
1473 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1474 {
1475  if (!is->seek_req) {
1476  is->seek_pos = pos;
1477  is->seek_rel = rel;
1478  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1479  if (seek_by_bytes)
1480  is->seek_flags |= AVSEEK_FLAG_BYTE;
1481  is->seek_req = 1;
1482  SDL_CondSignal(is->continue_read_thread);
1483  }
1484 }
1485 
1486 /* pause or resume the video */
1487 static void stream_toggle_pause(VideoState *is)
1488 {
1489  if (is->paused) {
1490  is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1491  if (is->read_pause_return != AVERROR(ENOSYS)) {
1492  is->vidclk.paused = 0;
1493  }
1494  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1495  }
1496  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1497  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1498 }
1499 
1500 static void toggle_pause(VideoState *is)
1501 {
1502  stream_toggle_pause(is);
1503  is->step = 0;
1504 }
1505 
1506 static void toggle_mute(VideoState *is)
1507 {
1508  is->muted = !is->muted;
1509 }
1510 
1511 static void update_volume(VideoState *is, int sign, double step)
1512 {
1513  double volume_level = is->audio_volume ? (20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)) : -1000.0;
1514  int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (volume_level + sign * step) / 20.0));
1515  is->audio_volume = av_clip(is->audio_volume == new_volume ? (is->audio_volume + sign) : new_volume, 0, SDL_MIX_MAXVOLUME);
1516 }
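/* [Editor's note, not part of ffplay.c] update_volume() works in decibels: the current
 * level is 20*log10(audio_volume / SDL_MIX_MAXVOLUME) and one key press moves it by
 * SDL_VOLUME_STEP (0.75 dB). Example with SDL_MIX_MAXVOLUME == 128: at full volume the
 * level is 0 dB, and one step down gives 128 * 10^(-0.75/20) ~= 117. The fallback
 * "audio_volume + sign" term guarantees progress when the dB step rounds back to the
 * same integer volume. */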
1517 
1518 static void step_to_next_frame(VideoState *is)
1519 {
1520  /* if the stream is paused unpause it, then step */
1521  if (is->paused)
1522  stream_toggle_pause(is);
1523  is->step = 1;
1524 }
1525 
1526 static double compute_target_delay(double delay, VideoState *is)
1527 {
1528  double sync_threshold, diff = 0;
1529 
1530  /* update delay to follow master synchronisation source */
1531  if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1532  /* if video is slave, we try to correct big delays by
1533  duplicating or deleting a frame */
1534  diff = get_clock(&is->vidclk) - get_master_clock(is);
1535 
1536  /* skip or repeat frame. We take into account the
1537  delay to compute the threshold. I still don't know
1538  if it is the best guess */
1539  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1540  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1541  if (diff <= -sync_threshold)
1542  delay = FFMAX(0, delay + diff);
1543  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1544  delay = delay + diff;
1545  else if (diff >= sync_threshold)
1546  delay = 2 * delay;
1547  }
1548  }
1549 
1550  av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
1551  delay, -diff);
1552 
1553  return delay;
1554 }
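/* [Editor's note, not part of ffplay.c] Worked example for compute_target_delay() with
 * a 25 fps stream (delay = 0.040 s, so sync_threshold = 0.040): if the video clock is
 * 60 ms behind the master (diff = -0.060), delay becomes FFMAX(0, 0.040 - 0.060) = 0
 * and the next frame is shown immediately; if it is 60 ms ahead (diff = +0.060) and
 * the frame duration is below AV_SYNC_FRAMEDUP_THRESHOLD, delay is doubled to 0.080 s,
 * effectively keeping the current frame on screen twice as long. */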
1555 
1556 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1557  if (vp->serial == nextvp->serial) {
1558  double duration = nextvp->pts - vp->pts;
1559  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1560  return vp->duration;
1561  else
1562  return duration;
1563  } else {
1564  return 0.0;
1565  }
1566 }
1567 
1568 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1569  /* update current video pts */
1570  set_clock(&is->vidclk, pts, serial);
1571  sync_clock_to_slave(&is->extclk, &is->vidclk);
1572 }
1573 
1574 /* called to display each frame */
1575 static void video_refresh(void *opaque, double *remaining_time)
1576 {
1577  VideoState *is = opaque;
1578  double time;
1579 
1580  Frame *sp, *sp2;
1581 
1582  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1583  check_external_clock_speed(is);
1584 
1585  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1586  time = av_gettime_relative() / 1000000.0;
1587  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1588  video_display(is);
1589  is->last_vis_time = time;
1590  }
1591  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1592  }
1593 
1594  if (is->video_st) {
1595 retry:
1596  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1597  // nothing to do, no picture to display in the queue
1598  } else {
1599  double last_duration, duration, delay;
1600  Frame *vp, *lastvp;
1601 
1602  /* dequeue the picture */
1603  lastvp = frame_queue_peek_last(&is->pictq);
1604  vp = frame_queue_peek(&is->pictq);
1605 
1606  if (vp->serial != is->videoq.serial) {
1607  frame_queue_next(&is->pictq);
1608  goto retry;
1609  }
1610 
1611  if (lastvp->serial != vp->serial)
1612  is->frame_timer = av_gettime_relative() / 1000000.0;
1613 
1614  if (is->paused)
1615  goto display;
1616 
1617  /* compute nominal last_duration */
1618  last_duration = vp_duration(is, lastvp, vp);
1619  delay = compute_target_delay(last_duration, is);
1620 
1621  time= av_gettime_relative()/1000000.0;
1622  if (time < is->frame_timer + delay) {
1623  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1624  goto display;
1625  }
1626 
1627  is->frame_timer += delay;
1628  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1629  is->frame_timer = time;
1630 
1631  SDL_LockMutex(is->pictq.mutex);
1632  if (!isnan(vp->pts))
1633  update_video_pts(is, vp->pts, vp->pos, vp->serial);
1634  SDL_UnlockMutex(is->pictq.mutex);
1635 
1636  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1637  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1638  duration = vp_duration(is, vp, nextvp);
1639  if(!is->step && (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1640  is->frame_drops_late++;
1641  frame_queue_next(&is->pictq);
1642  goto retry;
1643  }
1644  }
1645 
1646  if (is->subtitle_st) {
1647  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1648  sp = frame_queue_peek(&is->subpq);
1649 
1650  if (frame_queue_nb_remaining(&is->subpq) > 1)
1651  sp2 = frame_queue_peek_next(&is->subpq);
1652  else
1653  sp2 = NULL;
1654 
1655  if (sp->serial != is->subtitleq.serial
1656  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1657  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1658  {
1659  if (sp->uploaded) {
1660  int i;
1661  for (i = 0; i < sp->sub.num_rects; i++) {
1662  AVSubtitleRect *sub_rect = sp->sub.rects[i];
1663  uint8_t *pixels;
1664  int pitch, j;
1665 
1666  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
1667  for (j = 0; j < sub_rect->h; j++, pixels += pitch)
1668  memset(pixels, 0, sub_rect->w << 2);
1669  SDL_UnlockTexture(is->sub_texture);
1670  }
1671  }
1672  }
1673  frame_queue_next(&is->subpq);
1674  } else {
1675  break;
1676  }
1677  }
1678  }
1679 
1680  frame_queue_next(&is->pictq);
1681  is->force_refresh = 1;
1682 
1683  if (is->step && !is->paused)
1684  stream_toggle_pause(is);
1685  }
1686 display:
1687  /* display picture */
1688  if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
1689  video_display(is);
1690  }
1691  is->force_refresh = 0;
1692  if (show_status) {
1693  static int64_t last_time;
1694  int64_t cur_time;
1695  int aqsize, vqsize, sqsize;
1696  double av_diff;
1697 
1698  cur_time = av_gettime_relative();
1699  if (!last_time || (cur_time - last_time) >= 30000) {
1700  aqsize = 0;
1701  vqsize = 0;
1702  sqsize = 0;
1703  if (is->audio_st)
1704  aqsize = is->audioq.size;
1705  if (is->video_st)
1706  vqsize = is->videoq.size;
1707  if (is->subtitle_st)
1708  sqsize = is->subtitleq.size;
1709  av_diff = 0;
1710  if (is->audio_st && is->video_st)
1711  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1712  else if (is->video_st)
1713  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1714  else if (is->audio_st)
1715  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1717  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1718  get_master_clock(is),
1719  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1720  av_diff,
1722  aqsize / 1024,
1723  vqsize / 1024,
1724  sqsize,
1727  fflush(stdout);
1728  last_time = cur_time;
1729  }
1730  }
1731 }
1732 
1733 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1734 {
1735  Frame *vp;
1736 
1737 #if defined(DEBUG_SYNC)
1738  printf("frame_type=%c pts=%0.3f\n",
1739  av_get_picture_type_char(src_frame->pict_type), pts);
1740 #endif
1741 
1742  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1743  return -1;
1744 
1745  vp->sar = src_frame->sample_aspect_ratio;
1746  vp->uploaded = 0;
1747 
1748  vp->width = src_frame->width;
1749  vp->height = src_frame->height;
1750  vp->format = src_frame->format;
1751 
1752  vp->pts = pts;
1753  vp->duration = duration;
1754  vp->pos = pos;
1755  vp->serial = serial;
1756 
1757  set_default_window_size(vp->width, vp->height, vp->sar);
1758 
1759  av_frame_move_ref(vp->frame, src_frame);
1760  frame_queue_push(&is->pictq);
1761  return 0;
1762 }
1763 
1764 static int get_video_frame(VideoState *is, AVFrame *frame)
1765 {
1766  int got_picture;
1767 
1768  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1769  return -1;
1770 
1771  if (got_picture) {
1772  double dpts = NAN;
1773 
1774  if (frame->pts != AV_NOPTS_VALUE)
1775  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1776 
1777  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1778 
1779  if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1780  if (frame->pts != AV_NOPTS_VALUE) {
1781  double diff = dpts - get_master_clock(is);
1782  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1783  diff - is->frame_last_filter_delay < 0 &&
1784  is->viddec.pkt_serial == is->vidclk.serial &&
1785  is->videoq.nb_packets) {
1786  is->frame_drops_early++;
1787  av_frame_unref(frame);
1788  got_picture = 0;
1789  }
1790  }
1791  }
1792  }
1793 
1794  return got_picture;
1795 }
1796 
1797 #if CONFIG_AVFILTER
1798 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1799  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1800 {
1801  int ret, i;
1802  int nb_filters = graph->nb_filters;
1803  AVFilterInOut *outputs = NULL, *inputs = NULL;
1804 
1805  if (filtergraph) {
1806  outputs = avfilter_inout_alloc();
1807  inputs = avfilter_inout_alloc();
1808  if (!outputs || !inputs) {
1809  ret = AVERROR(ENOMEM);
1810  goto fail;
1811  }
1812 
1813  outputs->name = av_strdup("in");
1814  outputs->filter_ctx = source_ctx;
1815  outputs->pad_idx = 0;
1816  outputs->next = NULL;
1817 
1818  inputs->name = av_strdup("out");
1819  inputs->filter_ctx = sink_ctx;
1820  inputs->pad_idx = 0;
1821  inputs->next = NULL;
1822 
1823  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1824  goto fail;
1825  } else {
1826  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1827  goto fail;
1828  }
1829 
1830  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1831  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1832  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1833 
1834  ret = avfilter_graph_config(graph, NULL);
1835 fail:
1836  avfilter_inout_free(&outputs);
1837  avfilter_inout_free(&inputs);
1838  return ret;
1839 }
1840 
1841 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1842 {
1843  enum AVPixelFormat pix_fmts[FF_ARRAY_ELEMS(sdl_texture_format_map)];
1844  char sws_flags_str[512] = "";
1845  char buffersrc_args[256];
1846  int ret;
1847  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1848  AVCodecParameters *codecpar = is->video_st->codecpar;
1849  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1850  AVDictionaryEntry *e = NULL;
1851  int nb_pix_fmts = 0;
1852  int i, j;
1853 
1854  for (i = 0; i < renderer_info.num_texture_formats; i++) {
1855  for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; j++) {
1856  if (renderer_info.texture_formats[i] == sdl_texture_format_map[j].texture_fmt) {
1857  pix_fmts[nb_pix_fmts++] = sdl_texture_format_map[j].format;
1858  break;
1859  }
1860  }
1861  }
1862  pix_fmts[nb_pix_fmts] = AV_PIX_FMT_NONE;
1863 
1864  while ((e = av_dict_get(sws_dict, "", e, AV_DICT_IGNORE_SUFFIX))) {
1865  if (!strcmp(e->key, "sws_flags")) {
1866  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1867  } else
1868  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value);
1869  }
1870  if (strlen(sws_flags_str))
1871  sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1872 
1873  graph->scale_sws_opts = av_strdup(sws_flags_str);
1874 
1875  snprintf(buffersrc_args, sizeof(buffersrc_args),
1876  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1877  frame->width, frame->height, frame->format,
1879  codecpar->sample_aspect_ratio.num, FFMAX(codecpar->sample_aspect_ratio.den, 1));
1880  if (fr.num && fr.den)
1881  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1882 
1883  if ((ret = avfilter_graph_create_filter(&filt_src,
1884  avfilter_get_by_name("buffer"),
1885  "ffplay_buffer", buffersrc_args, NULL,
1886  graph)) < 0)
1887  goto fail;
1888 
1889  ret = avfilter_graph_create_filter(&filt_out,
1890  avfilter_get_by_name("buffersink"),
1891  "ffplay_buffersink", NULL, NULL, graph);
1892  if (ret < 0)
1893  goto fail;
1894 
1895  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1896  goto fail;
1897 
1898  last_filter = filt_out;
1899 
1900 /* Note: this macro adds a filter before the last added filter, so the
1901  * processing order of the filters is reversed */
1902 #define INSERT_FILT(name, arg) do { \
1903  AVFilterContext *filt_ctx; \
1904  \
1905  ret = avfilter_graph_create_filter(&filt_ctx, \
1906  avfilter_get_by_name(name), \
1907  "ffplay_" name, arg, NULL, graph); \
1908  if (ret < 0) \
1909  goto fail; \
1910  \
1911  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1912  if (ret < 0) \
1913  goto fail; \
1914  \
1915  last_filter = filt_ctx; \
1916 } while (0)
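/* [Editor's note, not part of ffplay.c] Because INSERT_FILT() links each new filter in
 * front of last_filter (which starts out as the buffersink), filters are applied in the
 * reverse order of the INSERT_FILT() calls. For theta ~= 180 the calls
 * INSERT_FILT("hflip"), INSERT_FILT("vflip") therefore build
 * buffer -> [user vfilters] -> vflip -> hflip -> buffersink. */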
1917 
1918  if (autorotate) {
1919  double theta = get_rotation(is->video_st);
1920 
1921  if (fabs(theta - 90) < 1.0) {
1922  INSERT_FILT("transpose", "clock");
1923  } else if (fabs(theta - 180) < 1.0) {
1924  INSERT_FILT("hflip", NULL);
1925  INSERT_FILT("vflip", NULL);
1926  } else if (fabs(theta - 270) < 1.0) {
1927  INSERT_FILT("transpose", "cclock");
1928  } else if (fabs(theta) > 1.0) {
1929  char rotate_buf[64];
1930  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1931  INSERT_FILT("rotate", rotate_buf);
1932  }
1933  }
1934 
1935  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
1936  goto fail;
1937 
1938  is->in_video_filter = filt_src;
1939  is->out_video_filter = filt_out;
1940 
1941 fail:
1942  return ret;
1943 }
1944 
1945 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1946 {
1947  static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
1948  int sample_rates[2] = { 0, -1 };
1949  int64_t channel_layouts[2] = { 0, -1 };
1950  int channels[2] = { 0, -1 };
1951  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1952  char aresample_swr_opts[512] = "";
1953  AVDictionaryEntry *e = NULL;
1954  char asrc_args[256];
1955  int ret;
1956 
1957  avfilter_graph_free(&is->agraph);
1958  if (!(is->agraph = avfilter_graph_alloc()))
1959  return AVERROR(ENOMEM);
1960 
1961  while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
1962  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
1963  if (strlen(aresample_swr_opts))
1964  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
1965  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
1966 
1967  ret = snprintf(asrc_args, sizeof(asrc_args),
1968  "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
1969  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
1970  is->audio_filter_src.channels,
1971  1, is->audio_filter_src.freq);
1972  if (is->audio_filter_src.channel_layout)
1973  snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
1974  ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
1975 
1976  ret = avfilter_graph_create_filter(&filt_asrc,
1977  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1978  asrc_args, NULL, is->agraph);
1979  if (ret < 0)
1980  goto end;
1981 
1982 
1983  ret = avfilter_graph_create_filter(&filt_asink,
1984  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
1985  NULL, NULL, is->agraph);
1986  if (ret < 0)
1987  goto end;
1988 
1989  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1990  goto end;
1991  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1992  goto end;
1993 
1994  if (force_output_format) {
1995  channel_layouts[0] = is->audio_tgt.channel_layout;
1996  channels [0] = is->audio_tgt.channels;
1997  sample_rates [0] = is->audio_tgt.freq;
1998  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
1999  goto end;
2000  if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2001  goto end;
2002  if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2003  goto end;
2004  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2005  goto end;
2006  }
2007 
2008 
2009  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
2010  goto end;
2011 
2012  is->in_audio_filter = filt_asrc;
2013  is->out_audio_filter = filt_asink;
2014 
2015 end:
2016  if (ret < 0)
2017  avfilter_graph_free(&is->agraph);
2018  return ret;
2019 }
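
For reference, the asrc_args string assembled above takes the colon-separated key=value form expected by the abuffer source. The sketch below reproduces the same snprintf() calls with made-up input parameters (48 kHz stereo s16), so the exact values are illustrative only:

    #include <stdio.h>

    int main(void)
    {
        char asrc_args[256];
        int  freq = 48000, channels = 2;          /* example decoder output */
        const char *fmt = "s16";
        long long layout = 0x3;                   /* stereo channel layout */

        int ret = snprintf(asrc_args, sizeof(asrc_args),
                           "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
                           freq, fmt, channels, 1, freq);
        if (layout)                               /* only appended when known */
            snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
                     ":channel_layout=0x%llx", layout);

        /* sample_rate=48000:sample_fmt=s16:channels=2:time_base=1/48000:channel_layout=0x3 */
        printf("%s\n", asrc_args);
        return 0;
    }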
2020 #endif /* CONFIG_AVFILTER */
2021 
2022 static int audio_thread(void *arg)
2023 {
2024  VideoState *is = arg;
2025  AVFrame *frame = av_frame_alloc();
2026  Frame *af;
2027 #if CONFIG_AVFILTER
2028  int last_serial = -1;
2029  int64_t dec_channel_layout;
2030  int reconfigure;
2031 #endif
2032  int got_frame = 0;
2033  AVRational tb;
2034  int ret = 0;
2035 
2036  if (!frame)
2037  return AVERROR(ENOMEM);
2038 
2039  do {
2040  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2041  goto the_end;
2042 
2043  if (got_frame) {
2044  tb = (AVRational){1, frame->sample_rate};
2045 
2046 #if CONFIG_AVFILTER
2047  dec_channel_layout = get_valid_channel_layout(frame->channel_layout, frame->channels);
2048 
2049  reconfigure =
2050  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2051  frame->format, frame->channels) ||
2052  is->audio_filter_src.channel_layout != dec_channel_layout ||
2053  is->audio_filter_src.freq != frame->sample_rate ||
2054  is->auddec.pkt_serial != last_serial;
2055 
2056  if (reconfigure) {
2057  char buf1[1024], buf2[1024];
2058  av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2059  av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2060  av_log(NULL, AV_LOG_DEBUG,
2061  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2062  is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2063  frame->sample_rate, frame->channels, av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2064 
2065  is->audio_filter_src.fmt = frame->format;
2066  is->audio_filter_src.channels = frame->channels;
2067  is->audio_filter_src.channel_layout = dec_channel_layout;
2068  is->audio_filter_src.freq = frame->sample_rate;
2069  last_serial = is->auddec.pkt_serial;
2070 
2071  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2072  goto the_end;
2073  }
2074 
2075  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2076  goto the_end;
2077 
2078  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2079  tb = av_buffersink_get_time_base(is->out_audio_filter);
2080 #endif
2081  if (!(af = frame_queue_peek_writable(&is->sampq)))
2082  goto the_end;
2083 
2084  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2085  af->pos = frame->pkt_pos;
2086  af->serial = is->auddec.pkt_serial;
2087  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2088 
2089  av_frame_move_ref(af->frame, frame);
2090  frame_queue_push(&is->sampq);
2091 
2092 #if CONFIG_AVFILTER
2093  if (is->audioq.serial != is->auddec.pkt_serial)
2094  break;
2095  }
2096  if (ret == AVERROR_EOF)
2097  is->auddec.finished = is->auddec.pkt_serial;
2098 #endif
2099  }
2100  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2101  the_end:
2102 #if CONFIG_AVFILTER
2103  avfilter_graph_free(&is->agraph);
2104 #endif
2105  av_frame_free(&frame);
2106  return ret;
2107 }
2108 
2109 static int decoder_start(Decoder *d, int (*fn)(void *), void *arg)
2110 {
2112  d->decoder_tid = SDL_CreateThread(fn, "decoder", arg);
2113  if (!d->decoder_tid) {
2114  av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
2115  return AVERROR(ENOMEM);
2116  }
2117  return 0;
2118 }
2119 
2120 static int video_thread(void *arg)
2121 {
2122  VideoState *is = arg;
2123  AVFrame *frame = av_frame_alloc();
2124  double pts;
2125  double duration;
2126  int ret;
2127  AVRational tb = is->video_st->time_base;
2128  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2129 
2130 #if CONFIG_AVFILTER
2131  AVFilterGraph *graph = avfilter_graph_alloc();
2132  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2133  int last_w = 0;
2134  int last_h = 0;
2135  enum AVPixelFormat last_format = -2;
2136  int last_serial = -1;
2137  int last_vfilter_idx = 0;
2138  if (!graph) {
2139  av_frame_free(&frame);
2140  return AVERROR(ENOMEM);
2141  }
2142 
2143 #endif
2144 
2145  if (!frame) {
2146 #if CONFIG_AVFILTER
2147  avfilter_graph_free(&graph);
2148 #endif
2149  return AVERROR(ENOMEM);
2150  }
2151 
2152  for (;;) {
2153  ret = get_video_frame(is, frame);
2154  if (ret < 0)
2155  goto the_end;
2156  if (!ret)
2157  continue;
2158 
2159 #if CONFIG_AVFILTER
2160  if ( last_w != frame->width
2161  || last_h != frame->height
2162  || last_format != frame->format
2163  || last_serial != is->viddec.pkt_serial
2164  || last_vfilter_idx != is->vfilter_idx) {
2165  av_log(NULL, AV_LOG_DEBUG,
2166  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2167  last_w, last_h,
2168  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2169  frame->width, frame->height,
2170  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2171  avfilter_graph_free(&graph);
2172  graph = avfilter_graph_alloc();
2173  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2174  SDL_Event event;
2175  event.type = FF_QUIT_EVENT;
2176  event.user.data1 = is;
2177  SDL_PushEvent(&event);
2178  goto the_end;
2179  }
2180  filt_in = is->in_video_filter;
2181  filt_out = is->out_video_filter;
2182  last_w = frame->width;
2183  last_h = frame->height;
2184  last_format = frame->format;
2185  last_serial = is->viddec.pkt_serial;
2186  last_vfilter_idx = is->vfilter_idx;
2187  frame_rate = av_buffersink_get_frame_rate(filt_out);
2188  }
2189 
2190  ret = av_buffersrc_add_frame(filt_in, frame);
2191  if (ret < 0)
2192  goto the_end;
2193 
2194  while (ret >= 0) {
2195  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2196 
2197  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2198  if (ret < 0) {
2199  if (ret == AVERROR_EOF)
2200  is->viddec.finished = is->viddec.pkt_serial;
2201  ret = 0;
2202  break;
2203  }
2204 
2205  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2206  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2207  is->frame_last_filter_delay = 0;
2208  tb = av_buffersink_get_time_base(filt_out);
2209 #endif
2210  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2211  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2212  ret = queue_picture(is, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial);
2213  av_frame_unref(frame);
2214 #if CONFIG_AVFILTER
2215  if (is->videoq.serial != is->viddec.pkt_serial)
2216  break;
2217  }
2218 #endif
2219 
2220  if (ret < 0)
2221  goto the_end;
2222  }
2223  the_end:
2224 #if CONFIG_AVFILTER
2225  avfilter_graph_free(&graph);
2226 #endif
2227  av_frame_free(&frame);
2228  return 0;
2229 }
2230 
2231 static int subtitle_thread(void *arg)
2232 {
2233  VideoState *is = arg;
2234  Frame *sp;
2235  int got_subtitle;
2236  double pts;
2237 
2238  for (;;) {
2239  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2240  return 0;
2241 
2242  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2243  break;
2244 
2245  pts = 0;
2246 
2247  if (got_subtitle && sp->sub.format == 0) {
2248  if (sp->sub.pts != AV_NOPTS_VALUE)
2249  pts = sp->sub.pts / (double)AV_TIME_BASE;
2250  sp->pts = pts;
2251  sp->serial = is->subdec.pkt_serial;
2252  sp->width = is->subdec.avctx->width;
2253  sp->height = is->subdec.avctx->height;
2254  sp->uploaded = 0;
2255 
2256  /* now we can update the picture count */
2257  frame_queue_push(&is->subpq);
2258  } else if (got_subtitle) {
2259  avsubtitle_free(&sp->sub);
2260  }
2261  }
2262  return 0;
2263 }
2264 
2265 /* copy samples for viewing in editor window */
2266 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2267 {
2268  int size, len;
2269 
2270  size = samples_size / sizeof(short);
2271  while (size > 0) {
2272  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2273  if (len > size)
2274  len = size;
2275  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2276  samples += len;
2277  is->sample_array_index += len;
2278  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2279  is->sample_array_index = 0;
2280  size -= len;
2281  }
2282 }
2283 
2284 /* return the wanted number of samples to get better sync if sync_type is video
2285  * or external master clock */
2286 static int synchronize_audio(VideoState *is, int nb_samples)
2287 {
2288  int wanted_nb_samples = nb_samples;
2289 
2290  /* if not master, then we try to remove or add samples to correct the clock */
2291  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2292  double diff, avg_diff;
2293  int min_nb_samples, max_nb_samples;
2294 
2295  diff = get_clock(&is->audclk) - get_master_clock(is);
2296 
2297  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2298  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2299  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2300  /* not enough measures to have a correct estimate */
2301  is->audio_diff_avg_count++;
2302  } else {
2303  /* estimate the A-V difference */
2304  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2305 
2306  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2307  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2308  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2309  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2310  wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2311  }
2312  av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2313  diff, avg_diff, wanted_nb_samples - nb_samples,
2314  is->audio_clock, is->audio_diff_threshold);
2315  }
2316  } else {
2317  /* too big difference : may be initial PTS errors, so
2318  reset A-V filter */
2319  is->audio_diff_avg_count = 0;
2320  is->audio_diff_cum = 0;
2321  }
2322  }
2323 
2324  return wanted_nb_samples;
2325 }
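
As a worked example of the clamping above (all numbers made up): with nb_samples = 1024 at 48 kHz and an averaged A-V difference of +20 ms, the raw request would be 1024 + 0.020 * 48000 = 1984 samples, but the ±SAMPLE_CORRECTION_PERCENT_MAX (10 %) clip limits it to 1126:

    #include <stdio.h>

    #define SAMPLE_CORRECTION_PERCENT_MAX 10

    int main(void)
    {
        int    nb_samples = 1024;      /* samples in the decoded frame */
        double diff       = 0.020;     /* audio clock 20 ms off the master clock */
        int    freq       = 48000;     /* audio source sample rate */

        int wanted = nb_samples + (int)(diff * freq);                            /* 1984 */
        int min_nb = nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100;   /*  921 */
        int max_nb = nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100;   /* 1126 */
        if (wanted < min_nb) wanted = min_nb;
        if (wanted > max_nb) wanted = max_nb;

        printf("wanted_nb_samples = %d (clipped to [%d, %d])\n", wanted, min_nb, max_nb);
        return 0;
    }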
2326 
2327 /**
2328  * Decode one audio frame and return its uncompressed size.
2329  *
2330  * The processed audio frame is decoded, converted if required, and
2331  * stored in is->audio_buf, with size in bytes given by the return
2332  * value.
2333  */
2334 static int audio_decode_frame(VideoState *is)
2335 {
2336  int data_size, resampled_data_size;
2337  int64_t dec_channel_layout;
2338  av_unused double audio_clock0;
2339  int wanted_nb_samples;
2340  Frame *af;
2341 
2342  if (is->paused)
2343  return -1;
2344 
2345  do {
2346 #if defined(_WIN32)
2347  while (frame_queue_nb_remaining(&is->sampq) == 0) {
2348  if ((av_gettime_relative() - audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
2349  return -1;
2350  av_usleep (1000);
2351  }
2352 #endif
2353  if (!(af = frame_queue_peek_readable(&is->sampq)))
2354  return -1;
2355  frame_queue_next(&is->sampq);
2356  } while (af->serial != is->audioq.serial);
2357 
2358  data_size = av_samples_get_buffer_size(NULL, af->frame->channels,
2359  af->frame->nb_samples,
2360  af->frame->format, 1);
2361 
2362  dec_channel_layout =
2363  (af->frame->channel_layout && af->frame->channels == av_get_channel_layout_nb_channels(af->frame->channel_layout)) ?
2364  af->frame->channel_layout : av_get_default_channel_layout(af->frame->channels);
2365  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2366 
2367  if (af->frame->format != is->audio_src.fmt ||
2368  dec_channel_layout != is->audio_src.channel_layout ||
2369  af->frame->sample_rate != is->audio_src.freq ||
2370  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2371  swr_free(&is->swr_ctx);
2372  is->swr_ctx = swr_alloc_set_opts(NULL,
2373  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2374  dec_channel_layout, af->frame->format, af->frame->sample_rate,
2375  0, NULL);
2376  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2377  av_log(NULL, AV_LOG_ERROR,
2378  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2379  af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), af->frame->channels,
2380  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2381  swr_free(&is->swr_ctx);
2382  return -1;
2383  }
2384  is->audio_src.channel_layout = dec_channel_layout;
2385  is->audio_src.channels = af->frame->channels;
2386  is->audio_src.freq = af->frame->sample_rate;
2387  is->audio_src.fmt = af->frame->format;
2388  }
2389 
2390  if (is->swr_ctx) {
2391  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2392  uint8_t **out = &is->audio_buf1;
2393  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2394  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2395  int len2;
2396  if (out_size < 0) {
2397  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2398  return -1;
2399  }
2400  if (wanted_nb_samples != af->frame->nb_samples) {
2401  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2402  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2403  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2404  return -1;
2405  }
2406  }
2407  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2408  if (!is->audio_buf1)
2409  return AVERROR(ENOMEM);
2410  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2411  if (len2 < 0) {
2412  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2413  return -1;
2414  }
2415  if (len2 == out_count) {
2416  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2417  if (swr_init(is->swr_ctx) < 0)
2418  swr_free(&is->swr_ctx);
2419  }
2420  is->audio_buf = is->audio_buf1;
2421  resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2422  } else {
2423  is->audio_buf = af->frame->data[0];
2424  resampled_data_size = data_size;
2425  }
2426 
2427  audio_clock0 = is->audio_clock;
2428  /* update the audio clock with the pts */
2429  if (!isnan(af->pts))
2430  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2431  else
2432  is->audio_clock = NAN;
2433  is->audio_clock_serial = af->serial;
2434 #ifdef DEBUG
2435  {
2436  static double last_clock;
2437  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2438  is->audio_clock - last_clock,
2439  is->audio_clock, audio_clock0);
2440  last_clock = is->audio_clock;
2441  }
2442 #endif
2443  return resampled_data_size;
2444 }
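
To make the resampler buffer sizing above concrete: out_count scales the requested sample count by the output/input rate ratio and adds 256 samples of headroom for the resampler's internal delay. With the made-up numbers below (1126 samples, 48 kHz in, 44.1 kHz out) that gives 1034 + 256 = 1290 output samples:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        int wanted_nb_samples = 1126;   /* from synchronize_audio(), example value */
        int in_rate           = 48000;  /* decoded frame sample rate */
        int out_rate          = 44100;  /* sample rate of the SDL audio device */

        int scaled    = (int)((int64_t)wanted_nb_samples * out_rate / in_rate);  /* 1034 */
        int out_count = scaled + 256;                                            /* 1290 */

        printf("out_count = %d samples (%d scaled + 256 headroom)\n", out_count, scaled);
        return 0;
    }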
2445 
2446 /* prepare a new audio buffer */
2447 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2448 {
2449  VideoState *is = opaque;
2450  int audio_size, len1;
2451 
2452  audio_callback_time = av_gettime_relative();
2453 
2454  while (len > 0) {
2455  if (is->audio_buf_index >= is->audio_buf_size) {
2456  audio_size = audio_decode_frame(is);
2457  if (audio_size < 0) {
2458  /* if error, just output silence */
2459  is->audio_buf = NULL;
2460  is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2461  } else {
2462  if (is->show_mode != SHOW_MODE_VIDEO)
2463  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2464  is->audio_buf_size = audio_size;
2465  }
2466  is->audio_buf_index = 0;
2467  }
2468  len1 = is->audio_buf_size - is->audio_buf_index;
2469  if (len1 > len)
2470  len1 = len;
2471  if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
2472  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2473  else {
2474  memset(stream, 0, len1);
2475  if (!is->muted && is->audio_buf)
2476  SDL_MixAudioFormat(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, is->audio_volume);
2477  }
2478  len -= len1;
2479  stream += len1;
2480  is->audio_buf_index += len1;
2481  }
2482  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2483  /* Let's assume the audio driver that is used by SDL has two periods. */
2484  if (!isnan(is->audio_clock)) {
2485  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2486  sync_clock_to_slave(&is->extclk, &is->audclk);
2487  }
2488 }
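
The set_clock_at() update above subtracts the audio still in flight: two SDL hardware periods plus the decoded bytes not yet handed to the callback. A small sketch with made-up figures (8192-byte period, 1024 bytes pending, 44.1 kHz stereo S16) shows the audio clock being set roughly 99 ms behind the last decoded sample:

    #include <stdio.h>

    int main(void)
    {
        int audio_hw_buf_size    = 8192;    /* bytes per SDL hardware period (example) */
        int audio_write_buf_size = 1024;    /* decoded bytes not yet handed to SDL     */
        int bytes_per_sec        = 176400;  /* 44100 Hz * 2 channels * 2 bytes         */

        /* two periods assumed buffered by the driver, plus our own leftover bytes */
        double latency = (double)(2 * audio_hw_buf_size + audio_write_buf_size)
                         / bytes_per_sec;

        printf("audio clock set %.3f s behind the last decoded sample\n", latency);
        return 0;
    }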
2489 
2490 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2491 {
2492  SDL_AudioSpec wanted_spec, spec;
2493  const char *env;
2494  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2495  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2496  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2497 
2498  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2499  if (env) {
2500  wanted_nb_channels = atoi(env);
2501  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2502  }
2503  if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2504  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2505  wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2506  }
2507  wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2508  wanted_spec.channels = wanted_nb_channels;
2509  wanted_spec.freq = wanted_sample_rate;
2510  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2511  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2512  return -1;
2513  }
2514  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2515  next_sample_rate_idx--;
2516  wanted_spec.format = AUDIO_S16SYS;
2517  wanted_spec.silence = 0;
2518  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
2519  wanted_spec.callback = sdl_audio_callback;
2520  wanted_spec.userdata = opaque;
2521  while (!(audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE))) {
2522  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2523  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2524  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2525  if (!wanted_spec.channels) {
2526  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2527  wanted_spec.channels = wanted_nb_channels;
2528  if (!wanted_spec.freq) {
2529  av_log(NULL, AV_LOG_ERROR,
2530  "No more combinations to try, audio open failed\n");
2531  return -1;
2532  }
2533  }
2534  wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2535  }
2536  if (spec.format != AUDIO_S16SYS) {
2537  av_log(NULL, AV_LOG_ERROR,
2538  "SDL advised audio format %d is not supported!\n", spec.format);
2539  return -1;
2540  }
2541  if (spec.channels != wanted_spec.channels) {
2542  wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2543  if (!wanted_channel_layout) {
2544  av_log(NULL, AV_LOG_ERROR,
2545  "SDL advised channel count %d is not supported!\n", spec.channels);
2546  return -1;
2547  }
2548  }
2549 
2550  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2551  audio_hw_params->freq = spec.freq;
2552  audio_hw_params->channel_layout = wanted_channel_layout;
2553  audio_hw_params->channels = spec.channels;
2554  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
2555  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2556  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2557  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2558  return -1;
2559  }
2560  return spec.size;
2561 }
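
The wanted_spec.samples expression above rounds freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC up to a power of two and never goes below SDL_AUDIO_MIN_BUFFER_SIZE, which caps the callback rate at roughly 30 per second. A worked example for a 48 kHz device (ilog2() below stands in for av_log2() in this sketch):

    #include <stdio.h>

    #define SDL_AUDIO_MIN_BUFFER_SIZE       512
    #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30

    /* floor(log2(x)) for x > 0; stand-in for av_log2() */
    static int ilog2(unsigned x) { int n = -1; while (x) { x >>= 1; n++; } return n; }

    int main(void)
    {
        int freq    = 48000;                                             /* example rate */
        int samples = 2 << ilog2(freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC);
        if (samples < SDL_AUDIO_MIN_BUFFER_SIZE)
            samples = SDL_AUDIO_MIN_BUFFER_SIZE;

        /* 48000 / 30 = 1600 -> rounded up to 2048 samples -> about 23 callbacks/s */
        printf("samples = %d (~%d callbacks per second)\n", samples, freq / samples);
        return 0;
    }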
2562 
2563 /* open a given stream. Return 0 if OK */
2564 static int stream_component_open(VideoState *is, int stream_index)
2565 {
2566  AVFormatContext *ic = is->ic;
2567  AVCodecContext *avctx;
2568  AVCodec *codec;
2569  const char *forced_codec_name = NULL;
2570  AVDictionary *opts = NULL;
2571  AVDictionaryEntry *t = NULL;
2572  int sample_rate, nb_channels;
2573  int64_t channel_layout;
2574  int ret = 0;
2575  int stream_lowres = lowres;
2576 
2577  if (stream_index < 0 || stream_index >= ic->nb_streams)
2578  return -1;
2579 
2580  avctx = avcodec_alloc_context3(NULL);
2581  if (!avctx)
2582  return AVERROR(ENOMEM);
2583 
2584  ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
2585  if (ret < 0)
2586  goto fail;
2587  avctx->pkt_timebase = ic->streams[stream_index]->time_base;
2588 
2589  codec = avcodec_find_decoder(avctx->codec_id);
2590 
2591  switch(avctx->codec_type){
2592  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2593  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2594  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2595  }
2596  if (forced_codec_name)
2597  codec = avcodec_find_decoder_by_name(forced_codec_name);
2598  if (!codec) {
2599  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2600  "No codec could be found with name '%s'\n", forced_codec_name);
2601  else av_log(NULL, AV_LOG_WARNING,
2602  "No decoder could be found for codec %s\n", avcodec_get_name(avctx->codec_id));
2603  ret = AVERROR(EINVAL);
2604  goto fail;
2605  }
2606 
2607  avctx->codec_id = codec->id;
2608  if (stream_lowres > codec->max_lowres) {
2609  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2610  codec->max_lowres);
2611  stream_lowres = codec->max_lowres;
2612  }
2613  avctx->lowres = stream_lowres;
2614 
2615  if (fast)
2616  avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2617 
2618  opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2619  if (!av_dict_get(opts, "threads", NULL, 0))
2620  av_dict_set(&opts, "threads", "auto", 0);
2621  if (stream_lowres)
2622  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2623  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2624  av_dict_set(&opts, "refcounted_frames", "1", 0);
2625  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2626  goto fail;
2627  }
2628  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2629  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2630  ret = AVERROR_OPTION_NOT_FOUND;
2631  goto fail;
2632  }
2633 
2634  is->eof = 0;
2635  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2636  switch (avctx->codec_type) {
2637  case AVMEDIA_TYPE_AUDIO:
2638 #if CONFIG_AVFILTER
2639  {
2640  AVFilterContext *sink;
2641 
2642  is->audio_filter_src.freq = avctx->sample_rate;
2643  is->audio_filter_src.channels = avctx->channels;
2644  is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2645  is->audio_filter_src.fmt = avctx->sample_fmt;
2646  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2647  goto fail;
2648  sink = is->out_audio_filter;
2649  sample_rate = av_buffersink_get_sample_rate(sink);
2650  nb_channels = av_buffersink_get_channels(sink);
2651  channel_layout = av_buffersink_get_channel_layout(sink);
2652  }
2653 #else
2654  sample_rate = avctx->sample_rate;
2655  nb_channels = avctx->channels;
2656  channel_layout = avctx->channel_layout;
2657 #endif
2658 
2659  /* prepare audio output */
2660  if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2661  goto fail;
2662  is->audio_hw_buf_size = ret;
2663  is->audio_src = is->audio_tgt;
2664  is->audio_buf_size = 0;
2665  is->audio_buf_index = 0;
2666 
2667  /* init averaging filter */
2668  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2669  is->audio_diff_avg_count = 0;
2670  /* since we do not have a precise enough audio FIFO fullness,
2671  we correct audio sync only if larger than this threshold */
2672  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2673 
2674  is->audio_stream = stream_index;
2675  is->audio_st = ic->streams[stream_index];
2676 
2677  decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
2678  if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
2679  is->auddec.start_pts = is->audio_st->start_time;
2680  is->auddec.start_pts_tb = is->audio_st->time_base;
2681  }
2682  if ((ret = decoder_start(&is->auddec, audio_thread, is)) < 0)
2683  goto out;
2684  SDL_PauseAudioDevice(audio_dev, 0);
2685  break;
2686  case AVMEDIA_TYPE_VIDEO:
2687  is->video_stream = stream_index;
2688  is->video_st = ic->streams[stream_index];
2689 
2690  decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
2691  if ((ret = decoder_start(&is->viddec, video_thread, is)) < 0)
2692  goto out;
2693  is->queue_attachments_req = 1;
2694  break;
2695  case AVMEDIA_TYPE_SUBTITLE:
2696  is->subtitle_stream = stream_index;
2697  is->subtitle_st = ic->streams[stream_index];
2698 
2699  decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread);
2700  if ((ret = decoder_start(&is->subdec, subtitle_thread, is)) < 0)
2701  goto out;
2702  break;
2703  default:
2704  break;
2705  }
2706  goto out;
2707 
2708 fail:
2709  avcodec_free_context(&avctx);
2710 out:
2711  av_dict_free(&opts);
2712 
2713  return ret;
2714 }
2715 
2716 static int decode_interrupt_cb(void *ctx)
2717 {
2718  VideoState *is = ctx;
2719  return is->abort_request;
2720 }
2721 
2722 static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue) {
2723  return stream_id < 0 ||
2724  queue->abort_request ||
2725  (st->disposition & AV_DISPOSITION_ATTACHED_PIC) ||
2726  queue->nb_packets > MIN_FRAMES && (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0);
2727 }
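
In other words, a queue counts as full once it holds more than MIN_FRAMES packets and, when packet durations are known, more than one second of material in stream time-base units. The sketch below runs that test with made-up but typical MPEG-TS numbers (90 kHz time base, 25 fps, so each packet lasts 3600 ticks):

    #include <stdio.h>
    #include <stdint.h>

    #define MIN_FRAMES 25

    int main(void)
    {
        int     nb_packets     = 26;                 /* packets currently queued   */
        int64_t queue_duration = 26LL * 3600;        /* summed durations, 1/90000 s */
        double  time_base      = 1.0 / 90000.0;

        int enough = nb_packets > MIN_FRAMES &&
                     time_base * (double)queue_duration > 1.0;

        printf("%.2f s queued in %d packets -> enough = %d\n",
               time_base * (double)queue_duration, nb_packets, enough);
        return 0;
    }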
2728 
2729 static int is_realtime(AVFormatContext *s)
2730 {
2731  if( !strcmp(s->iformat->name, "rtp")
2732  || !strcmp(s->iformat->name, "rtsp")
2733  || !strcmp(s->iformat->name, "sdp")
2734  )
2735  return 1;
2736 
2737  if(s->pb && ( !strncmp(s->url, "rtp:", 4)
2738  || !strncmp(s->url, "udp:", 4)
2739  )
2740  )
2741  return 1;
2742  return 0;
2743 }
2744 
2745 /* this thread gets the stream from the disk or the network */
2746 static int read_thread(void *arg)
2747 {
2748  VideoState *is = arg;
2749  AVFormatContext *ic = NULL;
2750  int err, i, ret;
2751  int st_index[AVMEDIA_TYPE_NB];
2752  AVPacket pkt1, *pkt = &pkt1;
2753  int64_t stream_start_time;
2754  int pkt_in_play_range = 0;
2755  AVDictionaryEntry *t;
2756  SDL_mutex *wait_mutex = SDL_CreateMutex();
2757  int scan_all_pmts_set = 0;
2758  int64_t pkt_ts;
2759 
2760  if (!wait_mutex) {
2761  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
2762  ret = AVERROR(ENOMEM);
2763  goto fail;
2764  }
2765 
2766  memset(st_index, -1, sizeof(st_index));
2767  is->last_video_stream = is->video_stream = -1;
2768  is->last_audio_stream = is->audio_stream = -1;
2769  is->last_subtitle_stream = is->subtitle_stream = -1;
2770  is->eof = 0;
2771 
2772  ic = avformat_alloc_context();
2773  if (!ic) {
2774  av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2775  ret = AVERROR(ENOMEM);
2776  goto fail;
2777  }
2778  ic->interrupt_callback.callback = decode_interrupt_cb;
2779  ic->interrupt_callback.opaque = is;
2780  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2781  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2782  scan_all_pmts_set = 1;
2783  }
2784  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2785  if (err < 0) {
2786  print_error(is->filename, err);
2787  ret = -1;
2788  goto fail;
2789  }
2790  if (scan_all_pmts_set)
2791  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2792 
2793  if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2794  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2795  ret = AVERROR_OPTION_NOT_FOUND;
2796  goto fail;
2797  }
2798  is->ic = ic;
2799 
2800  if (genpts)
2801  ic->flags |= AVFMT_FLAG_GENPTS;
2802 
2803  av_format_inject_global_side_data(ic);
2804 
2805  if (find_stream_info) {
2806  AVDictionary **opts = setup_find_stream_info_opts(ic, codec_opts);
2807  int orig_nb_streams = ic->nb_streams;
2808 
2809  err = avformat_find_stream_info(ic, opts);
2810 
2811  for (i = 0; i < orig_nb_streams; i++)
2812  av_dict_free(&opts[i]);
2813  av_freep(&opts);
2814 
2815  if (err < 0) {
2816  av_log(NULL, AV_LOG_WARNING,
2817  "%s: could not find codec parameters\n", is->filename);
2818  ret = -1;
2819  goto fail;
2820  }
2821  }
2822 
2823  if (ic->pb)
2824  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2825 
2826  if (seek_by_bytes < 0)
2827  seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
2828 
2829  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2830 
2831  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2832  window_title = av_asprintf("%s - %s", t->value, input_filename);
2833 
2834  /* if seeking requested, we execute it */
2835  if (start_time != AV_NOPTS_VALUE) {
2836  int64_t timestamp;
2837 
2838  timestamp = start_time;
2839  /* add the stream start time */
2840  if (ic->start_time != AV_NOPTS_VALUE)
2841  timestamp += ic->start_time;
2842  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2843  if (ret < 0) {
2844  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2845  is->filename, (double)timestamp / AV_TIME_BASE);
2846  }
2847  }
2848 
2849  is->realtime = is_realtime(ic);
2850 
2851  if (show_status)
2852  av_dump_format(ic, 0, is->filename, 0);
2853 
2854  for (i = 0; i < ic->nb_streams; i++) {
2855  AVStream *st = ic->streams[i];
2856  enum AVMediaType type = st->codecpar->codec_type;
2857  st->discard = AVDISCARD_ALL;
2858  if (type >= 0 && wanted_stream_spec[type] && st_index[type] == -1)
2859  if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
2860  st_index[type] = i;
2861  }
2862  for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
2863  if (wanted_stream_spec[i] && st_index[i] == -1) {
2864  av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
2865  st_index[i] = INT_MAX;
2866  }
2867  }
2868 
2869  if (!video_disable)
2870  st_index[AVMEDIA_TYPE_VIDEO] =
2871  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2872  st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2873  if (!audio_disable)
2874  st_index[AVMEDIA_TYPE_AUDIO] =
2875  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2876  st_index[AVMEDIA_TYPE_AUDIO],
2877  st_index[AVMEDIA_TYPE_VIDEO],
2878  NULL, 0);
2879  if (!video_disable && !subtitle_disable)
2880  st_index[AVMEDIA_TYPE_SUBTITLE] =
2881  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2882  st_index[AVMEDIA_TYPE_SUBTITLE],
2883  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2884  st_index[AVMEDIA_TYPE_AUDIO] :
2885  st_index[AVMEDIA_TYPE_VIDEO]),
2886  NULL, 0);
2887 
2888  is->show_mode = show_mode;
2889  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2890  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2891  AVCodecParameters *codecpar = st->codecpar;
2892  AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
2893  if (codecpar->width)
2894  set_default_window_size(codecpar->width, codecpar->height, sar);
2895  }
2896 
2897  /* open the streams */
2898  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2899  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2900  }
2901 
2902  ret = -1;
2903  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2904  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2905  }
2906  if (is->show_mode == SHOW_MODE_NONE)
2907  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2908 
2909  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2910  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2911  }
2912 
2913  if (is->video_stream < 0 && is->audio_stream < 0) {
2914  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
2915  is->filename);
2916  ret = -1;
2917  goto fail;
2918  }
2919 
2920  if (infinite_buffer < 0 && is->realtime)
2921  infinite_buffer = 1;
2922 
2923  for (;;) {
2924  if (is->abort_request)
2925  break;
2926  if (is->paused != is->last_paused) {
2927  is->last_paused = is->paused;
2928  if (is->paused)
2929  is->read_pause_return = av_read_pause(ic);
2930  else
2931  av_read_play(ic);
2932  }
2933 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2934  if (is->paused &&
2935  (!strcmp(ic->iformat->name, "rtsp") ||
2936  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2937  /* wait 10 ms to avoid trying to get another packet */
2938  /* XXX: horrible */
2939  SDL_Delay(10);
2940  continue;
2941  }
2942 #endif
2943  if (is->seek_req) {
2944  int64_t seek_target = is->seek_pos;
2945  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2946  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2947 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2948 // of the seek_pos/seek_rel variables
2949 
2950  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2951  if (ret < 0) {
2952  av_log(NULL, AV_LOG_ERROR,
2953  "%s: error while seeking\n", is->ic->url);
2954  } else {
2955  if (is->audio_stream >= 0) {
2956  packet_queue_flush(&is->audioq);
2957  packet_queue_put(&is->audioq, &flush_pkt);
2958  }
2959  if (is->subtitle_stream >= 0) {
2960  packet_queue_flush(&is->subtitleq);
2961  packet_queue_put(&is->subtitleq, &flush_pkt);
2962  }
2963  if (is->video_stream >= 0) {
2964  packet_queue_flush(&is->videoq);
2965  packet_queue_put(&is->videoq, &flush_pkt);
2966  }
2967  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
2968  set_clock(&is->extclk, NAN, 0);
2969  } else {
2970  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
2971  }
2972  }
2973  is->seek_req = 0;
2974  is->queue_attachments_req = 1;
2975  is->eof = 0;
2976  if (is->paused)
2977  step_to_next_frame(is);
2978  }
2979  if (is->queue_attachments_req) {
2980  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
2981  AVPacket copy = { 0 };
2982  if ((ret = av_packet_ref(&copy, &is->video_st->attached_pic)) < 0)
2983  goto fail;
2984  packet_queue_put(&is->videoq, &copy);
2985  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
2986  }
2987  is->queue_attachments_req = 0;
2988  }
2989 
2990  /* if the queues are full, no need to read more */
2991  if (infinite_buffer<1 &&
2992  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2993  || (stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq) &&
2994  stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq) &&
2995  stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq)))) {
2996  /* wait 10 ms */
2997  SDL_LockMutex(wait_mutex);
2998  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2999  SDL_UnlockMutex(wait_mutex);
3000  continue;
3001  }
3002  if (!is->paused &&
3003  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
3004  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
3005  if (loop != 1 && (!loop || --loop)) {
3006  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
3007  } else if (autoexit) {
3008  ret = AVERROR_EOF;
3009  goto fail;
3010  }
3011  }
3012  ret = av_read_frame(ic, pkt);
3013  if (ret < 0) {
3014  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
3015  if (is->video_stream >= 0)
3016  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
3017  if (is->audio_stream >= 0)
3018  packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
3019  if (is->subtitle_stream >= 0)
3020  packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
3021  is->eof = 1;
3022  }
3023  if (ic->pb && ic->pb->error)
3024  break;
3025  SDL_LockMutex(wait_mutex);
3026  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3027  SDL_UnlockMutex(wait_mutex);
3028  continue;
3029  } else {
3030  is->eof = 0;
3031  }
3032  /* check if packet is in play range specified by user, then queue, otherwise discard */
3033  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3034  pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
3035  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3036  (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3037  av_q2d(ic->streams[pkt->stream_index]->time_base) -
3038  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3039  <= ((double)duration / 1000000);
3040  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3041  packet_queue_put(&is->audioq, pkt);
3042  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3043  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3044  packet_queue_put(&is->videoq, pkt);
3045  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3046  packet_queue_put(&is->subtitleq, pkt);
3047  } else {
3048  av_packet_unref(pkt);
3049  }
3050  }
3051 
3052  ret = 0;
3053  fail:
3054  if (ic && !is->ic)
3055  avformat_close_input(&ic);
3056 
3057  if (ret != 0) {
3058  SDL_Event event;
3059 
3060  event.type = FF_QUIT_EVENT;
3061  event.user.data1 = is;
3062  SDL_PushEvent(&event);
3063  }
3064  SDL_DestroyMutex(wait_mutex);
3065  return 0;
3066 }
3067 
3068 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
3069 {
3070  VideoState *is;
3071 
3072  is = av_mallocz(sizeof(VideoState));
3073  if (!is)
3074  return NULL;
3075  is->filename = av_strdup(filename);
3076  if (!is->filename)
3077  goto fail;
3078  is->iformat = iformat;
3079  is->ytop = 0;
3080  is->xleft = 0;
3081 
3082  /* start video display */
3083  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3084  goto fail;
3085  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3086  goto fail;
3087  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3088  goto fail;
3089 
3090  if (packet_queue_init(&is->videoq) < 0 ||
3091  packet_queue_init(&is->audioq) < 0 ||
3092  packet_queue_init(&is->subtitleq) < 0)
3093  goto fail;
3094 
3095  if (!(is->continue_read_thread = SDL_CreateCond())) {
3096  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
3097  goto fail;
3098  }
3099 
3100  init_clock(&is->vidclk, &is->videoq.serial);
3101  init_clock(&is->audclk, &is->audioq.serial);
3102  init_clock(&is->extclk, &is->extclk.serial);
3103  is->audio_clock_serial = -1;
3104  if (startup_volume < 0)
3105  av_log(NULL, AV_LOG_WARNING, "-volume=%d < 0, setting to 0\n", startup_volume);
3106  if (startup_volume > 100)
3107  av_log(NULL, AV_LOG_WARNING, "-volume=%d > 100, setting to 100\n", startup_volume);
3108  startup_volume = av_clip(startup_volume, 0, 100);
3109  startup_volume = av_clip(SDL_MIX_MAXVOLUME * startup_volume / 100, 0, SDL_MIX_MAXVOLUME);
3110  is->audio_volume = startup_volume;
3111  is->muted = 0;
3112  is->av_sync_type = av_sync_type;
3113  is->read_tid = SDL_CreateThread(read_thread, "read_thread", is);
3114  if (!is->read_tid) {
3115  av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
3116 fail:
3117  stream_close(is);
3118  return NULL;
3119  }
3120  return is;
3121 }
3122 
3123 static void stream_cycle_channel(VideoState *is, int codec_type)
3124 {
3125  AVFormatContext *ic = is->ic;
3126  int start_index, stream_index;
3127  int old_index;
3128  AVStream *st;
3129  AVProgram *p = NULL;
3130  int nb_streams = is->ic->nb_streams;
3131 
3132  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3133  start_index = is->last_video_stream;
3134  old_index = is->video_stream;
3135  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3136  start_index = is->last_audio_stream;
3137  old_index = is->audio_stream;
3138  } else {
3139  start_index = is->last_subtitle_stream;
3140  old_index = is->subtitle_stream;
3141  }
3142  stream_index = start_index;
3143 
3144  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3145  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3146  if (p) {
3147  nb_streams = p->nb_stream_indexes;
3148  for (start_index = 0; start_index < nb_streams; start_index++)
3149  if (p->stream_index[start_index] == stream_index)
3150  break;
3151  if (start_index == nb_streams)
3152  start_index = -1;
3153  stream_index = start_index;
3154  }
3155  }
3156 
3157  for (;;) {
3158  if (++stream_index >= nb_streams)
3159  {
3160  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3161  {
3162  stream_index = -1;
3163  is->last_subtitle_stream = -1;
3164  goto the_end;
3165  }
3166  if (start_index == -1)
3167  return;
3168  stream_index = 0;
3169  }
3170  if (stream_index == start_index)
3171  return;
3172  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3173  if (st->codecpar->codec_type == codec_type) {
3174  /* check that parameters are OK */
3175  switch (codec_type) {
3176  case AVMEDIA_TYPE_AUDIO:
3177  if (st->codecpar->sample_rate != 0 &&
3178  st->codecpar->channels != 0)
3179  goto the_end;
3180  break;
3181  case AVMEDIA_TYPE_VIDEO:
3182  case AVMEDIA_TYPE_SUBTITLE:
3183  goto the_end;
3184  default:
3185  break;
3186  }
3187  }
3188  }
3189  the_end:
3190  if (p && stream_index != -1)
3191  stream_index = p->stream_index[stream_index];
3192  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3193  av_get_media_type_string(codec_type),
3194  old_index,
3195  stream_index);
3196 
3197  stream_component_close(is, old_index);
3198  stream_component_open(is, stream_index);
3199 }
3200 
3201 
3202 static void toggle_full_screen(VideoState *is)
3203 {
3204  is_full_screen = !is_full_screen;
3205  SDL_SetWindowFullscreen(window, is_full_screen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0);
3206 }
3207 
3208 static void toggle_audio_display(VideoState *is)
3209 {
3210  int next = is->show_mode;
3211  do {
3212  next = (next + 1) % SHOW_MODE_NB;
3213  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3214  if (is->show_mode != next) {
3215  is->force_refresh = 1;
3216  is->show_mode = next;
3217  }
3218 }
3219 
3220 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3221  double remaining_time = 0.0;
3222  SDL_PumpEvents();
3223  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
3224  if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3225  SDL_ShowCursor(0);
3226  cursor_hidden = 1;
3227  }
3228  if (remaining_time > 0.0)
3229  av_usleep((int64_t)(remaining_time * 1000000.0));
3230  remaining_time = REFRESH_RATE;
3231  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3232  video_refresh(is, &remaining_time);
3233  SDL_PumpEvents();
3234  }
3235 }
3236 
3237 static void seek_chapter(VideoState *is, int incr)
3238 {
3239  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3240  int i;
3241 
3242  if (!is->ic->nb_chapters)
3243  return;
3244 
3245  /* find the current chapter */
3246  for (i = 0; i < is->ic->nb_chapters; i++) {
3247  AVChapter *ch = is->ic->chapters[i];
3248  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3249  i--;
3250  break;
3251  }
3252  }
3253 
3254  i += incr;
3255  i = FFMAX(i, 0);
3256  if (i >= is->ic->nb_chapters)
3257  return;
3258 
3259  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3260  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3261  AV_TIME_BASE_Q), 0, 0);
3262 }
3263 
3264 /* handle an event sent by the GUI */
3265 static void event_loop(VideoState *cur_stream)
3266 {
3267  SDL_Event event;
3268  double incr, pos, frac;
3269 
3270  for (;;) {
3271  double x;
3272  refresh_loop_wait_event(cur_stream, &event);
3273  switch (event.type) {
3274  case SDL_KEYDOWN:
3275  if (exit_on_keydown || event.key.keysym.sym == SDLK_ESCAPE || event.key.keysym.sym == SDLK_q) {
3276  do_exit(cur_stream);
3277  break;
3278  }
3279  // If we don't yet have a window, skip all key events, because read_thread might still be initializing...
3280  if (!cur_stream->width)
3281  continue;
3282  switch (event.key.keysym.sym) {
3283  case SDLK_f:
3284  toggle_full_screen(cur_stream);
3285  cur_stream->force_refresh = 1;
3286  break;
3287  case SDLK_p:
3288  case SDLK_SPACE:
3289  toggle_pause(cur_stream);
3290  break;
3291  case SDLK_m:
3292  toggle_mute(cur_stream);
3293  break;
3294  case SDLK_KP_MULTIPLY:
3295  case SDLK_0:
3296  update_volume(cur_stream, 1, SDL_VOLUME_STEP);
3297  break;
3298  case SDLK_KP_DIVIDE:
3299  case SDLK_9:
3300  update_volume(cur_stream, -1, SDL_VOLUME_STEP);
3301  break;
3302  case SDLK_s: // S: Step to next frame
3303  step_to_next_frame(cur_stream);
3304  break;
3305  case SDLK_a:
3306  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3307  break;
3308  case SDLK_v:
3309  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3310  break;
3311  case SDLK_c:
3312  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3313  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3314  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3315  break;
3316  case SDLK_t:
3317  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3318  break;
3319  case SDLK_w:
3320 #if CONFIG_AVFILTER
3321  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3322  if (++cur_stream->vfilter_idx >= nb_vfilters)
3323  cur_stream->vfilter_idx = 0;
3324  } else {
3325  cur_stream->vfilter_idx = 0;
3326  toggle_audio_display(cur_stream);
3327  }
3328 #else
3329  toggle_audio_display(cur_stream);
3330 #endif
3331  break;
3332  case SDLK_PAGEUP:
3333  if (cur_stream->ic->nb_chapters <= 1) {
3334  incr = 600.0;
3335  goto do_seek;
3336  }
3337  seek_chapter(cur_stream, 1);
3338  break;
3339  case SDLK_PAGEDOWN:
3340  if (cur_stream->ic->nb_chapters <= 1) {
3341  incr = -600.0;
3342  goto do_seek;
3343  }
3344  seek_chapter(cur_stream, -1);
3345  break;
3346  case SDLK_LEFT:
3347  incr = seek_interval ? -seek_interval : -10.0;
3348  goto do_seek;
3349  case SDLK_RIGHT:
3350  incr = seek_interval ? seek_interval : 10.0;
3351  goto do_seek;
3352  case SDLK_UP:
3353  incr = 60.0;
3354  goto do_seek;
3355  case SDLK_DOWN:
3356  incr = -60.0;
3357  do_seek:
3358  if (seek_by_bytes) {
3359  pos = -1;
3360  if (pos < 0 && cur_stream->video_stream >= 0)
3361  pos = frame_queue_last_pos(&cur_stream->pictq);
3362  if (pos < 0 && cur_stream->audio_stream >= 0)
3363  pos = frame_queue_last_pos(&cur_stream->sampq);
3364  if (pos < 0)
3365  pos = avio_tell(cur_stream->ic->pb);
3366  if (cur_stream->ic->bit_rate)
3367  incr *= cur_stream->ic->bit_rate / 8.0;
3368  else
3369  incr *= 180000.0;
3370  pos += incr;
3371  stream_seek(cur_stream, pos, incr, 1);
3372  } else {
3373  pos = get_master_clock(cur_stream);
3374  if (isnan(pos))
3375  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3376  pos += incr;
3377  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3378  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3379  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3380  }
3381  break;
3382  default:
3383  break;
3384  }
3385  break;
3386  case SDL_MOUSEBUTTONDOWN:
3387  if (exit_on_mousedown) {
3388  do_exit(cur_stream);
3389  break;
3390  }
3391  if (event.button.button == SDL_BUTTON_LEFT) {
3392  static int64_t last_mouse_left_click = 0;
3393  if (av_gettime_relative() - last_mouse_left_click <= 500000) {
3394  toggle_full_screen(cur_stream);
3395  cur_stream->force_refresh = 1;
3396  last_mouse_left_click = 0;
3397  } else {
3398  last_mouse_left_click = av_gettime_relative();
3399  }
3400  }
3401  case SDL_MOUSEMOTION:
3402  if (cursor_hidden) {
3403  SDL_ShowCursor(1);
3404  cursor_hidden = 0;
3405  }
3406  cursor_last_shown = av_gettime_relative();
3407  if (event.type == SDL_MOUSEBUTTONDOWN) {
3408  if (event.button.button != SDL_BUTTON_RIGHT)
3409  break;
3410  x = event.button.x;
3411  } else {
3412  if (!(event.motion.state & SDL_BUTTON_RMASK))
3413  break;
3414  x = event.motion.x;
3415  }
3416  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3417  uint64_t size = avio_size(cur_stream->ic->pb);
3418  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3419  } else {
3420  int64_t ts;
3421  int ns, hh, mm, ss;
3422  int tns, thh, tmm, tss;
3423  tns = cur_stream->ic->duration / 1000000LL;
3424  thh = tns / 3600;
3425  tmm = (tns % 3600) / 60;
3426  tss = (tns % 60);
3427  frac = x / cur_stream->width;
3428  ns = frac * tns;
3429  hh = ns / 3600;
3430  mm = (ns % 3600) / 60;
3431  ss = (ns % 60);
3432  av_log(NULL, AV_LOG_INFO,
3433  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3434  hh, mm, ss, thh, tmm, tss);
3435  ts = frac * cur_stream->ic->duration;
3436  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3437  ts += cur_stream->ic->start_time;
3438  stream_seek(cur_stream, ts, 0, 0);
3439  }
3440  break;
3441  case SDL_WINDOWEVENT:
3442  switch (event.window.event) {
3443  case SDL_WINDOWEVENT_RESIZED:
3444  screen_width = cur_stream->width = event.window.data1;
3445  screen_height = cur_stream->height = event.window.data2;
3446  if (cur_stream->vis_texture) {
3447  SDL_DestroyTexture(cur_stream->vis_texture);
3448  cur_stream->vis_texture = NULL;
3449  }
3450  case SDL_WINDOWEVENT_EXPOSED:
3451  cur_stream->force_refresh = 1;
3452  }
3453  break;
3454  case SDL_QUIT:
3455  case FF_QUIT_EVENT:
3456  do_exit(cur_stream);
3457  break;
3458  default:
3459  break;
3460  }
3461  }
3462 }
3463 
3464 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3465 {
3466  av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3467  return opt_default(NULL, "video_size", arg);
3468 }
3469 
3470 static int opt_width(void *optctx, const char *opt, const char *arg)
3471 {
3472  screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3473  return 0;
3474 }
3475 
3476 static int opt_height(void *optctx, const char *opt, const char *arg)
3477 {
3478  screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3479  return 0;
3480 }
3481 
3482 static int opt_format(void *optctx, const char *opt, const char *arg)
3483 {
3484  file_iformat = av_find_input_format(arg);
3485  if (!file_iformat) {
3486  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3487  return AVERROR(EINVAL);
3488  }
3489  return 0;
3490 }
3491 
3492 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3493 {
3494  av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3495  return opt_default(NULL, "pixel_format", arg);
3496 }
3497 
3498 static int opt_sync(void *optctx, const char *opt, const char *arg)
3499 {
3500  if (!strcmp(arg, "audio"))
3501  av_sync_type = AV_SYNC_AUDIO_MASTER;
3502  else if (!strcmp(arg, "video"))
3503  av_sync_type = AV_SYNC_VIDEO_MASTER;
3504  else if (!strcmp(arg, "ext"))
3505  av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3506  else {
3507  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3508  exit(1);
3509  }
3510  return 0;
3511 }
3512 
3513 static int opt_seek(void *optctx, const char *opt, const char *arg)
3514 {
3515  start_time = parse_time_or_die(opt, arg, 1);
3516  return 0;
3517 }
3518 
3519 static int opt_duration(void *optctx, const char *opt, const char *arg)
3520 {
3521  duration = parse_time_or_die(opt, arg, 1);
3522  return 0;
3523 }
3524 
3525 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3526 {
3527  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3528  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3529  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3530  parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3531  return 0;
3532 }
3533 
3534 static void opt_input_file(void *optctx, const char *filename)
3535 {
3536  if (input_filename) {
3537  av_log(NULL, AV_LOG_FATAL,
3538  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3539  filename, input_filename);
3540  exit(1);
3541  }
3542  if (!strcmp(filename, "-"))
3543  filename = "pipe:";
3544  input_filename = filename;
3545 }
3546 
3547 static int opt_codec(void *optctx, const char *opt, const char *arg)
3548 {
3549  const char *spec = strchr(opt, ':');
3550  if (!spec) {
3551  av_log(NULL, AV_LOG_ERROR,
3552  "No media specifier was specified in '%s' in option '%s'\n",
3553  arg, opt);
3554  return AVERROR(EINVAL);
3555  }
3556  spec++;
3557  switch (spec[0]) {
3558  case 'a' : audio_codec_name = arg; break;
3559  case 's' : subtitle_codec_name = arg; break;
3560  case 'v' : video_codec_name = arg; break;
3561  default:
3562  av_log(NULL, AV_LOG_ERROR,
3563  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3564  return AVERROR(EINVAL);
3565  }
3566  return 0;
3567 }
3568 
3569 static int dummy;
3570 
3571 static const OptionDef options[] = {
3572  CMDUTILS_COMMON_OPTIONS
3573  { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3574  { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3575  { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3576  { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3577  { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3578  { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3579  { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3580  { "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3581  { "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3582  { "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3583  { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3584  { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3585  { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3586  { "seek_interval", OPT_FLOAT | HAS_ARG, { &seek_interval }, "set seek interval for left/right keys, in seconds", "seconds" },
3587  { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3588  { "noborder", OPT_BOOL, { &borderless }, "borderless window" },
3589  { "volume", OPT_INT | HAS_ARG, { &startup_volume}, "set startup volume 0=min 100=max", "volume" },
3590  { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3591  { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3592  { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3593  { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3594  { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3595  { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3596  { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3597  { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3598  { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3599  { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3600  { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3601  { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3602  { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3603  { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3604  { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3605 #if CONFIG_AVFILTER
3606  { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3607  { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3608 #endif
3609  { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3610  { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3611  { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3612  { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3613  { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3614  { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3615  { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3616  { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3617  { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
3618  { "find_stream_info", OPT_BOOL | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
3619  "read and decode the streams to fill missing information with heuristics" },
3620  { NULL, },
3621 };
3622 
3623 static void show_usage(void)
3624 {
3625  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3626  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3627  av_log(NULL, AV_LOG_INFO, "\n");
3628 }
3629 
3630 void show_help_default(const char *opt, const char *arg)
3631 {
3632  av_log_set_callback(log_callback_help);
3633  show_usage();
3634  show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3635  show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3636  printf("\n");
3637  show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3638  show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3639 #if !CONFIG_AVFILTER
3640  show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3641 #else
3642  show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3643 #endif
3644  printf("\nWhile playing:\n"
3645  "q, ESC quit\n"
3646  "f toggle full screen\n"
3647  "p, SPC pause\n"
3648  "m toggle mute\n"
3649  "9, 0 decrease and increase volume respectively\n"
3650  "/, * decrease and increase volume respectively\n"
3651  "a cycle audio channel in the current program\n"
3652  "v cycle video channel\n"
3653  "t cycle subtitle channel in the current program\n"
3654  "c cycle program\n"
3655  "w cycle video filters or show modes\n"
3656  "s activate frame-step mode\n"
3657  "left/right seek backward/forward 10 seconds or to custom interval if -seek_interval is set\n"
3658  "down/up seek backward/forward 1 minute\n"
3659  "page down/page up seek backward/forward 10 minutes\n"
3660  "right mouse click seek to percentage in file corresponding to fraction of width\n"
3661  "left double-click toggle full screen\n"
3662  );
3663 }
3664 
3665 /* Called from the main */
3666 int main(int argc, char **argv)
3667 {
3668  int flags;
3669  VideoState *is;
3670 
3671  init_dynload();
3672 
3673  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3674  parse_loglevel(argc, argv, options);
3675 
3676  /* register all codecs, demux and protocols */
3677 #if CONFIG_AVDEVICE
3678  avdevice_register_all();
3679 #endif
3680  avformat_network_init();
3681 
3682  init_opts();
3683 
3684  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3685  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3686 
3687  show_banner(argc, argv, options);
3688 
3689  parse_options(NULL, argc, argv, options, opt_input_file);
3690 
3691  if (!input_filename) {
3692  show_usage();
3693  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3694  av_log(NULL, AV_LOG_FATAL,
3695  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3696  exit(1);
3697  }
3698 
3699  if (display_disable) {
3700  video_disable = 1;
3701  }
3702  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3703  if (audio_disable)
3704  flags &= ~SDL_INIT_AUDIO;
3705  else {
3706  /* Try to work around an occasional ALSA buffer underflow issue when the
3707  * period size is NPOT due to ALSA resampling by forcing the buffer size. */
3708  if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
3709  SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1);
3710  }
3711  if (display_disable)
3712  flags &= ~SDL_INIT_VIDEO;
3713  if (SDL_Init (flags)) {
3714  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3715  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3716  exit(1);
3717  }
3718 
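 /* Ignore SDL event categories that ffplay does not handle, so they are
  * never added to the event queue. */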
3719  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3720  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3721 
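 /* flush_pkt is a sentinel packet: its data pointer is set to the packet's own
  * address so the packet queues and decoder threads can recognize it and flush
  * the codec buffers, e.g. after a seek. */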
3722  av_init_packet(&flush_pkt);
3723  flush_pkt.data = (uint8_t *)&flush_pkt;
3724 
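 /* Create the (initially hidden) output window and a renderer, preferring a
  * hardware-accelerated renderer with vsync and falling back to whatever
  * renderer SDL picks by default if that fails. */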
3725  if (!display_disable) {
3726  int flags = SDL_WINDOW_HIDDEN;
3727  if (borderless)
3728  flags |= SDL_WINDOW_BORDERLESS;
3729  else
3730  flags |= SDL_WINDOW_RESIZABLE;
3731  window = SDL_CreateWindow(program_name, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, default_width, default_height, flags);
3732  SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
3733  if (window) {
3734  renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
3735  if (!renderer) {
3736  av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
3737  renderer = SDL_CreateRenderer(window, -1, 0);
3738  }
3739  if (renderer) {
3740  if (!SDL_GetRendererInfo(renderer, &renderer_info))
3741  av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", renderer_info.name);
3742  }
3743  }
3744  if (!window || !renderer || !renderer_info.num_texture_formats) {
3745  av_log(NULL, AV_LOG_FATAL, "Failed to create window or renderer: %s", SDL_GetError());
3746  do_exit(NULL);
3747  }
3748  }
3749 
3750  is = stream_open(input_filename, file_iformat);
3751  if (!is) {
3752  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3753  do_exit(NULL);
3754  }
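 /* stream_open() has spawned the read_thread, which in turn opens the stream
  * components and their decoder threads; event_loop() below drives display
  * refresh and user input and never returns. */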
3755 
3756  event_loop(is);
3757 
3758  /* never returns */
3759 
3760  return 0;
3761 }