ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include <stdint.h>
32 
33 #include "libavutil/avstring.h"
34 #include "libavutil/eval.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/dict.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/samplefmt.h"
41 #include "libavutil/avassert.h"
42 #include "libavutil/time.h"
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswscale/swscale.h"
46 #include "libavutil/opt.h"
47 #include "libavcodec/avfft.h"
48 #include "libswresample/swresample.h"
49 
50 #if CONFIG_AVFILTER
51 # include "libavfilter/avfilter.h"
52 # include "libavfilter/buffersink.h"
53 # include "libavfilter/buffersrc.h"
54 #endif
55 
56 #include <SDL.h>
57 #include <SDL_thread.h>
58 
59 #include "cmdutils.h"
60 
61 #include <assert.h>
62 
63 const char program_name[] = "ffplay";
64 const int program_birth_year = 2003;
65 
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_FRAMES 25
68 #define EXTERNAL_CLOCK_MIN_FRAMES 2
69 #define EXTERNAL_CLOCK_MAX_FRAMES 10
70 
71 /* Minimum SDL audio buffer size, in samples. */
72 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
73 /* Calculate actual buffer size, keeping in mind not to cause too frequent audio callbacks */
74 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
75 
76 /* Step size for volume control in dB */
77 #define SDL_VOLUME_STEP (0.75)
78 
79 /* no AV sync correction is done if below the minimum AV sync threshold */
80 #define AV_SYNC_THRESHOLD_MIN 0.04
81 /* AV sync correction is done if above the maximum AV sync threshold */
82 #define AV_SYNC_THRESHOLD_MAX 0.1
84 /* If a frame duration is longer than this, it will not be duplicated to compensate for AV sync */
84 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
86 /* no AV correction is done if the error is too big */
86 #define AV_NOSYNC_THRESHOLD 10.0
87 
88 /* maximum audio speed change to get correct sync */
89 #define SAMPLE_CORRECTION_PERCENT_MAX 10
90 
91 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
92 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
93 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
94 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
95 
96 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
97 #define AUDIO_DIFF_AVG_NB 20
98 
100 /* poll for a possibly required screen refresh at least this often; should be less than 1/fps */
100 #define REFRESH_RATE 0.01
101 
102 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
103 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
104 #define SAMPLE_ARRAY_SIZE (8 * 65536)
105 
106 #define CURSOR_HIDE_DELAY 1000000
107 
108 #define USE_ONEPASS_SUBTITLE_RENDER 1
109 
110 static unsigned sws_flags = SWS_BICUBIC;
111 
112 typedef struct MyAVPacketList {
113  AVPacket pkt;
114  struct MyAVPacketList *next;
115  int serial;
116 } MyAVPacketList;
117 
118 typedef struct PacketQueue {
119  MyAVPacketList *first_pkt, *last_pkt;
120  int nb_packets;
121  int size;
122  int64_t duration;
123  int abort_request;
124  int serial;
125  SDL_mutex *mutex;
126  SDL_cond *cond;
127 } PacketQueue;
128 
129 #define VIDEO_PICTURE_QUEUE_SIZE 3
130 #define SUBPICTURE_QUEUE_SIZE 16
131 #define SAMPLE_QUEUE_SIZE 9
132 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
133 
134 typedef struct AudioParams {
135  int freq;
136  int channels;
137  int64_t channel_layout;
138  enum AVSampleFormat fmt;
139  int frame_size;
140  int bytes_per_sec;
141 } AudioParams;
142 
143 typedef struct Clock {
144  double pts; /* clock base */
145  double pts_drift; /* clock base minus time at which we updated the clock */
146  double last_updated;
147  double speed;
148  int serial; /* clock is based on a packet with this serial */
149  int paused;
150  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
151 } Clock;
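/* The clock stores pts_drift = pts - system time at the last update;
 * get_clock() then returns pts_drift + current time, so the clock keeps
 * advancing between updates. For example, if set_clock() stored pts = 10.0
 * at system time 100.0, reading the clock 0.5 s later yields roughly 10.5
 * (with speed == 1.0). The serial / queue_serial pair lets callers detect a
 * clock that became obsolete after a seek flushed the packet queue. */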
152 
153 /* Common struct for handling all types of decoded data and allocated render buffers. */
154 typedef struct Frame {
155  AVFrame *frame;
156  AVSubtitle sub;
157  int serial;
158  double pts; /* presentation timestamp for the frame */
159  double duration; /* estimated duration of the frame */
160  int64_t pos; /* byte position of the frame in the input file */
161  int width;
162  int height;
163  int format;
164  AVRational sar;
165  int uploaded;
166  int flip_v;
167 } Frame;
168 
169 typedef struct FrameQueue {
170  Frame queue[FRAME_QUEUE_SIZE];
171  int rindex;
172  int windex;
173  int size;
174  int max_size;
175  int keep_last;
176  int rindex_shown;
177  SDL_mutex *mutex;
178  SDL_cond *cond;
179  PacketQueue *pktq;
180 } FrameQueue;
181 
182 enum {
183  AV_SYNC_AUDIO_MASTER, /* default choice */
184  AV_SYNC_VIDEO_MASTER,
185  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
186 };
187 
188 typedef struct Decoder {
189  AVPacket pkt;
190  PacketQueue *queue;
191  AVCodecContext *avctx;
192  int pkt_serial;
193  int finished;
194  int packet_pending;
195  SDL_cond *empty_queue_cond;
196  int64_t start_pts;
197  AVRational start_pts_tb;
198  int64_t next_pts;
199  AVRational next_pts_tb;
200  SDL_Thread *decoder_tid;
201 } Decoder;
202 
203 typedef struct VideoState {
204  SDL_Thread *read_tid;
208  int paused;
211  int seek_req;
213  int64_t seek_pos;
214  int64_t seek_rel;
217  int realtime;
218 
222 
226 
230 
232 
234 
235  double audio_clock;
237  double audio_diff_cum; /* used for AV difference average computation */
246  unsigned int audio_buf_size; /* in bytes */
247  unsigned int audio_buf1_size;
248  int audio_buf_index; /* in bytes */
251  int muted;
253 #if CONFIG_AVFILTER
254  struct AudioParams audio_filter_src;
255 #endif
260 
261  enum ShowMode {
262  SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
263  } show_mode;
270  int xpos;
272  SDL_Texture *vis_texture;
273  SDL_Texture *sub_texture;
274  SDL_Texture *vid_texture;
275 
279 
280  double frame_timer;
286  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
289  int eof;
290 
291  char *filename;
293  int step;
294 
295 #if CONFIG_AVFILTER
296  int vfilter_idx;
297  AVFilterContext *in_video_filter; // the first filter in the video chain
298  AVFilterContext *out_video_filter; // the last filter in the video chain
299  AVFilterContext *in_audio_filter; // the first filter in the audio chain
300  AVFilterContext *out_audio_filter; // the last filter in the audio chain
301  AVFilterGraph *agraph; // audio filter graph
302 #endif
303 
305 
307 } VideoState;
308 
309 /* options specified by the user */
311 static const char *input_filename;
312 static const char *window_title;
313 static int default_width = 640;
314 static int default_height = 480;
315 static int screen_width = 0;
316 static int screen_height = 0;
317 static int audio_disable;
318 static int video_disable;
319 static int subtitle_disable;
320 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
321 static int seek_by_bytes = -1;
322 static int display_disable;
323 static int borderless;
324 static int startup_volume = 100;
325 static int show_status = 1;
326 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
327 static int64_t start_time = AV_NOPTS_VALUE;
328 static int64_t duration = AV_NOPTS_VALUE;
329 static int fast = 0;
330 static int genpts = 0;
331 static int lowres = 0;
332 static int decoder_reorder_pts = -1;
333 static int autoexit;
334 static int exit_on_keydown;
335 static int exit_on_mousedown;
336 static int loop = 1;
337 static int framedrop = -1;
338 static int infinite_buffer = -1;
339 static enum ShowMode show_mode = SHOW_MODE_NONE;
340 static const char *audio_codec_name;
341 static const char *subtitle_codec_name;
342 static const char *video_codec_name;
343 double rdftspeed = 0.02;
344 static int64_t cursor_last_shown;
345 static int cursor_hidden = 0;
346 #if CONFIG_AVFILTER
347 static const char **vfilters_list = NULL;
348 static int nb_vfilters = 0;
349 static char *afilters = NULL;
350 #endif
351 static int autorotate = 1;
352 static int find_stream_info = 1;
353 
354 /* current context */
355 static int is_full_screen;
356 static int64_t audio_callback_time;
357 
358 static AVPacket flush_pkt;
359 
360 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
361 
362 static SDL_Window *window;
363 static SDL_Renderer *renderer;
364 static SDL_RendererInfo renderer_info = {0};
365 static SDL_AudioDeviceID audio_dev;
366 
367 static const struct TextureFormatEntry {
368  enum AVPixelFormat format;
369  int texture_fmt;
370 } sdl_texture_format_map[] = {
371  { AV_PIX_FMT_RGB8, SDL_PIXELFORMAT_RGB332 },
372  { AV_PIX_FMT_RGB444, SDL_PIXELFORMAT_RGB444 },
373  { AV_PIX_FMT_RGB555, SDL_PIXELFORMAT_RGB555 },
374  { AV_PIX_FMT_BGR555, SDL_PIXELFORMAT_BGR555 },
375  { AV_PIX_FMT_RGB565, SDL_PIXELFORMAT_RGB565 },
376  { AV_PIX_FMT_BGR565, SDL_PIXELFORMAT_BGR565 },
377  { AV_PIX_FMT_RGB24, SDL_PIXELFORMAT_RGB24 },
378  { AV_PIX_FMT_BGR24, SDL_PIXELFORMAT_BGR24 },
379  { AV_PIX_FMT_0RGB32, SDL_PIXELFORMAT_RGB888 },
380  { AV_PIX_FMT_0BGR32, SDL_PIXELFORMAT_BGR888 },
381  { AV_PIX_FMT_NE(RGB0, 0BGR), SDL_PIXELFORMAT_RGBX8888 },
382  { AV_PIX_FMT_NE(BGR0, 0RGB), SDL_PIXELFORMAT_BGRX8888 },
383  { AV_PIX_FMT_RGB32, SDL_PIXELFORMAT_ARGB8888 },
384  { AV_PIX_FMT_RGB32_1, SDL_PIXELFORMAT_RGBA8888 },
385  { AV_PIX_FMT_BGR32, SDL_PIXELFORMAT_ABGR8888 },
386  { AV_PIX_FMT_BGR32_1, SDL_PIXELFORMAT_BGRA8888 },
387  { AV_PIX_FMT_YUV420P, SDL_PIXELFORMAT_IYUV },
388  { AV_PIX_FMT_YUYV422, SDL_PIXELFORMAT_YUY2 },
389  { AV_PIX_FMT_UYVY422, SDL_PIXELFORMAT_UYVY },
390  { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_UNKNOWN },
391 };
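/* This table maps FFmpeg pixel formats to the SDL texture formats used for
 * rendering; the AV_PIX_FMT_NONE / SDL_PIXELFORMAT_UNKNOWN entry is a
 * sentinel, which is why lookups iterate over
 * FF_ARRAY_ELEMS(sdl_texture_format_map) - 1 entries. Frames whose format has
 * no direct SDL equivalent are converted with swscale in upload_texture(). */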
392 
393 #if CONFIG_AVFILTER
394 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
395 {
396  GROW_ARRAY(vfilters_list, nb_vfilters);
397  vfilters_list[nb_vfilters - 1] = arg;
398  return 0;
399 }
400 #endif
401 
402 static inline
403 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
404  enum AVSampleFormat fmt2, int64_t channel_count2)
405 {
406  /* If channel count == 1, planar and non-planar formats are the same */
407  if (channel_count1 == 1 && channel_count2 == 1)
408  return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
409  else
410  return channel_count1 != channel_count2 || fmt1 != fmt2;
411 }
412 
413 static inline
414 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
415 {
416  if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
417  return channel_layout;
418  else
419  return 0;
420 }
421 
422 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
423 {
424  MyAVPacketList *pkt1;
425 
426  if (q->abort_request)
427  return -1;
428 
429  pkt1 = av_malloc(sizeof(MyAVPacketList));
430  if (!pkt1)
431  return -1;
432  pkt1->pkt = *pkt;
433  pkt1->next = NULL;
434  if (pkt == &flush_pkt)
435  q->serial++;
436  pkt1->serial = q->serial;
437 
438  if (!q->last_pkt)
439  q->first_pkt = pkt1;
440  else
441  q->last_pkt->next = pkt1;
442  q->last_pkt = pkt1;
443  q->nb_packets++;
444  q->size += pkt1->pkt.size + sizeof(*pkt1);
445  q->duration += pkt1->pkt.duration;
446  /* XXX: should duplicate packet data in DV case */
447  SDL_CondSignal(q->cond);
448  return 0;
449 }
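/* Note how the queue accounts its memory: q->size includes both the packet
 * payload and the MyAVPacketList node itself, and this total is what
 * MAX_QUEUE_SIZE (defined above) is compared against when deciding whether
 * enough data is buffered. Putting flush_pkt bumps q->serial, so every packet
 * queued afterwards carries the new serial and data queued before a seek can
 * be recognised and dropped. */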
450 
451 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
452 {
453  int ret;
454 
455  SDL_LockMutex(q->mutex);
456  ret = packet_queue_put_private(q, pkt);
457  SDL_UnlockMutex(q->mutex);
458 
459  if (pkt != &flush_pkt && ret < 0)
460  av_packet_unref(pkt);
461 
462  return ret;
463 }
464 
465 static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
466 {
467  AVPacket pkt1, *pkt = &pkt1;
468  av_init_packet(pkt);
469  pkt->data = NULL;
470  pkt->size = 0;
471  pkt->stream_index = stream_index;
472  return packet_queue_put(q, pkt);
473 }
474 
475 /* packet queue handling */
476 static int packet_queue_init(PacketQueue *q)
477 {
478  memset(q, 0, sizeof(PacketQueue));
479  q->mutex = SDL_CreateMutex();
480  if (!q->mutex) {
481  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
482  return AVERROR(ENOMEM);
483  }
484  q->cond = SDL_CreateCond();
485  if (!q->cond) {
486  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
487  return AVERROR(ENOMEM);
488  }
489  q->abort_request = 1;
490  return 0;
491 }
492 
493 static void packet_queue_flush(PacketQueue *q)
494 {
495  MyAVPacketList *pkt, *pkt1;
496 
497  SDL_LockMutex(q->mutex);
498  for (pkt = q->first_pkt; pkt; pkt = pkt1) {
499  pkt1 = pkt->next;
500  av_packet_unref(&pkt->pkt);
501  av_freep(&pkt);
502  }
503  q->last_pkt = NULL;
504  q->first_pkt = NULL;
505  q->nb_packets = 0;
506  q->size = 0;
507  q->duration = 0;
508  SDL_UnlockMutex(q->mutex);
509 }
510 
511 static void packet_queue_destroy(PacketQueue *q)
512 {
513  packet_queue_flush(q);
514  SDL_DestroyMutex(q->mutex);
515  SDL_DestroyCond(q->cond);
516 }
517 
518 static void packet_queue_abort(PacketQueue *q)
519 {
520  SDL_LockMutex(q->mutex);
521 
522  q->abort_request = 1;
523 
524  SDL_CondSignal(q->cond);
525 
526  SDL_UnlockMutex(q->mutex);
527 }
528 
529 static void packet_queue_start(PacketQueue *q)
530 {
531  SDL_LockMutex(q->mutex);
532  q->abort_request = 0;
533  packet_queue_put_private(q, &flush_pkt);
534  SDL_UnlockMutex(q->mutex);
535 }
536 
537 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
538 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
539 {
540  MyAVPacketList *pkt1;
541  int ret;
542 
543  SDL_LockMutex(q->mutex);
544 
545  for (;;) {
546  if (q->abort_request) {
547  ret = -1;
548  break;
549  }
550 
551  pkt1 = q->first_pkt;
552  if (pkt1) {
553  q->first_pkt = pkt1->next;
554  if (!q->first_pkt)
555  q->last_pkt = NULL;
556  q->nb_packets--;
557  q->size -= pkt1->pkt.size + sizeof(*pkt1);
558  q->duration -= pkt1->pkt.duration;
559  *pkt = pkt1->pkt;
560  if (serial)
561  *serial = pkt1->serial;
562  av_free(pkt1);
563  ret = 1;
564  break;
565  } else if (!block) {
566  ret = 0;
567  break;
568  } else {
569  SDL_CondWait(q->cond, q->mutex);
570  }
571  }
572  SDL_UnlockMutex(q->mutex);
573  return ret;
574 }
575 
576 static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
577  memset(d, 0, sizeof(Decoder));
578  d->avctx = avctx;
579  d->queue = queue;
580  d->empty_queue_cond = empty_queue_cond;
582  d->pkt_serial = -1;
583 }
584 
585 static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
586  int ret = AVERROR(EAGAIN);
587 
588  for (;;) {
589  AVPacket pkt;
590 
591  if (d->queue->serial == d->pkt_serial) {
592  do {
593  if (d->queue->abort_request)
594  return -1;
595 
596  switch (d->avctx->codec_type) {
597  case AVMEDIA_TYPE_VIDEO:
598  ret = avcodec_receive_frame(d->avctx, frame);
599  if (ret >= 0) {
600  if (decoder_reorder_pts == -1) {
601  frame->pts = frame->best_effort_timestamp;
602  } else if (!decoder_reorder_pts) {
603  frame->pts = frame->pkt_dts;
604  }
605  }
606  break;
607  case AVMEDIA_TYPE_AUDIO:
608  ret = avcodec_receive_frame(d->avctx, frame);
609  if (ret >= 0) {
610  AVRational tb = (AVRational){1, frame->sample_rate};
611  if (frame->pts != AV_NOPTS_VALUE)
612  frame->pts = av_rescale_q(frame->pts, d->avctx->pkt_timebase, tb);
613  else if (d->next_pts != AV_NOPTS_VALUE)
614  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
615  if (frame->pts != AV_NOPTS_VALUE) {
616  d->next_pts = frame->pts + frame->nb_samples;
617  d->next_pts_tb = tb;
618  }
619  }
620  break;
621  }
622  if (ret == AVERROR_EOF) {
623  d->finished = d->pkt_serial;
624  avcodec_flush_buffers(d->avctx);
625  return 0;
626  }
627  if (ret >= 0)
628  return 1;
629  } while (ret != AVERROR(EAGAIN));
630  }
631 
632  do {
633  if (d->queue->nb_packets == 0)
634  SDL_CondSignal(d->empty_queue_cond);
635  if (d->packet_pending) {
636  av_packet_move_ref(&pkt, &d->pkt);
637  d->packet_pending = 0;
638  } else {
639  if (packet_queue_get(d->queue, &pkt, 1, &d->pkt_serial) < 0)
640  return -1;
641  }
642  } while (d->queue->serial != d->pkt_serial);
643 
644  if (pkt.data == flush_pkt.data) {
645  avcodec_flush_buffers(d->avctx);
646  d->finished = 0;
647  d->next_pts = d->start_pts;
648  d->next_pts_tb = d->start_pts_tb;
649  } else {
650  if (d->avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
651  int got_frame = 0;
652  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &pkt);
653  if (ret < 0) {
654  ret = AVERROR(EAGAIN);
655  } else {
656  if (got_frame && !pkt.data) {
657  d->packet_pending = 1;
658  av_packet_move_ref(&d->pkt, &pkt);
659  }
660  ret = got_frame ? 0 : (pkt.data ? AVERROR(EAGAIN) : AVERROR_EOF);
661  }
662  } else {
663  if (avcodec_send_packet(d->avctx, &pkt) == AVERROR(EAGAIN)) {
664  av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
665  d->packet_pending = 1;
666  av_packet_move_ref(&d->pkt, &pkt);
667  }
668  }
669  av_packet_unref(&pkt);
670  }
671  }
672 }
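/* Summary of the loop above: frames are drained from the codec with
 * avcodec_receive_frame() until it returns EAGAIN, then one packet is fetched
 * from the queue (blocking) and fed with avcodec_send_packet(); subtitles
 * still go through avcodec_decode_subtitle2(). Packets whose serial does not
 * match the queue's current serial belong to a pre-seek generation and are
 * discarded, and a flush packet resets the decoder's timestamp state. */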
673 
674 static void decoder_destroy(Decoder *d) {
675  av_packet_unref(&d->pkt);
676  avcodec_free_context(&d->avctx);
677 }
678 
679 static void frame_queue_unref_item(Frame *vp)
680 {
681  av_frame_unref(vp->frame);
682  avsubtitle_free(&vp->sub);
683 }
684 
685 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
686 {
687  int i;
688  memset(f, 0, sizeof(FrameQueue));
689  if (!(f->mutex = SDL_CreateMutex())) {
690  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
691  return AVERROR(ENOMEM);
692  }
693  if (!(f->cond = SDL_CreateCond())) {
694  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
695  return AVERROR(ENOMEM);
696  }
697  f->pktq = pktq;
698  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
699  f->keep_last = !!keep_last;
700  for (i = 0; i < f->max_size; i++)
701  if (!(f->queue[i].frame = av_frame_alloc()))
702  return AVERROR(ENOMEM);
703  return 0;
704 }
705 
706 static void frame_queue_destory(FrameQueue *f)
707 {
708  int i;
709  for (i = 0; i < f->max_size; i++) {
710  Frame *vp = &f->queue[i];
711  frame_queue_unref_item(vp);
712  av_frame_free(&vp->frame);
713  }
714  SDL_DestroyMutex(f->mutex);
715  SDL_DestroyCond(f->cond);
716 }
717 
718 static void frame_queue_signal(FrameQueue *f)
719 {
720  SDL_LockMutex(f->mutex);
721  SDL_CondSignal(f->cond);
722  SDL_UnlockMutex(f->mutex);
723 }
724 
725 static Frame *frame_queue_peek(FrameQueue *f)
726 {
727  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
728 }
729 
730 static Frame *frame_queue_peek_next(FrameQueue *f)
731 {
732  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
733 }
734 
735 static Frame *frame_queue_peek_last(FrameQueue *f)
736 {
737  return &f->queue[f->rindex];
738 }
739 
740 static Frame *frame_queue_peek_writable(FrameQueue *f)
741 {
742  /* wait until we have space to put a new frame */
743  SDL_LockMutex(f->mutex);
744  while (f->size >= f->max_size &&
745  !f->pktq->abort_request) {
746  SDL_CondWait(f->cond, f->mutex);
747  }
748  SDL_UnlockMutex(f->mutex);
749 
750  if (f->pktq->abort_request)
751  return NULL;
752 
753  return &f->queue[f->windex];
754 }
755 
756 static Frame *frame_queue_peek_readable(FrameQueue *f)
757 {
758  /* wait until we have a readable new frame */
759  SDL_LockMutex(f->mutex);
760  while (f->size - f->rindex_shown <= 0 &&
761  !f->pktq->abort_request) {
762  SDL_CondWait(f->cond, f->mutex);
763  }
764  SDL_UnlockMutex(f->mutex);
765 
766  if (f->pktq->abort_request)
767  return NULL;
768 
769  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
770 }
771 
772 static void frame_queue_push(FrameQueue *f)
773 {
774  if (++f->windex == f->max_size)
775  f->windex = 0;
776  SDL_LockMutex(f->mutex);
777  f->size++;
778  SDL_CondSignal(f->cond);
779  SDL_UnlockMutex(f->mutex);
780 }
781 
782 static void frame_queue_next(FrameQueue *f)
783 {
784  if (f->keep_last && !f->rindex_shown) {
785  f->rindex_shown = 1;
786  return;
787  }
788  frame_queue_unref_item(&f->queue[f->rindex]);
789  if (++f->rindex == f->max_size)
790  f->rindex = 0;
791  SDL_LockMutex(f->mutex);
792  f->size--;
793  SDL_CondSignal(f->cond);
794  SDL_UnlockMutex(f->mutex);
795 }
796 
797 /* return the number of undisplayed frames in the queue */
798 static int frame_queue_nb_remaining(FrameQueue *f)
799 {
800  return f->size - f->rindex_shown;
801 }
802 
803 /* return last shown position */
804 static int64_t frame_queue_last_pos(FrameQueue *f)
805 {
806  Frame *fp = &f->queue[f->rindex];
807  if (f->rindex_shown && fp->serial == f->pktq->serial)
808  return fp->pos;
809  else
810  return -1;
811 }
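/* The frame queue keeps the most recently displayed frame around when
 * keep_last is set: after the first frame_queue_next() call rindex_shown
 * becomes 1 and frame_queue_peek_last() keeps returning that frame (used for
 * repaints), while frame_queue_peek() and frame_queue_nb_remaining() skip it
 * and only consider frames that have not been displayed yet. */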
812 
813 static void decoder_abort(Decoder *d, FrameQueue *fq)
814 {
815  packet_queue_abort(d->queue);
816  frame_queue_signal(fq);
817  SDL_WaitThread(d->decoder_tid, NULL);
818  d->decoder_tid = NULL;
819  packet_queue_flush(d->queue);
820 }
821 
822 static inline void fill_rectangle(int x, int y, int w, int h)
823 {
824  SDL_Rect rect;
825  rect.x = x;
826  rect.y = y;
827  rect.w = w;
828  rect.h = h;
829  if (w && h)
830  SDL_RenderFillRect(renderer, &rect);
831 }
832 
833 static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
834 {
835  Uint32 format;
836  int access, w, h;
837  if (!*texture || SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
838  void *pixels;
839  int pitch;
840  if (*texture)
841  SDL_DestroyTexture(*texture);
842  if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
843  return -1;
844  if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
845  return -1;
846  if (init_texture) {
847  if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
848  return -1;
849  memset(pixels, 0, pitch * new_height);
850  SDL_UnlockTexture(*texture);
851  }
852  av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n", new_width, new_height, SDL_GetPixelFormatName(new_format));
853  }
854  return 0;
855 }
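/* realloc_texture() is deliberately lazy: the texture is recreated only when
 * the requested format or dimensions differ from the existing one, so in the
 * common case of a constant-size stream the same SDL texture is reused for
 * every frame. */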
856 
857 static void calculate_display_rect(SDL_Rect *rect,
858  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
859  int pic_width, int pic_height, AVRational pic_sar)
860 {
861  float aspect_ratio;
862  int width, height, x, y;
863 
864  if (pic_sar.num == 0)
865  aspect_ratio = 0;
866  else
867  aspect_ratio = av_q2d(pic_sar);
868 
869  if (aspect_ratio <= 0.0)
870  aspect_ratio = 1.0;
871  aspect_ratio *= (float)pic_width / (float)pic_height;
872 
873  /* XXX: we suppose the screen has a 1.0 pixel ratio */
874  height = scr_height;
875  width = lrint(height * aspect_ratio) & ~1;
876  if (width > scr_width) {
877  width = scr_width;
878  height = lrint(width / aspect_ratio) & ~1;
879  }
880  x = (scr_width - width) / 2;
881  y = (scr_height - height) / 2;
882  rect->x = scr_xleft + x;
883  rect->y = scr_ytop + y;
884  rect->w = FFMAX(width, 1);
885  rect->h = FFMAX(height, 1);
886 }
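/* Worked example: a 1440x1080 picture with a 4:3 sample aspect ratio has a
 * display aspect ratio of (4/3) * (1440/1080) = 16:9, so inside a 1920x1080
 * window the computed rectangle is 1920x1080 at (0,0); in a 1280x1024 window
 * the width is clamped to 1280, the height becomes 720, and the rectangle is
 * centered vertically at y = 152. */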
887 
888 static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
889 {
890  int i;
891  *sdl_blendmode = SDL_BLENDMODE_NONE;
892  *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN;
893  if (format == AV_PIX_FMT_RGB32 ||
894  format == AV_PIX_FMT_RGB32_1 ||
895  format == AV_PIX_FMT_BGR32 ||
896  format == AV_PIX_FMT_BGR32_1)
897  *sdl_blendmode = SDL_BLENDMODE_BLEND;
898  for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; i++) {
899  if (format == sdl_texture_format_map[i].format) {
900  *sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt;
901  return;
902  }
903  }
904 }
905 
906 static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext **img_convert_ctx) {
907  int ret = 0;
908  Uint32 sdl_pix_fmt;
909  SDL_BlendMode sdl_blendmode;
910  get_sdl_pix_fmt_and_blendmode(frame->format, &sdl_pix_fmt, &sdl_blendmode);
911  if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
912  return -1;
913  switch (sdl_pix_fmt) {
914  case SDL_PIXELFORMAT_UNKNOWN:
915  /* This should only happen if we are not using avfilter... */
916  *img_convert_ctx = sws_getCachedContext(*img_convert_ctx,
917  frame->width, frame->height, frame->format, frame->width, frame->height,
918  AV_PIX_FMT_BGRA, sws_flags, NULL, NULL, NULL);
919  if (*img_convert_ctx != NULL) {
920  uint8_t *pixels[4];
921  int pitch[4];
922  if (!SDL_LockTexture(*tex, NULL, (void **)pixels, pitch)) {
923  sws_scale(*img_convert_ctx, (const uint8_t * const *)frame->data, frame->linesize,
924  0, frame->height, pixels, pitch);
925  SDL_UnlockTexture(*tex);
926  }
927  } else {
928  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
929  ret = -1;
930  }
931  break;
932  case SDL_PIXELFORMAT_IYUV:
933  if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
934  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0],
935  frame->data[1], frame->linesize[1],
936  frame->data[2], frame->linesize[2]);
937  } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
938  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0],
939  frame->data[1] + frame->linesize[1] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[1],
940  frame->data[2] + frame->linesize[2] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[2]);
941  } else {
942  av_log(NULL, AV_LOG_ERROR, "Mixed negative and positive linesizes are not supported.\n");
943  return -1;
944  }
945  break;
946  default:
947  if (frame->linesize[0] < 0) {
948  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
949  } else {
950  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0], frame->linesize[0]);
951  }
952  break;
953  }
954  return ret;
955 }
956 
957 static void set_sdl_yuv_conversion_mode(AVFrame *frame)
958 {
959 #if SDL_VERSION_ATLEAST(2,0,8)
960  SDL_YUV_CONVERSION_MODE mode = SDL_YUV_CONVERSION_AUTOMATIC;
961  if (frame && (frame->format == AV_PIX_FMT_YUV420P || frame->format == AV_PIX_FMT_YUYV422 || frame->format == AV_PIX_FMT_UYVY422)) {
962  if (frame->color_range == AVCOL_RANGE_JPEG)
963  mode = SDL_YUV_CONVERSION_JPEG;
964  else if (frame->colorspace == AVCOL_SPC_BT709)
965  mode = SDL_YUV_CONVERSION_BT709;
966  else if (frame->colorspace == AVCOL_SPC_BT470BG || frame->colorspace == AVCOL_SPC_SMPTE170M || frame->colorspace == AVCOL_SPC_SMPTE240M)
967  mode = SDL_YUV_CONVERSION_BT601;
968  }
969  SDL_SetYUVConversionMode(mode);
970 #endif
971 }
972 
973 static void video_image_display(VideoState *is)
974 {
975  Frame *vp;
976  Frame *sp = NULL;
977  SDL_Rect rect;
978 
979  vp = frame_queue_peek_last(&is->pictq);
980  if (is->subtitle_st) {
981  if (frame_queue_nb_remaining(&is->subpq) > 0) {
982  sp = frame_queue_peek(&is->subpq);
983 
984  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
985  if (!sp->uploaded) {
986  uint8_t* pixels[4];
987  int pitch[4];
988  int i;
989  if (!sp->width || !sp->height) {
990  sp->width = vp->width;
991  sp->height = vp->height;
992  }
993  if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
994  return;
995 
996  for (i = 0; i < sp->sub.num_rects; i++) {
997  AVSubtitleRect *sub_rect = sp->sub.rects[i];
998 
999  sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
1000  sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
1001  sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x);
1002  sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
1003 
1004  is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
1005  sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8,
1006  sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA,
1007  0, NULL, NULL, NULL);
1008  if (!is->sub_convert_ctx) {
1009  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1010  return;
1011  }
1012  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) {
1013  sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
1014  0, sub_rect->h, pixels, pitch);
1015  SDL_UnlockTexture(is->sub_texture);
1016  }
1017  }
1018  sp->uploaded = 1;
1019  }
1020  } else
1021  sp = NULL;
1022  }
1023  }
1024 
1025  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
1026 
1027  if (!vp->uploaded) {
1028  if (upload_texture(&is->vid_texture, vp->frame, &is->img_convert_ctx) < 0)
1029  return;
1030  vp->uploaded = 1;
1031  vp->flip_v = vp->frame->linesize[0] < 0;
1032  }
1033 
1034  set_sdl_yuv_conversion_mode(vp->frame);
1035  SDL_RenderCopyEx(renderer, is->vid_texture, NULL, &rect, 0, NULL, vp->flip_v ? SDL_FLIP_VERTICAL : 0);
1036  set_sdl_yuv_conversion_mode(NULL);
1037  if (sp) {
1038 #if USE_ONEPASS_SUBTITLE_RENDER
1039  SDL_RenderCopy(renderer, is->sub_texture, NULL, &rect);
1040 #else
1041  int i;
1042  double xratio = (double)rect.w / (double)sp->width;
1043  double yratio = (double)rect.h / (double)sp->height;
1044  for (i = 0; i < sp->sub.num_rects; i++) {
1045  SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
1046  SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
1047  .y = rect.y + sub_rect->y * yratio,
1048  .w = sub_rect->w * xratio,
1049  .h = sub_rect->h * yratio};
1050  SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
1051  }
1052 #endif
1053  }
1054 }
1055 
1056 static inline int compute_mod(int a, int b)
1057 {
1058  return a < 0 ? a%b + b : a%b;
1059 }
1060 
1061 static void video_audio_display(VideoState *s)
1062 {
1063  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1064  int ch, channels, h, h2;
1065  int64_t time_diff;
1066  int rdft_bits, nb_freq;
1067 
1068  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1069  ;
1070  nb_freq = 1 << (rdft_bits - 1);
1071 
1072  /* compute display index : center on currently output samples */
1073  channels = s->audio_tgt.channels;
1074  nb_display_channels = channels;
1075  if (!s->paused) {
1076  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1077  n = 2 * channels;
1078  delay = s->audio_write_buf_size;
1079  delay /= n;
1080 
1081  /* to be more precise, we take into account the time spent since
1082  the last buffer computation */
1083  if (audio_callback_time) {
1084  time_diff = av_gettime_relative() - audio_callback_time;
1085  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1086  }
1087 
1088  delay += 2 * data_used;
1089  if (delay < data_used)
1090  delay = data_used;
1091 
1092  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1093  if (s->show_mode == SHOW_MODE_WAVES) {
1094  h = INT_MIN;
1095  for (i = 0; i < 1000; i += channels) {
1096  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1097  int a = s->sample_array[idx];
1098  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1099  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1100  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1101  int score = a - d;
1102  if (h < score && (b ^ c) < 0) {
1103  h = score;
1104  i_start = idx;
1105  }
1106  }
1107  }
1108 
1109  s->last_i_start = i_start;
1110  } else {
1111  i_start = s->last_i_start;
1112  }
1113 
1114  if (s->show_mode == SHOW_MODE_WAVES) {
1115  SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
1116 
1117  /* total height for one channel */
1118  h = s->height / nb_display_channels;
1119  /* graph height / 2 */
1120  h2 = (h * 9) / 20;
1121  for (ch = 0; ch < nb_display_channels; ch++) {
1122  i = i_start + ch;
1123  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1124  for (x = 0; x < s->width; x++) {
1125  y = (s->sample_array[i] * h2) >> 15;
1126  if (y < 0) {
1127  y = -y;
1128  ys = y1 - y;
1129  } else {
1130  ys = y1;
1131  }
1132  fill_rectangle(s->xleft + x, ys, 1, y);
1133  i += channels;
1134  if (i >= SAMPLE_ARRAY_SIZE)
1135  i -= SAMPLE_ARRAY_SIZE;
1136  }
1137  }
1138 
1139  SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
1140 
1141  for (ch = 1; ch < nb_display_channels; ch++) {
1142  y = s->ytop + ch * h;
1143  fill_rectangle(s->xleft, y, s->width, 1);
1144  }
1145  } else {
1146  if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
1147  return;
1148 
1149  nb_display_channels= FFMIN(nb_display_channels, 2);
1150  if (rdft_bits != s->rdft_bits) {
1151  av_rdft_end(s->rdft);
1152  av_free(s->rdft_data);
1153  s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
1154  s->rdft_bits = rdft_bits;
1155  s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data));
1156  }
1157  if (!s->rdft || !s->rdft_data){
1158  av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1159  s->show_mode = SHOW_MODE_WAVES;
1160  } else {
1161  FFTSample *data[2];
1162  SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
1163  uint32_t *pixels;
1164  int pitch;
1165  for (ch = 0; ch < nb_display_channels; ch++) {
1166  data[ch] = s->rdft_data + 2 * nb_freq * ch;
1167  i = i_start + ch;
1168  for (x = 0; x < 2 * nb_freq; x++) {
1169  double w = (x-nb_freq) * (1.0 / nb_freq);
1170  data[ch][x] = s->sample_array[i] * (1.0 - w * w);
1171  i += channels;
1172  if (i >= SAMPLE_ARRAY_SIZE)
1173  i -= SAMPLE_ARRAY_SIZE;
1174  }
1175  av_rdft_calc(s->rdft, data[ch]);
1176  }
1177  /* Least efficient way to do this; we should of course
1178  * directly access it, but it is more than fast enough. */
1179  if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
1180  pitch >>= 2;
1181  pixels += pitch * s->height;
1182  for (y = 0; y < s->height; y++) {
1183  double w = 1 / sqrt(nb_freq);
1184  int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
1185  int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][2 * y + 0], data[1][2 * y + 1]))
1186  : a;
1187  a = FFMIN(a, 255);
1188  b = FFMIN(b, 255);
1189  pixels -= pitch;
1190  *pixels = (a << 16) + (b << 8) + ((a+b) >> 1);
1191  }
1192  SDL_UnlockTexture(s->vis_texture);
1193  }
1194  SDL_RenderCopy(renderer, s->vis_texture, NULL, NULL);
1195  }
1196  if (!s->paused)
1197  s->xpos++;
1198  if (s->xpos >= s->width)
1199  s->xpos= s->xleft;
1200  }
1201 }
1202 
1203 static void stream_component_close(VideoState *is, int stream_index)
1204 {
1205  AVFormatContext *ic = is->ic;
1206  AVCodecParameters *codecpar;
1207 
1208  if (stream_index < 0 || stream_index >= ic->nb_streams)
1209  return;
1210  codecpar = ic->streams[stream_index]->codecpar;
1211 
1212  switch (codecpar->codec_type) {
1213  case AVMEDIA_TYPE_AUDIO:
1214  decoder_abort(&is->auddec, &is->sampq);
1215  SDL_CloseAudioDevice(audio_dev);
1216  decoder_destroy(&is->auddec);
1217  swr_free(&is->swr_ctx);
1218  av_freep(&is->audio_buf1);
1219  is->audio_buf1_size = 0;
1220  is->audio_buf = NULL;
1221 
1222  if (is->rdft) {
1223  av_rdft_end(is->rdft);
1224  av_freep(&is->rdft_data);
1225  is->rdft = NULL;
1226  is->rdft_bits = 0;
1227  }
1228  break;
1229  case AVMEDIA_TYPE_VIDEO:
1230  decoder_abort(&is->viddec, &is->pictq);
1231  decoder_destroy(&is->viddec);
1232  break;
1233  case AVMEDIA_TYPE_SUBTITLE:
1234  decoder_abort(&is->subdec, &is->subpq);
1235  decoder_destroy(&is->subdec);
1236  break;
1237  default:
1238  break;
1239  }
1240 
1241  ic->streams[stream_index]->discard = AVDISCARD_ALL;
1242  switch (codecpar->codec_type) {
1243  case AVMEDIA_TYPE_AUDIO:
1244  is->audio_st = NULL;
1245  is->audio_stream = -1;
1246  break;
1247  case AVMEDIA_TYPE_VIDEO:
1248  is->video_st = NULL;
1249  is->video_stream = -1;
1250  break;
1251  case AVMEDIA_TYPE_SUBTITLE:
1252  is->subtitle_st = NULL;
1253  is->subtitle_stream = -1;
1254  break;
1255  default:
1256  break;
1257  }
1258 }
1259 
1260 static void stream_close(VideoState *is)
1261 {
1262  /* XXX: use a special url_shutdown call to abort parse cleanly */
1263  is->abort_request = 1;
1264  SDL_WaitThread(is->read_tid, NULL);
1265 
1266  /* close each stream */
1267  if (is->audio_stream >= 0)
1268  stream_component_close(is, is->audio_stream);
1269  if (is->video_stream >= 0)
1270  stream_component_close(is, is->video_stream);
1271  if (is->subtitle_stream >= 0)
1272  stream_component_close(is, is->subtitle_stream);
1273 
1274  avformat_close_input(&is->ic);
1275 
1276  packet_queue_destroy(&is->videoq);
1277  packet_queue_destroy(&is->audioq);
1278  packet_queue_destroy(&is->subtitleq);
1279 
1280  /* free all pictures */
1281  frame_queue_destory(&is->pictq);
1282  frame_queue_destory(&is->sampq);
1283  frame_queue_destory(&is->subpq);
1284  SDL_DestroyCond(is->continue_read_thread);
1285  sws_freeContext(is->img_convert_ctx);
1286  sws_freeContext(is->sub_convert_ctx);
1287  av_free(is->filename);
1288  if (is->vis_texture)
1289  SDL_DestroyTexture(is->vis_texture);
1290  if (is->vid_texture)
1291  SDL_DestroyTexture(is->vid_texture);
1292  if (is->sub_texture)
1293  SDL_DestroyTexture(is->sub_texture);
1294  av_free(is);
1295 }
1296 
1297 static void do_exit(VideoState *is)
1298 {
1299  if (is) {
1300  stream_close(is);
1301  }
1302  if (renderer)
1303  SDL_DestroyRenderer(renderer);
1304  if (window)
1305  SDL_DestroyWindow(window);
1306  uninit_opts();
1307 #if CONFIG_AVFILTER
1308  av_freep(&vfilters_list);
1309 #endif
1310  avformat_network_deinit();
1311  if (show_status)
1312  printf("\n");
1313  SDL_Quit();
1314  av_log(NULL, AV_LOG_QUIET, "%s", "");
1315  exit(0);
1316 }
1317 
1318 static void sigterm_handler(int sig)
1319 {
1320  exit(123);
1321 }
1322 
1323 static void set_default_window_size(int width, int height, AVRational sar)
1324 {
1325  SDL_Rect rect;
1326  calculate_display_rect(&rect, 0, 0, INT_MAX, height, width, height, sar);
1327  default_width = rect.w;
1328  default_height = rect.h;
1329 }
1330 
1331 static int video_open(VideoState *is)
1332 {
1333  int w,h;
1334 
1335  if (screen_width) {
1336  w = screen_width;
1337  h = screen_height;
1338  } else {
1339  w = default_width;
1340  h = default_height;
1341  }
1342 
1343  if (!window_title)
1344  window_title = input_filename;
1345  SDL_SetWindowTitle(window, window_title);
1346 
1347  SDL_SetWindowSize(window, w, h);
1348  SDL_SetWindowPosition(window, SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED);
1349  if (is_full_screen)
1350  SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP);
1351  SDL_ShowWindow(window);
1352 
1353  is->width = w;
1354  is->height = h;
1355 
1356  return 0;
1357 }
1358 
1359 /* display the current picture, if any */
1360 static void video_display(VideoState *is)
1361 {
1362  if (!is->width)
1363  video_open(is);
1364 
1365  SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1366  SDL_RenderClear(renderer);
1367  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1368  video_audio_display(is);
1369  else if (is->video_st)
1370  video_image_display(is);
1371  SDL_RenderPresent(renderer);
1372 }
1373 
1374 static double get_clock(Clock *c)
1375 {
1376  if (*c->queue_serial != c->serial)
1377  return NAN;
1378  if (c->paused) {
1379  return c->pts;
1380  } else {
1381  double time = av_gettime_relative() / 1000000.0;
1382  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1383  }
1384 }
1385 
1386 static void set_clock_at(Clock *c, double pts, int serial, double time)
1387 {
1388  c->pts = pts;
1389  c->last_updated = time;
1390  c->pts_drift = c->pts - time;
1391  c->serial = serial;
1392 }
1393 
1394 static void set_clock(Clock *c, double pts, int serial)
1395 {
1396  double time = av_gettime_relative() / 1000000.0;
1397  set_clock_at(c, pts, serial, time);
1398 }
1399 
1400 static void set_clock_speed(Clock *c, double speed)
1401 {
1402  set_clock(c, get_clock(c), c->serial);
1403  c->speed = speed;
1404 }
1405 
1406 static void init_clock(Clock *c, int *queue_serial)
1407 {
1408  c->speed = 1.0;
1409  c->paused = 0;
1410  c->queue_serial = queue_serial;
1411  set_clock(c, NAN, -1);
1412 }
1413 
1414 static void sync_clock_to_slave(Clock *c, Clock *slave)
1415 {
1416  double clock = get_clock(c);
1417  double slave_clock = get_clock(slave);
1418  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1419  set_clock(c, slave_clock, slave->serial);
1420 }
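/* The master/slave sync here is coarse on purpose: the clock is re-seeded
 * from the slave only when it is unset (NAN) or differs by more than
 * AV_NOSYNC_THRESHOLD (10 seconds); smaller differences are left alone so the
 * clock does not jump around on normal jitter. */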
1421 
1422 static int get_master_sync_type(VideoState *is) {
1423  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1424  if (is->video_st)
1425  return AV_SYNC_VIDEO_MASTER;
1426  else
1427  return AV_SYNC_AUDIO_MASTER;
1428  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1429  if (is->audio_st)
1430  return AV_SYNC_AUDIO_MASTER;
1431  else
1432  return AV_SYNC_EXTERNAL_CLOCK;
1433  } else {
1434  return AV_SYNC_EXTERNAL_CLOCK;
1435  }
1436 }
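/* Fallback logic: the requested master may not be usable (e.g. video master
 * on an audio-only file), so the sync source degrades from video to audio to
 * the external clock depending on which streams are actually open. */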
1437 
1438 /* get the current master clock value */
1439 static double get_master_clock(VideoState *is)
1440 {
1441  double val;
1442 
1443  switch (get_master_sync_type(is)) {
1444  case AV_SYNC_VIDEO_MASTER:
1445  val = get_clock(&is->vidclk);
1446  break;
1447  case AV_SYNC_AUDIO_MASTER:
1448  val = get_clock(&is->audclk);
1449  break;
1450  default:
1451  val = get_clock(&is->extclk);
1452  break;
1453  }
1454  return val;
1455 }
1456 
1457 static void check_external_clock_speed(VideoState *is) {
1458  if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES ||
1459  is->audio_stream >= 0 && is->audioq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES) {
1460  set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
1461  } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) &&
1462  (is->audio_stream < 0 || is->audioq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES)) {
1463  set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
1464  } else {
1465  double speed = is->extclk.speed;
1466  if (speed != 1.0)
1467  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1468  }
1469 }
1470 
1471 /* seek in the stream */
1472 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1473 {
1474  if (!is->seek_req) {
1475  is->seek_pos = pos;
1476  is->seek_rel = rel;
1477  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1478  if (seek_by_bytes)
1479  is->seek_flags |= AVSEEK_FLAG_BYTE;
1480  is->seek_req = 1;
1481  SDL_CondSignal(is->continue_read_thread);
1482  }
1483 }
1484 
1485 /* pause or resume the video */
1486 static void stream_toggle_pause(VideoState *is)
1487 {
1488  if (is->paused) {
1489  is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1490  if (is->read_pause_return != AVERROR(ENOSYS)) {
1491  is->vidclk.paused = 0;
1492  }
1493  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1494  }
1495  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1496  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1497 }
1498 
1499 static void toggle_pause(VideoState *is)
1500 {
1501  stream_toggle_pause(is);
1502  is->step = 0;
1503 }
1504 
1505 static void toggle_mute(VideoState *is)
1506 {
1507  is->muted = !is->muted;
1508 }
1509 
1510 static void update_volume(VideoState *is, int sign, double step)
1511 {
1512  double volume_level = is->audio_volume ? (20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)) : -1000.0;
1513  int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (volume_level + sign * step) / 20.0));
1514  is->audio_volume = av_clip(is->audio_volume == new_volume ? (is->audio_volume + sign) : new_volume, 0, SDL_MIX_MAXVOLUME);
1515 }
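/* Volume steps are applied in the dB domain. For example, starting from
 * SDL_MIX_MAXVOLUME (128, i.e. 0 dB), one step down of SDL_VOLUME_STEP
 * (0.75 dB) gives roughly 128 * 10^(-0.75/20), which rounds to 117; the extra
 * "audio_volume + sign" term guarantees the integer volume still moves by at
 * least one even when the rounded dB step would leave it unchanged. */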
1516 
1517 static void step_to_next_frame(VideoState *is)
1518 {
1519  /* if the stream is paused, unpause it, then step */
1520  if (is->paused)
1521  stream_toggle_pause(is);
1522  is->step = 1;
1523 }
1524 
1525 static double compute_target_delay(double delay, VideoState *is)
1526 {
1527  double sync_threshold, diff = 0;
1528 
1529  /* update delay to follow master synchronisation source */
1530  if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1531  /* if video is slave, we try to correct big delays by
1532  duplicating or deleting a frame */
1533  diff = get_clock(&is->vidclk) - get_master_clock(is);
1534 
1535  /* skip or repeat frame. We take into account the
1536  delay to compute the threshold. I still don't know
1537  if it is the best guess */
1538  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1539  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1540  if (diff <= -sync_threshold)
1541  delay = FFMAX(0, delay + diff);
1542  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1543  delay = delay + diff;
1544  else if (diff >= sync_threshold)
1545  delay = 2 * delay;
1546  }
1547  }
1548 
1549  av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
1550  delay, -diff);
1551 
1552  return delay;
1553 }
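/* Worked example: with a nominal frame delay of 0.040 s (25 fps) the sync
 * threshold is clamped to 0.040 s. If the video clock is 0.05 s behind the
 * master (diff = -0.05), delay becomes max(0, 0.040 - 0.050) = 0 and the next
 * frame is shown immediately; if it is 0.05 s ahead, delay is doubled to
 * 0.080 s so the current frame stays on screen longer. */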
1554 
1555 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1556  if (vp->serial == nextvp->serial) {
1557  double duration = nextvp->pts - vp->pts;
1558  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1559  return vp->duration;
1560  else
1561  return duration;
1562  } else {
1563  return 0.0;
1564  }
1565 }
1566 
1567 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1568  /* update current video pts */
1569  set_clock(&is->vidclk, pts, serial);
1570  sync_clock_to_slave(&is->extclk, &is->vidclk);
1571 }
1572 
1573 /* called to display each frame */
1574 static void video_refresh(void *opaque, double *remaining_time)
1575 {
1576  VideoState *is = opaque;
1577  double time;
1578 
1579  Frame *sp, *sp2;
1580 
1581  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1582  check_external_clock_speed(is);
1583 
1584  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1585  time = av_gettime_relative() / 1000000.0;
1586  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1587  video_display(is);
1588  is->last_vis_time = time;
1589  }
1590  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1591  }
1592 
1593  if (is->video_st) {
1594 retry:
1595  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1596  // nothing to do, no picture to display in the queue
1597  } else {
1598  double last_duration, duration, delay;
1599  Frame *vp, *lastvp;
1600 
1601  /* dequeue the picture */
1602  lastvp = frame_queue_peek_last(&is->pictq);
1603  vp = frame_queue_peek(&is->pictq);
1604 
1605  if (vp->serial != is->videoq.serial) {
1606  frame_queue_next(&is->pictq);
1607  goto retry;
1608  }
1609 
1610  if (lastvp->serial != vp->serial)
1611  is->frame_timer = av_gettime_relative() / 1000000.0;
1612 
1613  if (is->paused)
1614  goto display;
1615 
1616  /* compute nominal last_duration */
1617  last_duration = vp_duration(is, lastvp, vp);
1618  delay = compute_target_delay(last_duration, is);
1619 
1620  time= av_gettime_relative()/1000000.0;
1621  if (time < is->frame_timer + delay) {
1622  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1623  goto display;
1624  }
1625 
1626  is->frame_timer += delay;
1627  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1628  is->frame_timer = time;
1629 
1630  SDL_LockMutex(is->pictq.mutex);
1631  if (!isnan(vp->pts))
1632  update_video_pts(is, vp->pts, vp->pos, vp->serial);
1633  SDL_UnlockMutex(is->pictq.mutex);
1634 
1635  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1636  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1637  duration = vp_duration(is, vp, nextvp);
1638  if(!is->step && (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1639  is->frame_drops_late++;
1640  frame_queue_next(&is->pictq);
1641  goto retry;
1642  }
1643  }
1644 
1645  if (is->subtitle_st) {
1646  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1647  sp = frame_queue_peek(&is->subpq);
1648 
1649  if (frame_queue_nb_remaining(&is->subpq) > 1)
1650  sp2 = frame_queue_peek_next(&is->subpq);
1651  else
1652  sp2 = NULL;
1653 
1654  if (sp->serial != is->subtitleq.serial
1655  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1656  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1657  {
1658  if (sp->uploaded) {
1659  int i;
1660  for (i = 0; i < sp->sub.num_rects; i++) {
1661  AVSubtitleRect *sub_rect = sp->sub.rects[i];
1662  uint8_t *pixels;
1663  int pitch, j;
1664 
1665  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
1666  for (j = 0; j < sub_rect->h; j++, pixels += pitch)
1667  memset(pixels, 0, sub_rect->w << 2);
1668  SDL_UnlockTexture(is->sub_texture);
1669  }
1670  }
1671  }
1672  frame_queue_next(&is->subpq);
1673  } else {
1674  break;
1675  }
1676  }
1677  }
1678 
1679  frame_queue_next(&is->pictq);
1680  is->force_refresh = 1;
1681 
1682  if (is->step && !is->paused)
1683  stream_toggle_pause(is);
1684  }
1685 display:
1686  /* display picture */
1687  if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
1688  video_display(is);
1689  }
1690  is->force_refresh = 0;
1691  if (show_status) {
1692  static int64_t last_time;
1693  int64_t cur_time;
1694  int aqsize, vqsize, sqsize;
1695  double av_diff;
1696 
1697  cur_time = av_gettime_relative();
1698  if (!last_time || (cur_time - last_time) >= 30000) {
1699  aqsize = 0;
1700  vqsize = 0;
1701  sqsize = 0;
1702  if (is->audio_st)
1703  aqsize = is->audioq.size;
1704  if (is->video_st)
1705  vqsize = is->videoq.size;
1706  if (is->subtitle_st)
1707  sqsize = is->subtitleq.size;
1708  av_diff = 0;
1709  if (is->audio_st && is->video_st)
1710  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1711  else if (is->video_st)
1712  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1713  else if (is->audio_st)
1714  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1715  av_log(NULL, AV_LOG_INFO,
1716  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1717  get_master_clock(is),
1718  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1719  av_diff,
1720  is->frame_drops_early + is->frame_drops_late,
1721  aqsize / 1024,
1722  vqsize / 1024,
1723  sqsize,
1724  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_dts : 0,
1725  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_pts : 0);
1726  fflush(stdout);
1727  last_time = cur_time;
1728  }
1729  }
1730 }
1731 
1732 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1733 {
1734  Frame *vp;
1735 
1736 #if defined(DEBUG_SYNC)
1737  printf("frame_type=%c pts=%0.3f\n",
1738  av_get_picture_type_char(src_frame->pict_type), pts);
1739 #endif
1740 
1741  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1742  return -1;
1743 
1744  vp->sar = src_frame->sample_aspect_ratio;
1745  vp->uploaded = 0;
1746 
1747  vp->width = src_frame->width;
1748  vp->height = src_frame->height;
1749  vp->format = src_frame->format;
1750 
1751  vp->pts = pts;
1752  vp->duration = duration;
1753  vp->pos = pos;
1754  vp->serial = serial;
1755 
1756  set_default_window_size(vp->width, vp->height, vp->sar);
1757 
1758  av_frame_move_ref(vp->frame, src_frame);
1759  frame_queue_push(&is->pictq);
1760  return 0;
1761 }
1762 
1763 static int get_video_frame(VideoState *is, AVFrame *frame)
1764 {
1765  int got_picture;
1766 
1767  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1768  return -1;
1769 
1770  if (got_picture) {
1771  double dpts = NAN;
1772 
1773  if (frame->pts != AV_NOPTS_VALUE)
1774  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1775 
1776  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1777 
1778  if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1779  if (frame->pts != AV_NOPTS_VALUE) {
1780  double diff = dpts - get_master_clock(is);
1781  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1782  diff - is->frame_last_filter_delay < 0 &&
1783  is->viddec.pkt_serial == is->vidclk.serial &&
1784  is->videoq.nb_packets) {
1785  is->frame_drops_early++;
1786  av_frame_unref(frame);
1787  got_picture = 0;
1788  }
1789  }
1790  }
1791  }
1792 
1793  return got_picture;
1794 }
1795 
1796 #if CONFIG_AVFILTER
1797 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1798  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1799 {
1800  int ret, i;
1801  int nb_filters = graph->nb_filters;
1802  AVFilterInOut *outputs = NULL, *inputs = NULL;
1803 
1804  if (filtergraph) {
1805  outputs = avfilter_inout_alloc();
1806  inputs = avfilter_inout_alloc();
1807  if (!outputs || !inputs) {
1808  ret = AVERROR(ENOMEM);
1809  goto fail;
1810  }
1811 
1812  outputs->name = av_strdup("in");
1813  outputs->filter_ctx = source_ctx;
1814  outputs->pad_idx = 0;
1815  outputs->next = NULL;
1816 
1817  inputs->name = av_strdup("out");
1818  inputs->filter_ctx = sink_ctx;
1819  inputs->pad_idx = 0;
1820  inputs->next = NULL;
1821 
1822  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1823  goto fail;
1824  } else {
1825  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1826  goto fail;
1827  }
1828 
1829  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1830  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1831  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1832 
1833  ret = avfilter_graph_config(graph, NULL);
1834 fail:
1835  avfilter_inout_free(&outputs);
1836  avfilter_inout_free(&inputs);
1837  return ret;
1838 }
1839 
1840 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1841 {
1842  enum AVPixelFormat pix_fmts[FF_ARRAY_ELEMS(sdl_texture_format_map)];
1843  char sws_flags_str[512] = "";
1844  char buffersrc_args[256];
1845  int ret;
1846  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1847  AVCodecParameters *codecpar = is->video_st->codecpar;
1848  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1849  AVDictionaryEntry *e = NULL;
1850  int nb_pix_fmts = 0;
1851  int i, j;
1852 
1853  for (i = 0; i < renderer_info.num_texture_formats; i++) {
1854  for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; j++) {
1855  if (renderer_info.texture_formats[i] == sdl_texture_format_map[j].texture_fmt) {
1856  pix_fmts[nb_pix_fmts++] = sdl_texture_format_map[j].format;
1857  break;
1858  }
1859  }
1860  }
1861  pix_fmts[nb_pix_fmts] = AV_PIX_FMT_NONE;
1862 
1863  while ((e = av_dict_get(sws_dict, "", e, AV_DICT_IGNORE_SUFFIX))) {
1864  if (!strcmp(e->key, "sws_flags")) {
1865  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1866  } else
1867  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value);
1868  }
1869  if (strlen(sws_flags_str))
1870  sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1871 
1872  graph->scale_sws_opts = av_strdup(sws_flags_str);
1873 
1874  snprintf(buffersrc_args, sizeof(buffersrc_args),
1875  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1876  frame->width, frame->height, frame->format,
1877  is->video_st->time_base.num, is->video_st->time_base.den,
1878  codecpar->sample_aspect_ratio.num, FFMAX(codecpar->sample_aspect_ratio.den, 1));
1879  if (fr.num && fr.den)
1880  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1881 
1882  if ((ret = avfilter_graph_create_filter(&filt_src,
1883  avfilter_get_by_name("buffer"),
1884  "ffplay_buffer", buffersrc_args, NULL,
1885  graph)) < 0)
1886  goto fail;
1887 
1888  ret = avfilter_graph_create_filter(&filt_out,
1889  avfilter_get_by_name("buffersink"),
1890  "ffplay_buffersink", NULL, NULL, graph);
1891  if (ret < 0)
1892  goto fail;
1893 
1894  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1895  goto fail;
1896 
1897  last_filter = filt_out;
1898 
1899 /* Note: this macro adds a filter before the last added filter, so the
1900  * processing order of the filters is reversed. */
1901 #define INSERT_FILT(name, arg) do { \
1902  AVFilterContext *filt_ctx; \
1903  \
1904  ret = avfilter_graph_create_filter(&filt_ctx, \
1905  avfilter_get_by_name(name), \
1906  "ffplay_" name, arg, NULL, graph); \
1907  if (ret < 0) \
1908  goto fail; \
1909  \
1910  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1911  if (ret < 0) \
1912  goto fail; \
1913  \
1914  last_filter = filt_ctx; \
1915 } while (0)
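/* Example of the reversed order: for a 180 degree rotation the code below
 * inserts "hflip" and then "vflip", but because each INSERT_FILT() links the
 * new filter in front of the previously inserted one, the end of the chain
 * becomes ... -> vflip -> hflip -> buffersink, which is still equivalent to
 * the intended 180 degree rotation. */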
1916 
1917  if (autorotate) {
1918  double theta = get_rotation(is->video_st);
1919 
1920  if (fabs(theta - 90) < 1.0) {
1921  INSERT_FILT("transpose", "clock");
1922  } else if (fabs(theta - 180) < 1.0) {
1923  INSERT_FILT("hflip", NULL);
1924  INSERT_FILT("vflip", NULL);
1925  } else if (fabs(theta - 270) < 1.0) {
1926  INSERT_FILT("transpose", "cclock");
1927  } else if (fabs(theta) > 1.0) {
1928  char rotate_buf[64];
1929  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1930  INSERT_FILT("rotate", rotate_buf);
1931  }
1932  }
1933 
1934  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
1935  goto fail;
1936 
1937  is->in_video_filter = filt_src;
1938  is->out_video_filter = filt_out;
1939 
1940 fail:
1941  return ret;
1942 }
1943 
1944 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1945 {
1946  static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
1947  int sample_rates[2] = { 0, -1 };
1948  int64_t channel_layouts[2] = { 0, -1 };
1949  int channels[2] = { 0, -1 };
1950  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1951  char aresample_swr_opts[512] = "";
1952  AVDictionaryEntry *e = NULL;
1953  char asrc_args[256];
1954  int ret;
1955 
1956  avfilter_graph_free(&is->agraph);
1957  if (!(is->agraph = avfilter_graph_alloc()))
1958  return AVERROR(ENOMEM);
1959 
1960  while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
1961  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
1962  if (strlen(aresample_swr_opts))
1963  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
1964  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
1965 
1966  ret = snprintf(asrc_args, sizeof(asrc_args),
1967  "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
1968  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
1969  is->audio_filter_src.channels,
1970  1, is->audio_filter_src.freq);
1971  if (is->audio_filter_src.channel_layout)
1972  snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
1973  ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
1974 
1975  ret = avfilter_graph_create_filter(&filt_asrc,
1976  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1977  asrc_args, NULL, is->agraph);
1978  if (ret < 0)
1979  goto end;
1980 
1981 
1982  ret = avfilter_graph_create_filter(&filt_asink,
1983  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
1984  NULL, NULL, is->agraph);
1985  if (ret < 0)
1986  goto end;
1987 
1988  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1989  goto end;
1990  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1991  goto end;
1992 
1993  if (force_output_format) {
1994  channel_layouts[0] = is->audio_tgt.channel_layout;
1995  channels [0] = is->audio_tgt.channels;
1996  sample_rates [0] = is->audio_tgt.freq;
1997  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
1998  goto end;
1999  if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2000  goto end;
2001  if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2002  goto end;
2003  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2004  goto end;
2005  }
2006 
2007 
2008  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
2009  goto end;
2010 
2011  is->in_audio_filter = filt_asrc;
2012  is->out_audio_filter = filt_asink;
2013 
2014 end:
2015  if (ret < 0)
2016  avfilter_graph_free(&is->agraph);
2017  return ret;
2018 }
2019 #endif /* CONFIG_AVFILTER */
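/*
 * A minimal sketch for illustration (helper name and arguments are hypothetical):
 * abuffersink takes terminated lists for its output constraints, so forcing the device
 * format as done above amounts to setting four such lists on the sink.
 */
static int example_force_sink_format(AVFilterContext *sink, int freq,
                                     int channels, int64_t layout)
{
    const enum AVSampleFormat fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
    const int     rates[]            = { freq,     -1 };
    const int     counts[]           = { channels, -1 };
    const int64_t layouts[]          = { layout,   -1 };
    int ret;

    if ((ret = av_opt_set_int_list(sink, "sample_fmts",     fmts,    AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0 ||
        (ret = av_opt_set_int_list(sink, "sample_rates",    rates,   -1, AV_OPT_SEARCH_CHILDREN)) < 0 ||
        (ret = av_opt_set_int_list(sink, "channel_counts",  counts,  -1, AV_OPT_SEARCH_CHILDREN)) < 0 ||
        (ret = av_opt_set_int_list(sink, "channel_layouts", layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
        return ret;
    return 0;
}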
2020 
2021 static int audio_thread(void *arg)
2022 {
2023  VideoState *is = arg;
2024  AVFrame *frame = av_frame_alloc();
2025  Frame *af;
2026 #if CONFIG_AVFILTER
2027  int last_serial = -1;
2028  int64_t dec_channel_layout;
2029  int reconfigure;
2030 #endif
2031  int got_frame = 0;
2032  AVRational tb;
2033  int ret = 0;
2034 
2035  if (!frame)
2036  return AVERROR(ENOMEM);
2037 
2038  do {
2039  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2040  goto the_end;
2041 
2042  if (got_frame) {
2043  tb = (AVRational){1, frame->sample_rate};
2044 
2045 #if CONFIG_AVFILTER
2046  dec_channel_layout = get_valid_channel_layout(frame->channel_layout, frame->channels);
2047 
2048  reconfigure =
2049  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2050  frame->format, frame->channels) ||
2051  is->audio_filter_src.channel_layout != dec_channel_layout ||
2052  is->audio_filter_src.freq != frame->sample_rate ||
2053  is->auddec.pkt_serial != last_serial;
2054 
2055  if (reconfigure) {
2056  char buf1[1024], buf2[1024];
2057  av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2058  av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2059  av_log(NULL, AV_LOG_DEBUG,
2060  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2061  is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2062  frame->sample_rate, frame->channels, av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2063 
2064  is->audio_filter_src.fmt = frame->format;
2065  is->audio_filter_src.channels = frame->channels;
2066  is->audio_filter_src.channel_layout = dec_channel_layout;
2067  is->audio_filter_src.freq = frame->sample_rate;
2068  last_serial = is->auddec.pkt_serial;
2069 
2070  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2071  goto the_end;
2072  }
2073 
2074  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2075  goto the_end;
2076 
2077  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2078  tb = av_buffersink_get_time_base(is->out_audio_filter);
2079 #endif
2080  if (!(af = frame_queue_peek_writable(&is->sampq)))
2081  goto the_end;
2082 
2083  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2084  af->pos = frame->pkt_pos;
2085  af->serial = is->auddec.pkt_serial;
2086  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2087 
2088  av_frame_move_ref(af->frame, frame);
2089  frame_queue_push(&is->sampq);
2090 
2091 #if CONFIG_AVFILTER
2092  if (is->audioq.serial != is->auddec.pkt_serial)
2093  break;
2094  }
2095  if (ret == AVERROR_EOF)
2096  is->auddec.finished = is->auddec.pkt_serial;
2097 #endif
2098  }
2099  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2100  the_end:
2101 #if CONFIG_AVFILTER
2102  avfilter_graph_free(&is->agraph);
2103 #endif
2104  av_frame_free(&frame);
2105  return ret;
2106 }
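/*
 * A small sketch of the timestamp arithmetic used when queuing an audio frame above:
 * with tb = 1/sample_rate, the pts in seconds is pts * av_q2d(tb), and a frame of
 * nb_samples samples lasts nb_samples / sample_rate seconds (AVRational and av_q2d()
 * come from libavutil, already included by this file).
 */
static double example_audio_frame_duration(int nb_samples, int sample_rate)
{
    /* e.g. 1024 samples at 48000 Hz -> av_q2d((AVRational){1024, 48000}) ~= 0.0213 s */
    return av_q2d((AVRational){ nb_samples, sample_rate });
}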
2107 
2108 static int decoder_start(Decoder *d, int (*fn)(void *), void *arg)
2109 {
2111  d->decoder_tid = SDL_CreateThread(fn, "decoder", arg);
2112  if (!d->decoder_tid) {
2113  av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
2114  return AVERROR(ENOMEM);
2115  }
2116  return 0;
2117 }
2118 
2119 static int video_thread(void *arg)
2120 {
2121  VideoState *is = arg;
2122  AVFrame *frame = av_frame_alloc();
2123  double pts;
2124  double duration;
2125  int ret;
2126  AVRational tb = is->video_st->time_base;
2127  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2128 
2129 #if CONFIG_AVFILTER
2130  AVFilterGraph *graph = avfilter_graph_alloc();
2131  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2132  int last_w = 0;
2133  int last_h = 0;
2134  enum AVPixelFormat last_format = -2;
2135  int last_serial = -1;
2136  int last_vfilter_idx = 0;
2137  if (!graph) {
2138  av_frame_free(&frame);
2139  return AVERROR(ENOMEM);
2140  }
2141 
2142 #endif
2143 
2144  if (!frame) {
2145 #if CONFIG_AVFILTER
2146  avfilter_graph_free(&graph);
2147 #endif
2148  return AVERROR(ENOMEM);
2149  }
2150 
2151  for (;;) {
2152  ret = get_video_frame(is, frame);
2153  if (ret < 0)
2154  goto the_end;
2155  if (!ret)
2156  continue;
2157 
2158 #if CONFIG_AVFILTER
2159  if ( last_w != frame->width
2160  || last_h != frame->height
2161  || last_format != frame->format
2162  || last_serial != is->viddec.pkt_serial
2163  || last_vfilter_idx != is->vfilter_idx) {
2164  av_log(NULL, AV_LOG_DEBUG,
2165  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2166  last_w, last_h,
2167  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2168  frame->width, frame->height,
2169  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2170  avfilter_graph_free(&graph);
2171  graph = avfilter_graph_alloc();
2172  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2173  SDL_Event event;
2174  event.type = FF_QUIT_EVENT;
2175  event.user.data1 = is;
2176  SDL_PushEvent(&event);
2177  goto the_end;
2178  }
2179  filt_in = is->in_video_filter;
2180  filt_out = is->out_video_filter;
2181  last_w = frame->width;
2182  last_h = frame->height;
2183  last_format = frame->format;
2184  last_serial = is->viddec.pkt_serial;
2185  last_vfilter_idx = is->vfilter_idx;
2186  frame_rate = av_buffersink_get_frame_rate(filt_out);
2187  }
2188 
2189  ret = av_buffersrc_add_frame(filt_in, frame);
2190  if (ret < 0)
2191  goto the_end;
2192 
2193  while (ret >= 0) {
2194  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2195 
2196  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2197  if (ret < 0) {
2198  if (ret == AVERROR_EOF)
2199  is->viddec.finished = is->viddec.pkt_serial;
2200  ret = 0;
2201  break;
2202  }
2203 
2204  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2205  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2206  is->frame_last_filter_delay = 0;
2207  tb = av_buffersink_get_time_base(filt_out);
2208 #endif
2209  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2210  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2211  ret = queue_picture(is, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial);
2212  av_frame_unref(frame);
2213 #if CONFIG_AVFILTER
2214  }
2215 #endif
2216 
2217  if (ret < 0)
2218  goto the_end;
2219  }
2220  the_end:
2221 #if CONFIG_AVFILTER
2222  avfilter_graph_free(&graph);
2223 #endif
2224  av_frame_free(&frame);
2225  return 0;
2226 }
2227 
2228 static int subtitle_thread(void *arg)
2229 {
2230  VideoState *is = arg;
2231  Frame *sp;
2232  int got_subtitle;
2233  double pts;
2234 
2235  for (;;) {
2236  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2237  return 0;
2238 
2239  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2240  break;
2241 
2242  pts = 0;
2243 
2244  if (got_subtitle && sp->sub.format == 0) {
2245  if (sp->sub.pts != AV_NOPTS_VALUE)
2246  pts = sp->sub.pts / (double)AV_TIME_BASE;
2247  sp->pts = pts;
2248  sp->serial = is->subdec.pkt_serial;
2249  sp->width = is->subdec.avctx->width;
2250  sp->height = is->subdec.avctx->height;
2251  sp->uploaded = 0;
2252 
2253  /* now we can update the picture count */
2254  frame_queue_push(&is->subpq);
2255  } else if (got_subtitle) {
2256  avsubtitle_free(&sp->sub);
2257  }
2258  }
2259  return 0;
2260 }
2261 
2262 /* copy samples for viewing in editor window */
2263 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2264 {
2265  int size, len;
2266 
2267  size = samples_size / sizeof(short);
2268  while (size > 0) {
2269  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2270  if (len > size)
2271  len = size;
2272  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2273  samples += len;
2274  is->sample_array_index += len;
2275  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2276  is->sample_array_index = 0;
2277  size -= len;
2278  }
2279 }
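/*
 * A generic sketch of the wrap-around copy performed above: write incoming samples into a
 * fixed-size ring, resetting the write index when it reaches the end. The names used here
 * (ring, ring_size, write_idx) are placeholders rather than ffplay fields; memcpy() comes
 * from <string.h>.
 */
static void example_ring_write(short *ring, int ring_size, int *write_idx,
                               const short *in, int in_count)
{
    while (in_count > 0) {
        int len = ring_size - *write_idx;   /* contiguous room before the wrap point */
        if (len > in_count)
            len = in_count;
        memcpy(ring + *write_idx, in, len * sizeof(*ring));
        in         += len;
        *write_idx  = (*write_idx + len) % ring_size;
        in_count   -= len;
    }
}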
2280 
2281 /* return the wanted number of samples to get better sync if sync_type is video
2282  * or external master clock */
2283 static int synchronize_audio(VideoState *is, int nb_samples)
2284 {
2285  int wanted_nb_samples = nb_samples;
2286 
2287  /* if not master, then we try to remove or add samples to correct the clock */
2288  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2289  double diff, avg_diff;
2290  int min_nb_samples, max_nb_samples;
2291 
2292  diff = get_clock(&is->audclk) - get_master_clock(is);
2293 
2294  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2295  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2296  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2297  /* not enough measures to have a correct estimate */
2298  is->audio_diff_avg_count++;
2299  } else {
2300  /* estimate the A-V difference */
2301  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2302 
2303  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2304  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2305  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2306  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2307  wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2308  }
2309  av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2310  diff, avg_diff, wanted_nb_samples - nb_samples,
2311  is->audio_clock, is->audio_diff_threshold);
2312  }
2313  } else {
2314  /* the difference is too large: these are probably initial PTS errors,
2315  so reset the A-V filter */
2316  is->audio_diff_avg_count = 0;
2317  is->audio_diff_cum = 0;
2318  }
2319  }
2320 
2321  return wanted_nb_samples;
2322 }
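/*
 * A worked sketch of the averaging above: audio_diff_cum is a geometric series in
 * audio_diff_avg_coef, so multiplying it by (1 - coef) yields a weighted mean of the most
 * recent differences. With coef = exp(log(0.01) / 20) ~= 0.794, the 20th oldest
 * measurement still carries roughly 1% of its original weight.
 */
static double example_weighted_avg_diff(const double *diffs, int n, double coef)
{
    double cum = 0.0;
    int i;

    for (i = 0; i < n; i++)
        cum = diffs[i] + coef * cum;   /* same update as in synchronize_audio() */
    return cum * (1.0 - coef);         /* normalized estimate of the A-V drift */
}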
2323 
2324 /**
2325  * Decode one audio frame and return its uncompressed size.
2326  *
2327  * The processed audio frame is decoded, converted if required, and
2328  * stored in is->audio_buf, with size in bytes given by the return
2329  * value.
2330  */
2331 static int audio_decode_frame(VideoState *is)
2332 {
2333  int data_size, resampled_data_size;
2334  int64_t dec_channel_layout;
2335  av_unused double audio_clock0;
2336  int wanted_nb_samples;
2337  Frame *af;
2338 
2339  if (is->paused)
2340  return -1;
2341 
2342  do {
2343 #if defined(_WIN32)
2344  while (frame_queue_nb_remaining(&is->sampq) == 0) {
2345  if ((av_gettime_relative() - audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
2346  return -1;
2347  av_usleep (1000);
2348  }
2349 #endif
2350  if (!(af = frame_queue_peek_readable(&is->sampq)))
2351  return -1;
2352  frame_queue_next(&is->sampq);
2353  } while (af->serial != is->audioq.serial);
2354 
2355  data_size = av_samples_get_buffer_size(NULL, af->frame->channels,
2356  af->frame->nb_samples,
2357  af->frame->format, 1);
2358 
2359  dec_channel_layout =
2360  (af->frame->channel_layout && af->frame->channels == av_get_channel_layout_nb_channels(af->frame->channel_layout)) ?
2361  af->frame->channel_layout : av_get_default_channel_layout(af->frame->channels);
2362  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2363 
2364  if (af->frame->format != is->audio_src.fmt ||
2365  dec_channel_layout != is->audio_src.channel_layout ||
2366  af->frame->sample_rate != is->audio_src.freq ||
2367  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2368  swr_free(&is->swr_ctx);
2369  is->swr_ctx = swr_alloc_set_opts(NULL,
2370  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2371  dec_channel_layout, af->frame->format, af->frame->sample_rate,
2372  0, NULL);
2373  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2374  av_log(NULL, AV_LOG_ERROR,
2375  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2376  af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), af->frame->channels,
2377  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2378  swr_free(&is->swr_ctx);
2379  return -1;
2380  }
2381  is->audio_src.channel_layout = dec_channel_layout;
2382  is->audio_src.channels = af->frame->channels;
2383  is->audio_src.freq = af->frame->sample_rate;
2384  is->audio_src.fmt = af->frame->format;
2385  }
2386 
2387  if (is->swr_ctx) {
2388  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2389  uint8_t **out = &is->audio_buf1;
2390  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2391  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2392  int len2;
2393  if (out_size < 0) {
2394  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2395  return -1;
2396  }
2397  if (wanted_nb_samples != af->frame->nb_samples) {
2398  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2399  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2400  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2401  return -1;
2402  }
2403  }
2404  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2405  if (!is->audio_buf1)
2406  return AVERROR(ENOMEM);
2407  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2408  if (len2 < 0) {
2409  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2410  return -1;
2411  }
2412  if (len2 == out_count) {
2413  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2414  if (swr_init(is->swr_ctx) < 0)
2415  swr_free(&is->swr_ctx);
2416  }
2417  is->audio_buf = is->audio_buf1;
2418  resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2419  } else {
2420  is->audio_buf = af->frame->data[0];
2421  resampled_data_size = data_size;
2422  }
2423 
2424  audio_clock0 = is->audio_clock;
2425  /* update the audio clock with the pts */
2426  if (!isnan(af->pts))
2427  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2428  else
2429  is->audio_clock = NAN;
2430  is->audio_clock_serial = af->serial;
2431 #ifdef DEBUG
2432  {
2433  static double last_clock;
2434  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2435  is->audio_clock - last_clock,
2436  is->audio_clock, audio_clock0);
2437  last_clock = is->audio_clock;
2438  }
2439 #endif
2440  return resampled_data_size;
2441 }
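/*
 * A reduced sketch of the resampling path above, using the same pre-5.x channel-layout
 * API: build an SwrContext for one source/target pair and convert a single frame.
 * Buffer sizing, sample compensation and context reuse are deliberately omitted; the
 * helper name and parameters are illustrative only.
 */
static int example_resample_frame(AVFrame *in, uint8_t **out, int out_count,
                                  int64_t out_layout, enum AVSampleFormat out_fmt,
                                  int out_rate)
{
    struct SwrContext *swr = swr_alloc_set_opts(NULL,
                                                out_layout, out_fmt, out_rate,
                                                in->channel_layout, in->format, in->sample_rate,
                                                0, NULL);
    int len;

    if (!swr || swr_init(swr) < 0) {
        swr_free(&swr);
        return -1;
    }
    len = swr_convert(swr, out, out_count,
                      (const uint8_t **)in->extended_data, in->nb_samples);
    swr_free(&swr);
    return len;   /* samples written per channel, or a negative error code */
}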
2442 
2443 /* prepare a new audio buffer */
2444 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2445 {
2446  VideoState *is = opaque;
2447  int audio_size, len1;
2448 
2449  audio_callback_time = av_gettime_relative();
2450 
2451  while (len > 0) {
2452  if (is->audio_buf_index >= is->audio_buf_size) {
2453  audio_size = audio_decode_frame(is);
2454  if (audio_size < 0) {
2455  /* if error, just output silence */
2456  is->audio_buf = NULL;
2457  is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2458  } else {
2459  if (is->show_mode != SHOW_MODE_VIDEO)
2460  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2461  is->audio_buf_size = audio_size;
2462  }
2463  is->audio_buf_index = 0;
2464  }
2465  len1 = is->audio_buf_size - is->audio_buf_index;
2466  if (len1 > len)
2467  len1 = len;
2468  if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
2469  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2470  else {
2471  memset(stream, 0, len1);
2472  if (!is->muted && is->audio_buf)
2473  SDL_MixAudioFormat(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, is->audio_volume);
2474  }
2475  len -= len1;
2476  stream += len1;
2477  is->audio_buf_index += len1;
2478  }
2479  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2480  /* Let's assume the audio driver that is used by SDL has two periods. */
2481  if (!isnan(is->audio_clock)) {
2482  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2483  sync_clock_to_slave(&is->extclk, &is->audclk);
2484  }
2485 }
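/*
 * A small sketch of the clock update above: the sample currently audible lags
 * is->audio_clock (the pts just past the last decoded data) by whatever is still
 * buffered, approximated as two SDL hardware periods plus the unwritten tail of
 * audio_buf, converted from bytes to seconds. Helper and parameter names are illustrative.
 */
static double example_audible_pts(double decoded_clock_sec, int hw_buf_size,
                                  int write_buf_size, int bytes_per_sec)
{
    return decoded_clock_sec - (double)(2 * hw_buf_size + write_buf_size) / bytes_per_sec;
}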
2486 
2487 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2488 {
2489  SDL_AudioSpec wanted_spec, spec;
2490  const char *env;
2491  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2492  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2493  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2494 
2495  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2496  if (env) {
2497  wanted_nb_channels = atoi(env);
2498  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2499  }
2500  if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2501  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2502  wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2503  }
2504  wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2505  wanted_spec.channels = wanted_nb_channels;
2506  wanted_spec.freq = wanted_sample_rate;
2507  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2508  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2509  return -1;
2510  }
2511  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2512  next_sample_rate_idx--;
2513  wanted_spec.format = AUDIO_S16SYS;
2514  wanted_spec.silence = 0;
2515  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
2516  wanted_spec.callback = sdl_audio_callback;
2517  wanted_spec.userdata = opaque;
2518  while (!(audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE))) {
2519  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2520  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2521  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2522  if (!wanted_spec.channels) {
2523  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2524  wanted_spec.channels = wanted_nb_channels;
2525  if (!wanted_spec.freq) {
2526  av_log(NULL, AV_LOG_ERROR,
2527  "No more combinations to try, audio open failed\n");
2528  return -1;
2529  }
2530  }
2531  wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2532  }
2533  if (spec.format != AUDIO_S16SYS) {
2534  av_log(NULL, AV_LOG_ERROR,
2535  "SDL advised audio format %d is not supported!\n", spec.format);
2536  return -1;
2537  }
2538  if (spec.channels != wanted_spec.channels) {
2539  wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2540  if (!wanted_channel_layout) {
2541  av_log(NULL, AV_LOG_ERROR,
2542  "SDL advised channel count %d is not supported!\n", spec.channels);
2543  return -1;
2544  }
2545  }
2546 
2547  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2548  audio_hw_params->freq = spec.freq;
2549  audio_hw_params->channel_layout = wanted_channel_layout;
2550  audio_hw_params->channels = spec.channels;
2551  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
2552  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2553  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2554  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2555  return -1;
2556  }
2557  return spec.size;
2558 }
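/*
 * A sketch of the buffer-size choice made above: aim for at most
 * SDL_AUDIO_MAX_CALLBACKS_PER_SEC callbacks per second, round the per-callback sample
 * count up to a power of two, and never go below SDL_AUDIO_MIN_BUFFER_SIZE.
 * For 48 kHz: 48000 / 30 = 1600, and 2 << av_log2(1600) = 2048 samples per callback.
 */
static int example_sdl_buffer_samples(int freq)
{
    return FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE,
                 2 << av_log2(freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
}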
2559 
2560 /* open a given stream. Return 0 if OK */
2561 static int stream_component_open(VideoState *is, int stream_index)
2562 {
2563  AVFormatContext *ic = is->ic;
2564  AVCodecContext *avctx;
2565  AVCodec *codec;
2566  const char *forced_codec_name = NULL;
2567  AVDictionary *opts = NULL;
2568  AVDictionaryEntry *t = NULL;
2569  int sample_rate, nb_channels;
2570  int64_t channel_layout;
2571  int ret = 0;
2572  int stream_lowres = lowres;
2573 
2574  if (stream_index < 0 || stream_index >= ic->nb_streams)
2575  return -1;
2576 
2577  avctx = avcodec_alloc_context3(NULL);
2578  if (!avctx)
2579  return AVERROR(ENOMEM);
2580 
2581  ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
2582  if (ret < 0)
2583  goto fail;
2584  avctx->pkt_timebase = ic->streams[stream_index]->time_base;
2585 
2586  codec = avcodec_find_decoder(avctx->codec_id);
2587 
2588  switch(avctx->codec_type){
2589  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2590  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2591  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2592  }
2593  if (forced_codec_name)
2594  codec = avcodec_find_decoder_by_name(forced_codec_name);
2595  if (!codec) {
2596  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2597  "No codec could be found with name '%s'\n", forced_codec_name);
2598  else av_log(NULL, AV_LOG_WARNING,
2599  "No decoder could be found for codec %s\n", avcodec_get_name(avctx->codec_id));
2600  ret = AVERROR(EINVAL);
2601  goto fail;
2602  }
2603 
2604  avctx->codec_id = codec->id;
2605  if (stream_lowres > codec->max_lowres) {
2606  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2607  codec->max_lowres);
2608  stream_lowres = codec->max_lowres;
2609  }
2610  avctx->lowres = stream_lowres;
2611 
2612  if (fast)
2613  avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2614 
2615  opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2616  if (!av_dict_get(opts, "threads", NULL, 0))
2617  av_dict_set(&opts, "threads", "auto", 0);
2618  if (stream_lowres)
2619  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2620  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2621  av_dict_set(&opts, "refcounted_frames", "1", 0);
2622  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2623  goto fail;
2624  }
2625  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2626  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2627  ret = AVERROR_OPTION_NOT_FOUND;
2628  goto fail;
2629  }
2630 
2631  is->eof = 0;
2632  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2633  switch (avctx->codec_type) {
2634  case AVMEDIA_TYPE_AUDIO:
2635 #if CONFIG_AVFILTER
2636  {
2637  AVFilterContext *sink;
2638 
2639  is->audio_filter_src.freq = avctx->sample_rate;
2640  is->audio_filter_src.channels = avctx->channels;
2641  is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2642  is->audio_filter_src.fmt = avctx->sample_fmt;
2643  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2644  goto fail;
2645  sink = is->out_audio_filter;
2646  sample_rate = av_buffersink_get_sample_rate(sink);
2647  nb_channels = av_buffersink_get_channels(sink);
2648  channel_layout = av_buffersink_get_channel_layout(sink);
2649  }
2650 #else
2651  sample_rate = avctx->sample_rate;
2652  nb_channels = avctx->channels;
2653  channel_layout = avctx->channel_layout;
2654 #endif
2655 
2656  /* prepare audio output */
2657  if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2658  goto fail;
2659  is->audio_hw_buf_size = ret;
2660  is->audio_src = is->audio_tgt;
2661  is->audio_buf_size = 0;
2662  is->audio_buf_index = 0;
2663 
2664  /* init averaging filter */
2665  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2666  is->audio_diff_avg_count = 0;
2667  /* since we do not have a precise enough audio FIFO fullness,
2668  we correct audio sync only if larger than this threshold */
2669  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2670 
2671  is->audio_stream = stream_index;
2672  is->audio_st = ic->streams[stream_index];
2673 
2674  decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
2675  if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
2676  is->auddec.start_pts = is->audio_st->start_time;
2677  is->auddec.start_pts_tb = is->audio_st->time_base;
2678  }
2679  if ((ret = decoder_start(&is->auddec, audio_thread, is)) < 0)
2680  goto out;
2681  SDL_PauseAudioDevice(audio_dev, 0);
2682  break;
2683  case AVMEDIA_TYPE_VIDEO:
2684  is->video_stream = stream_index;
2685  is->video_st = ic->streams[stream_index];
2686 
2687  decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
2688  if ((ret = decoder_start(&is->viddec, video_thread, is)) < 0)
2689  goto out;
2690  is->queue_attachments_req = 1;
2691  break;
2692  case AVMEDIA_TYPE_SUBTITLE:
2693  is->subtitle_stream = stream_index;
2694  is->subtitle_st = ic->streams[stream_index];
2695 
2696  decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread);
2697  if ((ret = decoder_start(&is->subdec, subtitle_thread, is)) < 0)
2698  goto out;
2699  break;
2700  default:
2701  break;
2702  }
2703  goto out;
2704 
2705 fail:
2706  avcodec_free_context(&avctx);
2707 out:
2708  av_dict_free(&opts);
2709 
2710  return ret;
2711 }
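/*
 * A condensed sketch of the decoder-open pattern used above (helper name is illustrative):
 * copy the stream parameters into a fresh context, open it with an AVDictionary of
 * options, and treat anything left in the dictionary afterwards as an unrecognized
 * option. Option filtering via filter_codec_opts() and the lowres/fast tweaks are left out.
 */
static int example_open_decoder(AVStream *st, AVCodecContext **pavctx)
{
    AVCodecContext *avctx = avcodec_alloc_context3(NULL);
    AVDictionary *opts = NULL;
    AVDictionaryEntry *t;
    AVCodec *codec;
    int ret;

    if (!avctx)
        return AVERROR(ENOMEM);
    if ((ret = avcodec_parameters_to_context(avctx, st->codecpar)) < 0)
        goto fail;
    if (!(codec = avcodec_find_decoder(avctx->codec_id))) {
        ret = AVERROR(EINVAL);
        goto fail;
    }
    av_dict_set(&opts, "threads", "auto", 0);
    if ((ret = avcodec_open2(avctx, codec, &opts)) < 0)
        goto fail;
    if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
        ret = AVERROR_OPTION_NOT_FOUND;
        goto fail;
    }
    av_dict_free(&opts);
    *pavctx = avctx;
    return 0;
fail:
    av_dict_free(&opts);
    avcodec_free_context(&avctx);
    return ret;
}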
2712 
2713 static int decode_interrupt_cb(void *ctx)
2714 {
2715  VideoState *is = ctx;
2716  return is->abort_request;
2717 }
2718 
2719 static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue) {
2720  return stream_id < 0 ||
2721  queue->abort_request ||
2722  (st->disposition & AV_DISPOSITION_ATTACHED_PIC) ||
2723  queue->nb_packets > MIN_FRAMES && (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0);
2724 }
2725 
2726 static int is_realtime(AVFormatContext *s)
2727 {
2728  if( !strcmp(s->iformat->name, "rtp")
2729  || !strcmp(s->iformat->name, "rtsp")
2730  || !strcmp(s->iformat->name, "sdp")
2731  )
2732  return 1;
2733 
2734  if(s->pb && ( !strncmp(s->url, "rtp:", 4)
2735  || !strncmp(s->url, "udp:", 4)
2736  )
2737  )
2738  return 1;
2739  return 0;
2740 }
2741 
2742 /* this thread gets the stream from the disk or the network */
2743 static int read_thread(void *arg)
2744 {
2745  VideoState *is = arg;
2746  AVFormatContext *ic = NULL;
2747  int err, i, ret;
2748  int st_index[AVMEDIA_TYPE_NB];
2749  AVPacket pkt1, *pkt = &pkt1;
2750  int64_t stream_start_time;
2751  int pkt_in_play_range = 0;
2752  AVDictionaryEntry *t;
2753  SDL_mutex *wait_mutex = SDL_CreateMutex();
2754  int scan_all_pmts_set = 0;
2755  int64_t pkt_ts;
2756 
2757  if (!wait_mutex) {
2758  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
2759  ret = AVERROR(ENOMEM);
2760  goto fail;
2761  }
2762 
2763  memset(st_index, -1, sizeof(st_index));
2764  is->last_video_stream = is->video_stream = -1;
2765  is->last_audio_stream = is->audio_stream = -1;
2766  is->last_subtitle_stream = is->subtitle_stream = -1;
2767  is->eof = 0;
2768 
2769  ic = avformat_alloc_context();
2770  if (!ic) {
2771  av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2772  ret = AVERROR(ENOMEM);
2773  goto fail;
2774  }
2775  ic->interrupt_callback.callback = decode_interrupt_cb;
2776  ic->interrupt_callback.opaque = is;
2777  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2778  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2779  scan_all_pmts_set = 1;
2780  }
2781  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2782  if (err < 0) {
2783  print_error(is->filename, err);
2784  ret = -1;
2785  goto fail;
2786  }
2787  if (scan_all_pmts_set)
2788  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2789 
2790  if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2791  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2792  ret = AVERROR_OPTION_NOT_FOUND;
2793  goto fail;
2794  }
2795  is->ic = ic;
2796 
2797  if (genpts)
2798  ic->flags |= AVFMT_FLAG_GENPTS;
2799 
2800  av_format_inject_global_side_data(ic);
2801 
2802  if (find_stream_info) {
2803  AVDictionary **opts = setup_find_stream_info_opts(ic, codec_opts);
2804  int orig_nb_streams = ic->nb_streams;
2805 
2806  err = avformat_find_stream_info(ic, opts);
2807 
2808  for (i = 0; i < orig_nb_streams; i++)
2809  av_dict_free(&opts[i]);
2810  av_freep(&opts);
2811 
2812  if (err < 0) {
2813  av_log(NULL, AV_LOG_WARNING,
2814  "%s: could not find codec parameters\n", is->filename);
2815  ret = -1;
2816  goto fail;
2817  }
2818  }
2819 
2820  if (ic->pb)
2821  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2822 
2823  if (seek_by_bytes < 0)
2824  seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
2825 
2826  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2827 
2828  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2829  window_title = av_asprintf("%s - %s", t->value, input_filename);
2830 
2831  /* if seeking requested, we execute it */
2832  if (start_time != AV_NOPTS_VALUE) {
2833  int64_t timestamp;
2834 
2835  timestamp = start_time;
2836  /* add the stream start time */
2837  if (ic->start_time != AV_NOPTS_VALUE)
2838  timestamp += ic->start_time;
2839  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2840  if (ret < 0) {
2841  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2842  is->filename, (double)timestamp / AV_TIME_BASE);
2843  }
2844  }
2845 
2846  is->realtime = is_realtime(ic);
2847 
2848  if (show_status)
2849  av_dump_format(ic, 0, is->filename, 0);
2850 
2851  for (i = 0; i < ic->nb_streams; i++) {
2852  AVStream *st = ic->streams[i];
2853  enum AVMediaType type = st->codecpar->codec_type;
2854  st->discard = AVDISCARD_ALL;
2855  if (type >= 0 && wanted_stream_spec[type] && st_index[type] == -1)
2856  if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
2857  st_index[type] = i;
2858  }
2859  for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
2860  if (wanted_stream_spec[i] && st_index[i] == -1) {
2861  av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
2862  st_index[i] = INT_MAX;
2863  }
2864  }
2865 
2866  if (!video_disable)
2867  st_index[AVMEDIA_TYPE_VIDEO] =
2868  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2869  st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2870  if (!audio_disable)
2871  st_index[AVMEDIA_TYPE_AUDIO] =
2872  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2873  st_index[AVMEDIA_TYPE_AUDIO],
2874  st_index[AVMEDIA_TYPE_VIDEO],
2875  NULL, 0);
2876  if (!video_disable && !subtitle_disable)
2877  st_index[AVMEDIA_TYPE_SUBTITLE] =
2878  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2879  st_index[AVMEDIA_TYPE_SUBTITLE],
2880  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2881  st_index[AVMEDIA_TYPE_AUDIO] :
2882  st_index[AVMEDIA_TYPE_VIDEO]),
2883  NULL, 0);
2884 
2885  is->show_mode = show_mode;
2886  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2887  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2888  AVCodecParameters *codecpar = st->codecpar;
2889  AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
2890  if (codecpar->width)
2891  set_default_window_size(codecpar->width, codecpar->height, sar);
2892  }
2893 
2894  /* open the streams */
2895  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2896  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2897  }
2898 
2899  ret = -1;
2900  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2901  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2902  }
2903  if (is->show_mode == SHOW_MODE_NONE)
2904  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2905 
2906  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2907  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2908  }
2909 
2910  if (is->video_stream < 0 && is->audio_stream < 0) {
2911  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
2912  is->filename);
2913  ret = -1;
2914  goto fail;
2915  }
2916 
2917  if (infinite_buffer < 0 && is->realtime)
2918  infinite_buffer = 1;
2919 
2920  for (;;) {
2921  if (is->abort_request)
2922  break;
2923  if (is->paused != is->last_paused) {
2924  is->last_paused = is->paused;
2925  if (is->paused)
2926  is->read_pause_return = av_read_pause(ic);
2927  else
2928  av_read_play(ic);
2929  }
2930 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2931  if (is->paused &&
2932  (!strcmp(ic->iformat->name, "rtsp") ||
2933  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2934  /* wait 10 ms to avoid trying to get another packet */
2935  /* XXX: horrible */
2936  SDL_Delay(10);
2937  continue;
2938  }
2939 #endif
2940  if (is->seek_req) {
2941  int64_t seek_target = is->seek_pos;
2942  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2943  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2944 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2945 // of the seek_pos/seek_rel variables
2946 
2947  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2948  if (ret < 0) {
2949  av_log(NULL, AV_LOG_ERROR,
2950  "%s: error while seeking\n", is->ic->url);
2951  } else {
2952  if (is->audio_stream >= 0) {
2953  packet_queue_flush(&is->audioq);
2954  packet_queue_put(&is->audioq, &flush_pkt);
2955  }
2956  if (is->subtitle_stream >= 0) {
2957  packet_queue_flush(&is->subtitleq);
2958  packet_queue_put(&is->subtitleq, &flush_pkt);
2959  }
2960  if (is->video_stream >= 0) {
2961  packet_queue_flush(&is->videoq);
2962  packet_queue_put(&is->videoq, &flush_pkt);
2963  }
2964  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
2965  set_clock(&is->extclk, NAN, 0);
2966  } else {
2967  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
2968  }
2969  }
2970  is->seek_req = 0;
2971  is->queue_attachments_req = 1;
2972  is->eof = 0;
2973  if (is->paused)
2974  step_to_next_frame(is);
2975  }
2976  if (is->queue_attachments_req) {
2977  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
2978  AVPacket copy = { 0 };
2979  if ((ret = av_packet_ref(&copy, &is->video_st->attached_pic)) < 0)
2980  goto fail;
2981  packet_queue_put(&is->videoq, &copy);
2982  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
2983  }
2984  is->queue_attachments_req = 0;
2985  }
2986 
2987  /* if the queues are full, no need to read more */
2988  if (infinite_buffer<1 &&
2989  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2990  || (stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq) &&
2991  stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq) &&
2992  stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq)))) {
2993  /* wait 10 ms */
2994  SDL_LockMutex(wait_mutex);
2995  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2996  SDL_UnlockMutex(wait_mutex);
2997  continue;
2998  }
2999  if (!is->paused &&
3000  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
3001  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
3002  if (loop != 1 && (!loop || --loop)) {
3003  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
3004  } else if (autoexit) {
3005  ret = AVERROR_EOF;
3006  goto fail;
3007  }
3008  }
3009  ret = av_read_frame(ic, pkt);
3010  if (ret < 0) {
3011  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
3012  if (is->video_stream >= 0)
3013  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
3014  if (is->audio_stream >= 0)
3015  packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
3016  if (is->subtitle_stream >= 0)
3017  packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
3018  is->eof = 1;
3019  }
3020  if (ic->pb && ic->pb->error)
3021  break;
3022  SDL_LockMutex(wait_mutex);
3023  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3024  SDL_UnlockMutex(wait_mutex);
3025  continue;
3026  } else {
3027  is->eof = 0;
3028  }
3029  /* check if packet is in play range specified by user, then queue, otherwise discard */
3030  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3031  pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
3032  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3033  (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3034  av_q2d(ic->streams[pkt->stream_index]->time_base) -
3035  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3036  <= ((double)duration / 1000000);
3037  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3038  packet_queue_put(&is->audioq, pkt);
3039  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3040  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3041  packet_queue_put(&is->videoq, pkt);
3042  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3043  packet_queue_put(&is->subtitleq, pkt);
3044  } else {
3045  av_packet_unref(pkt);
3046  }
3047  }
3048 
3049  ret = 0;
3050  fail:
3051  if (ic && !is->ic)
3052  avformat_close_input(&ic);
3053 
3054  if (ret != 0) {
3055  SDL_Event event;
3056 
3057  event.type = FF_QUIT_EVENT;
3058  event.user.data1 = is;
3059  SDL_PushEvent(&event);
3060  }
3061  SDL_DestroyMutex(wait_mutex);
3062  return 0;
3063 }
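/*
 * The play-range test from the loop above, pulled out into one function for clarity
 * (function and parameter names are illustrative): convert the packet timestamp to
 * seconds relative to the stream start, subtract the requested -ss offset, and compare
 * against the requested -t duration.
 */
static int example_pkt_in_play_range(AVRational time_base, int64_t pkt_ts,
                                     int64_t stream_start_time,
                                     int64_t start_time_us, int64_t duration_us)
{
    double ts_sec;

    if (duration_us == AV_NOPTS_VALUE)
        return 1;                    /* no -t given: everything is in range */
    ts_sec = (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) * av_q2d(time_base)
             - (double)(start_time_us != AV_NOPTS_VALUE ? start_time_us : 0) / 1000000;
    return ts_sec <= (double)duration_us / 1000000;
}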
3064 
3065 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
3066 {
3067  VideoState *is;
3068 
3069  is = av_mallocz(sizeof(VideoState));
3070  if (!is)
3071  return NULL;
3072  is->filename = av_strdup(filename);
3073  if (!is->filename)
3074  goto fail;
3075  is->iformat = iformat;
3076  is->ytop = 0;
3077  is->xleft = 0;
3078 
3079  /* start video display */
3080  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3081  goto fail;
3082  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3083  goto fail;
3084  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3085  goto fail;
3086 
3087  if (packet_queue_init(&is->videoq) < 0 ||
3088  packet_queue_init(&is->audioq) < 0 ||
3089  packet_queue_init(&is->subtitleq) < 0)
3090  goto fail;
3091 
3092  if (!(is->continue_read_thread = SDL_CreateCond())) {
3093  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
3094  goto fail;
3095  }
3096 
3097  init_clock(&is->vidclk, &is->videoq.serial);
3098  init_clock(&is->audclk, &is->audioq.serial);
3099  init_clock(&is->extclk, &is->extclk.serial);
3100  is->audio_clock_serial = -1;
3101  if (startup_volume < 0)
3102  av_log(NULL, AV_LOG_WARNING, "-volume=%d < 0, setting to 0\n", startup_volume);
3103  if (startup_volume > 100)
3104  av_log(NULL, AV_LOG_WARNING, "-volume=%d > 100, setting to 100\n", startup_volume);
3105  startup_volume = av_clip(startup_volume, 0, 100);
3106  startup_volume = av_clip(SDL_MIX_MAXVOLUME * startup_volume / 100, 0, SDL_MIX_MAXVOLUME);
3107  is->audio_volume = startup_volume;
3108  is->muted = 0;
3109  is->av_sync_type = av_sync_type;
3110  is->read_tid = SDL_CreateThread(read_thread, "read_thread", is);
3111  if (!is->read_tid) {
3112  av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
3113 fail:
3114  stream_close(is);
3115  return NULL;
3116  }
3117  return is;
3118 }
3119 
3120 static void stream_cycle_channel(VideoState *is, int codec_type)
3121 {
3122  AVFormatContext *ic = is->ic;
3123  int start_index, stream_index;
3124  int old_index;
3125  AVStream *st;
3126  AVProgram *p = NULL;
3127  int nb_streams = is->ic->nb_streams;
3128 
3129  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3130  start_index = is->last_video_stream;
3131  old_index = is->video_stream;
3132  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3133  start_index = is->last_audio_stream;
3134  old_index = is->audio_stream;
3135  } else {
3136  start_index = is->last_subtitle_stream;
3137  old_index = is->subtitle_stream;
3138  }
3139  stream_index = start_index;
3140 
3141  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3142  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3143  if (p) {
3144  nb_streams = p->nb_stream_indexes;
3145  for (start_index = 0; start_index < nb_streams; start_index++)
3146  if (p->stream_index[start_index] == stream_index)
3147  break;
3148  if (start_index == nb_streams)
3149  start_index = -1;
3150  stream_index = start_index;
3151  }
3152  }
3153 
3154  for (;;) {
3155  if (++stream_index >= nb_streams)
3156  {
3157  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3158  {
3159  stream_index = -1;
3160  is->last_subtitle_stream = -1;
3161  goto the_end;
3162  }
3163  if (start_index == -1)
3164  return;
3165  stream_index = 0;
3166  }
3167  if (stream_index == start_index)
3168  return;
3169  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3170  if (st->codecpar->codec_type == codec_type) {
3171  /* check that parameters are OK */
3172  switch (codec_type) {
3173  case AVMEDIA_TYPE_AUDIO:
3174  if (st->codecpar->sample_rate != 0 &&
3175  st->codecpar->channels != 0)
3176  goto the_end;
3177  break;
3178  case AVMEDIA_TYPE_VIDEO:
3179  case AVMEDIA_TYPE_SUBTITLE:
3180  goto the_end;
3181  default:
3182  break;
3183  }
3184  }
3185  }
3186  the_end:
3187  if (p && stream_index != -1)
3188  stream_index = p->stream_index[stream_index];
3189  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3190  av_get_media_type_string(codec_type),
3191  old_index,
3192  stream_index);
3193 
3194  stream_component_close(is, old_index);
3195  stream_component_open(is, stream_index);
3196 }
3197 
3198 
3199 static void toggle_full_screen(VideoState *is)
3200 {
3201  is_full_screen = !is_full_screen;
3202  SDL_SetWindowFullscreen(window, is_full_screen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0);
3203 }
3204 
3205 static void toggle_audio_display(VideoState *is)
3206 {
3207  int next = is->show_mode;
3208  do {
3209  next = (next + 1) % SHOW_MODE_NB;
3210  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3211  if (is->show_mode != next) {
3212  is->force_refresh = 1;
3213  is->show_mode = next;
3214  }
3215 }
3216 
3217 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3218  double remaining_time = 0.0;
3219  SDL_PumpEvents();
3220  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
3221  if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3222  SDL_ShowCursor(0);
3223  cursor_hidden = 1;
3224  }
3225  if (remaining_time > 0.0)
3226  av_usleep((int64_t)(remaining_time * 1000000.0));
3227  remaining_time = REFRESH_RATE;
3228  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3229  video_refresh(is, &remaining_time);
3230  SDL_PumpEvents();
3231  }
3232 }
3233 
3234 static void seek_chapter(VideoState *is, int incr)
3235 {
3236  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3237  int i;
3238 
3239  if (!is->ic->nb_chapters)
3240  return;
3241 
3242  /* find the current chapter */
3243  for (i = 0; i < is->ic->nb_chapters; i++) {
3244  AVChapter *ch = is->ic->chapters[i];
3245  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3246  i--;
3247  break;
3248  }
3249  }
3250 
3251  i += incr;
3252  i = FFMAX(i, 0);
3253  if (i >= is->ic->nb_chapters)
3254  return;
3255 
3256  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3257  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3258  AV_TIME_BASE_Q), 0, 0);
3259 }
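/*
 * A sketch of the chapter lookup above (helper name is illustrative): av_compare_ts()
 * compares the playback position, expressed in AV_TIME_BASE_Q, against each chapter start
 * in that chapter's own time base; the first chapter starting after the position ends the
 * search, and av_rescale_q() then converts the chosen start back for stream_seek().
 */
static int example_current_chapter(AVFormatContext *ic, double master_clock_sec)
{
    int64_t pos = master_clock_sec * AV_TIME_BASE;
    int i;

    if (!ic->nb_chapters)
        return -1;
    for (i = 0; i < (int)ic->nb_chapters; i++) {
        AVChapter *ch = ic->chapters[i];
        if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0)
            return i - 1;            /* position lies before this chapter */
    }
    return (int)ic->nb_chapters - 1; /* position is inside the last chapter */
}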
3260 
3261 /* handle an event sent by the GUI */
3262 static void event_loop(VideoState *cur_stream)
3263 {
3264  SDL_Event event;
3265  double incr, pos, frac;
3266 
3267  for (;;) {
3268  double x;
3269  refresh_loop_wait_event(cur_stream, &event);
3270  switch (event.type) {
3271  case SDL_KEYDOWN:
3272  if (exit_on_keydown || event.key.keysym.sym == SDLK_ESCAPE || event.key.keysym.sym == SDLK_q) {
3273  do_exit(cur_stream);
3274  break;
3275  }
3276  // If we don't yet have a window, skip all key events, because read_thread might still be initializing...
3277  if (!cur_stream->width)
3278  continue;
3279  switch (event.key.keysym.sym) {
3280  case SDLK_f:
3281  toggle_full_screen(cur_stream);
3282  cur_stream->force_refresh = 1;
3283  break;
3284  case SDLK_p:
3285  case SDLK_SPACE:
3286  toggle_pause(cur_stream);
3287  break;
3288  case SDLK_m:
3289  toggle_mute(cur_stream);
3290  break;
3291  case SDLK_KP_MULTIPLY:
3292  case SDLK_0:
3293  update_volume(cur_stream, 1, SDL_VOLUME_STEP);
3294  break;
3295  case SDLK_KP_DIVIDE:
3296  case SDLK_9:
3297  update_volume(cur_stream, -1, SDL_VOLUME_STEP);
3298  break;
3299  case SDLK_s: // S: Step to next frame
3300  step_to_next_frame(cur_stream);
3301  break;
3302  case SDLK_a:
3303  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3304  break;
3305  case SDLK_v:
3306  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3307  break;
3308  case SDLK_c:
3309  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3310  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3311  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3312  break;
3313  case SDLK_t:
3314  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3315  break;
3316  case SDLK_w:
3317 #if CONFIG_AVFILTER
3318  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3319  if (++cur_stream->vfilter_idx >= nb_vfilters)
3320  cur_stream->vfilter_idx = 0;
3321  } else {
3322  cur_stream->vfilter_idx = 0;
3323  toggle_audio_display(cur_stream);
3324  }
3325 #else
3326  toggle_audio_display(cur_stream);
3327 #endif
3328  break;
3329  case SDLK_PAGEUP:
3330  if (cur_stream->ic->nb_chapters <= 1) {
3331  incr = 600.0;
3332  goto do_seek;
3333  }
3334  seek_chapter(cur_stream, 1);
3335  break;
3336  case SDLK_PAGEDOWN:
3337  if (cur_stream->ic->nb_chapters <= 1) {
3338  incr = -600.0;
3339  goto do_seek;
3340  }
3341  seek_chapter(cur_stream, -1);
3342  break;
3343  case SDLK_LEFT:
3344  incr = -10.0;
3345  goto do_seek;
3346  case SDLK_RIGHT:
3347  incr = 10.0;
3348  goto do_seek;
3349  case SDLK_UP:
3350  incr = 60.0;
3351  goto do_seek;
3352  case SDLK_DOWN:
3353  incr = -60.0;
3354  do_seek:
3355  if (seek_by_bytes) {
3356  pos = -1;
3357  if (pos < 0 && cur_stream->video_stream >= 0)
3358  pos = frame_queue_last_pos(&cur_stream->pictq);
3359  if (pos < 0 && cur_stream->audio_stream >= 0)
3360  pos = frame_queue_last_pos(&cur_stream->sampq);
3361  if (pos < 0)
3362  pos = avio_tell(cur_stream->ic->pb);
3363  if (cur_stream->ic->bit_rate)
3364  incr *= cur_stream->ic->bit_rate / 8.0;
3365  else
3366  incr *= 180000.0;
3367  pos += incr;
3368  stream_seek(cur_stream, pos, incr, 1);
3369  } else {
3370  pos = get_master_clock(cur_stream);
3371  if (isnan(pos))
3372  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3373  pos += incr;
3374  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3375  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3376  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3377  }
3378  break;
3379  default:
3380  break;
3381  }
3382  break;
3383  case SDL_MOUSEBUTTONDOWN:
3384  if (exit_on_mousedown) {
3385  do_exit(cur_stream);
3386  break;
3387  }
3388  if (event.button.button == SDL_BUTTON_LEFT) {
3389  static int64_t last_mouse_left_click = 0;
3390  if (av_gettime_relative() - last_mouse_left_click <= 500000) {
3391  toggle_full_screen(cur_stream);
3392  cur_stream->force_refresh = 1;
3393  last_mouse_left_click = 0;
3394  } else {
3395  last_mouse_left_click = av_gettime_relative();
3396  }
3397  }
3398  case SDL_MOUSEMOTION:
3399  if (cursor_hidden) {
3400  SDL_ShowCursor(1);
3401  cursor_hidden = 0;
3402  }
3403  cursor_last_shown = av_gettime_relative();
3404  if (event.type == SDL_MOUSEBUTTONDOWN) {
3405  if (event.button.button != SDL_BUTTON_RIGHT)
3406  break;
3407  x = event.button.x;
3408  } else {
3409  if (!(event.motion.state & SDL_BUTTON_RMASK))
3410  break;
3411  x = event.motion.x;
3412  }
3413  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3414  uint64_t size = avio_size(cur_stream->ic->pb);
3415  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3416  } else {
3417  int64_t ts;
3418  int ns, hh, mm, ss;
3419  int tns, thh, tmm, tss;
3420  tns = cur_stream->ic->duration / 1000000LL;
3421  thh = tns / 3600;
3422  tmm = (tns % 3600) / 60;
3423  tss = (tns % 60);
3424  frac = x / cur_stream->width;
3425  ns = frac * tns;
3426  hh = ns / 3600;
3427  mm = (ns % 3600) / 60;
3428  ss = (ns % 60);
3429  av_log(NULL, AV_LOG_INFO,
3430  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3431  hh, mm, ss, thh, tmm, tss);
3432  ts = frac * cur_stream->ic->duration;
3433  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3434  ts += cur_stream->ic->start_time;
3435  stream_seek(cur_stream, ts, 0, 0);
3436  }
3437  break;
3438  case SDL_WINDOWEVENT:
3439  switch (event.window.event) {
3440  case SDL_WINDOWEVENT_RESIZED:
3441  screen_width = cur_stream->width = event.window.data1;
3442  screen_height = cur_stream->height = event.window.data2;
3443  if (cur_stream->vis_texture) {
3444  SDL_DestroyTexture(cur_stream->vis_texture);
3445  cur_stream->vis_texture = NULL;
3446  }
3447  case SDL_WINDOWEVENT_EXPOSED:
3448  cur_stream->force_refresh = 1;
3449  }
3450  break;
3451  case SDL_QUIT:
3452  case FF_QUIT_EVENT:
3453  do_exit(cur_stream);
3454  break;
3455  default:
3456  break;
3457  }
3458  }
3459 }
3460 
3461 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3462 {
3463  av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3464  return opt_default(NULL, "video_size", arg);
3465 }
3466 
3467 static int opt_width(void *optctx, const char *opt, const char *arg)
3468 {
3469  screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3470  return 0;
3471 }
3472 
3473 static int opt_height(void *optctx, const char *opt, const char *arg)
3474 {
3475  screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3476  return 0;
3477 }
3478 
3479 static int opt_format(void *optctx, const char *opt, const char *arg)
3480 {
3481  file_iformat = av_find_input_format(arg);
3482  if (!file_iformat) {
3483  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3484  return AVERROR(EINVAL);
3485  }
3486  return 0;
3487 }
3488 
3489 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3490 {
3491  av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3492  return opt_default(NULL, "pixel_format", arg);
3493 }
3494 
3495 static int opt_sync(void *optctx, const char *opt, const char *arg)
3496 {
3497  if (!strcmp(arg, "audio"))
3498  av_sync_type = AV_SYNC_AUDIO_MASTER;
3499  else if (!strcmp(arg, "video"))
3500  av_sync_type = AV_SYNC_VIDEO_MASTER;
3501  else if (!strcmp(arg, "ext"))
3502  av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3503  else {
3504  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3505  exit(1);
3506  }
3507  return 0;
3508 }
3509 
3510 static int opt_seek(void *optctx, const char *opt, const char *arg)
3511 {
3512  start_time = parse_time_or_die(opt, arg, 1);
3513  return 0;
3514 }
3515 
3516 static int opt_duration(void *optctx, const char *opt, const char *arg)
3517 {
3518  duration = parse_time_or_die(opt, arg, 1);
3519  return 0;
3520 }
3521 
3522 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3523 {
3524  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3525  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3526  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3527  parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3528  return 0;
3529 }
3530 
3531 static void opt_input_file(void *optctx, const char *filename)
3532 {
3533  if (input_filename) {
3534  av_log(NULL, AV_LOG_FATAL,
3535  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3536  filename, input_filename);
3537  exit(1);
3538  }
3539  if (!strcmp(filename, "-"))
3540  filename = "pipe:";
3541  input_filename = filename;
3542 }
3543 
3544 static int opt_codec(void *optctx, const char *opt, const char *arg)
3545 {
3546  const char *spec = strchr(opt, ':');
3547  if (!spec) {
3548  av_log(NULL, AV_LOG_ERROR,
3549  "No media specifier was specified in '%s' in option '%s'\n",
3550  arg, opt);
3551  return AVERROR(EINVAL);
3552  }
3553  spec++;
3554  switch (spec[0]) {
3555  case 'a' : audio_codec_name = arg; break;
3556  case 's' : subtitle_codec_name = arg; break;
3557  case 'v' : video_codec_name = arg; break;
3558  default:
3559  av_log(NULL, AV_LOG_ERROR,
3560  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3561  return AVERROR(EINVAL);
3562  }
3563  return 0;
3564 }
3565 
3566 static int dummy;
3567 
3568 static const OptionDef options[] = {
3569  CMDUTILS_COMMON_OPTIONS
3570  { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3571  { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3572  { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3573  { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3574  { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3575  { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3576  { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3577  { "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3578  { "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3579  { "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3580  { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3581  { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3582  { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3583  { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3584  { "noborder", OPT_BOOL, { &borderless }, "borderless window" },
3585  { "volume", OPT_INT | HAS_ARG, { &startup_volume}, "set startup volume 0=min 100=max", "volume" },
3586  { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3587  { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3588  { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3589  { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3590  { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3591  { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3592  { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3593  { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3594  { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3595  { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3596  { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3597  { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3598  { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3599  { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3600  { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3601 #if CONFIG_AVFILTER
3602  { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3603  { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3604 #endif
3605  { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3606  { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3607  { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3608  { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3609  { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3610  { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3611  { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3612  { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3613  { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
3614  { "find_stream_info", OPT_BOOL | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
3615  "read and decode the streams to fill missing information with heuristics" },
3616  { NULL, },
3617 };
3618 
3619 static void show_usage(void)
3620 {
3621  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3622  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3623  av_log(NULL, AV_LOG_INFO, "\n");
3624 }
3625 
3626 void show_help_default(const char *opt, const char *arg)
3627 {
3628  av_log_set_callback(log_callback_help);
3629  show_usage();
3630  show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3631  show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3632  printf("\n");
3633  show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3634  show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3635 #if !CONFIG_AVFILTER
3636  show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3637 #else
3638  show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3639 #endif
3640  printf("\nWhile playing:\n"
3641  "q, ESC quit\n"
3642  "f toggle full screen\n"
3643  "p, SPC pause\n"
3644  "m toggle mute\n"
3645  "9, 0 decrease and increase volume respectively\n"
3646  "/, * decrease and increase volume respectively\n"
3647  "a cycle audio channel in the current program\n"
3648  "v cycle video channel\n"
3649  "t cycle subtitle channel in the current program\n"
3650  "c cycle program\n"
3651  "w cycle video filters or show modes\n"
3652  "s activate frame-step mode\n"
3653  "left/right seek backward/forward 10 seconds\n"
3654  "down/up seek backward/forward 1 minute\n"
3655  "page down/page up seek backward/forward 10 minutes\n"
3656  "right mouse click seek to percentage in file corresponding to fraction of width\n"
3657  "left double-click toggle full screen\n"
3658  );
3659 }
3660 
3661 /* Called from the main */
3662 int main(int argc, char **argv)
3663 {
3664  int flags;
3665  VideoState *is;
3666 
3667  init_dynload();
3668 
3669  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3670  parse_loglevel(argc, argv, options);
3671 
3672  /* register all codecs, demux and protocols */
3673 #if CONFIG_AVDEVICE
3674  avdevice_register_all();
3675 #endif
3676  avformat_network_init();
3677 
3678  init_opts();
3679 
3680  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3681  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3682 
3683  show_banner(argc, argv, options);
3684 
3685  parse_options(NULL, argc, argv, options, opt_input_file);
3686 
3687  if (!input_filename) {
3688  show_usage();
3689  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3690  av_log(NULL, AV_LOG_FATAL,
3691  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3692  exit(1);
3693  }
3694 
3695  if (display_disable) {
3696  video_disable = 1;
3697  }
3698  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3699  if (audio_disable)
3700  flags &= ~SDL_INIT_AUDIO;
3701  else {
3702  /* Try to work around an occasional ALSA buffer underflow issue when the
3703  * period size is NPOT due to ALSA resampling by forcing the buffer size. */
3704  if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
3705  SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1);
3706  }
3707  if (display_disable)
3708  flags &= ~SDL_INIT_VIDEO;
3709  if (SDL_Init (flags)) {
3710  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3711  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3712  exit(1);
3713  }
3714 
3715  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3716  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3717 
3718  av_init_packet(&flush_pkt);
3719  flush_pkt.data = (uint8_t *)&flush_pkt;
3720 
3721  if (!display_disable) {
3722  int flags = SDL_WINDOW_HIDDEN;
3723  if (borderless)
3724  flags |= SDL_WINDOW_BORDERLESS;
3725  else
3726  flags |= SDL_WINDOW_RESIZABLE;
3727  window = SDL_CreateWindow(program_name, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, default_width, default_height, flags);
3728  SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
3729  if (window) {
3730  renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
3731  if (!renderer) {
3732  av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
3733  renderer = SDL_CreateRenderer(window, -1, 0);
3734  }
3735  if (renderer) {
3736  if (!SDL_GetRendererInfo(renderer, &renderer_info))
3737  av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", renderer_info.name);
3738  }
3739  }
3740  if (!window || !renderer || !renderer_info.num_texture_formats) {
3741  av_log(NULL, AV_LOG_FATAL, "Failed to create window or renderer: %s", SDL_GetError());
3742  do_exit(NULL);
3743  }
3744  }
3745 
3746  is = stream_open(input_filename, file_iformat);
3747  if (!is) {
3748  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3749  do_exit(NULL);
3750  }
3751 
3752  event_loop(is);
3753 
3754  /* never returns */
3755 
3756  return 0;
3757 }
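main() above creates the SDL window hidden, asks for a hardware-accelerated, vsynced renderer, and falls back to whatever SDL can provide if that fails; creating the window hidden avoids a visible resize flash before the stream's dimensions are known. The following is a stripped-down sketch of that fallback pattern, assuming only SDL2; it is an illustration, not a substitute for ffplay's actual setup.

/* Minimal SDL2 sketch of the window/renderer fallback used in main(). */
#include <SDL.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    SDL_Window *win;
    SDL_Renderer *ren;

    (void)argc; (void)argv;

    if (SDL_Init(SDL_INIT_VIDEO)) {
        fprintf(stderr, "SDL_Init failed: %s\n", SDL_GetError());
        return 1;
    }

    win = SDL_CreateWindow("demo", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
                           640, 480, SDL_WINDOW_HIDDEN | SDL_WINDOW_RESIZABLE);
    if (!win) {
        fprintf(stderr, "SDL_CreateWindow failed: %s\n", SDL_GetError());
        SDL_Quit();
        return 1;
    }

    /* Prefer an accelerated, vsynced renderer ... */
    ren = SDL_CreateRenderer(win, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
    if (!ren) {
        /* ... but fall back to any available renderer, as main() does. */
        fprintf(stderr, "accelerated renderer unavailable: %s\n", SDL_GetError());
        ren = SDL_CreateRenderer(win, -1, 0);
    }
    if (!ren) {
        fprintf(stderr, "SDL_CreateRenderer failed: %s\n", SDL_GetError());
        SDL_DestroyWindow(win);
        SDL_Quit();
        return 1;
    }

    SDL_DestroyRenderer(ren);
    SDL_DestroyWindow(win);
    SDL_Quit();
    return 0;
}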