/* FFmpeg — ffplay.c (Doxygen source listing) */
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include <stdint.h>
32 
33 #include "libavutil/avstring.h"
35 #include "libavutil/eval.h"
36 #include "libavutil/mathematics.h"
37 #include "libavutil/pixdesc.h"
38 #include "libavutil/imgutils.h"
39 #include "libavutil/dict.h"
40 #include "libavutil/fifo.h"
41 #include "libavutil/parseutils.h"
42 #include "libavutil/samplefmt.h"
43 #include "libavutil/time.h"
44 #include "libavutil/bprint.h"
45 #include "libavformat/avformat.h"
46 #include "libavdevice/avdevice.h"
47 #include "libswscale/swscale.h"
48 #include "libavutil/opt.h"
49 #include "libavcodec/avfft.h"
51 
52 #if CONFIG_AVFILTER
53 # include "libavfilter/avfilter.h"
54 # include "libavfilter/buffersink.h"
55 # include "libavfilter/buffersrc.h"
56 #endif
57 
58 #include <SDL.h>
59 #include <SDL_thread.h>
60 
61 #include "cmdutils.h"
62 
63 const char program_name[] = "ffplay";
64 const int program_birth_year = 2003;
65 
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_FRAMES 25
68 #define EXTERNAL_CLOCK_MIN_FRAMES 2
69 #define EXTERNAL_CLOCK_MAX_FRAMES 10
70 
71 /* Minimum SDL audio buffer size, in samples. */
72 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
73 /* Calculate actual buffer size keeping in mind not cause too frequent audio callbacks */
74 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
75 
76 /* Step size for volume control in dB */
77 #define SDL_VOLUME_STEP (0.75)
78 
79 /* no AV sync correction is done if below the minimum AV sync threshold */
80 #define AV_SYNC_THRESHOLD_MIN 0.04
81 /* AV sync correction is done if above the maximum AV sync threshold */
82 #define AV_SYNC_THRESHOLD_MAX 0.1
83 /* If a frame duration is longer than this, it will not be duplicated to compensate AV sync */
84 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
85 /* no AV correction is done if too big error */
86 #define AV_NOSYNC_THRESHOLD 10.0
87 
88 /* maximum audio speed change to get correct sync */
89 #define SAMPLE_CORRECTION_PERCENT_MAX 10
90 
91 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
92 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
93 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
94 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
95 
96 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
97 #define AUDIO_DIFF_AVG_NB 20
98 
99 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
100 #define REFRESH_RATE 0.01
101 
102 /* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
103 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
104 #define SAMPLE_ARRAY_SIZE (8 * 65536)
105 
106 #define CURSOR_HIDE_DELAY 1000000
107 
108 #define USE_ONEPASS_SUBTITLE_RENDER 1
109 
110 static unsigned sws_flags = SWS_BICUBIC;
111 
112 typedef struct MyAVPacketList {
114  int serial;
116 
117 typedef struct PacketQueue {
120  int size;
121  int64_t duration;
123  int serial;
124  SDL_mutex *mutex;
125  SDL_cond *cond;
126 } PacketQueue;
127 
128 #define VIDEO_PICTURE_QUEUE_SIZE 3
129 #define SUBPICTURE_QUEUE_SIZE 16
130 #define SAMPLE_QUEUE_SIZE 9
131 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
132 
133 typedef struct AudioParams {
134  int freq;
135  int channels;
136  int64_t channel_layout;
140 } AudioParams;
141 
142 typedef struct Clock {
143  double pts; /* clock base */
144  double pts_drift; /* clock base minus time at which we updated the clock */
145  double last_updated;
146  double speed;
147  int serial; /* clock is based on a packet with this serial */
148  int paused;
149  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
150 } Clock;
151 
152 /* Common struct for handling all types of decoded data and allocated render buffers. */
153 typedef struct Frame {
156  int serial;
157  double pts; /* presentation timestamp for the frame */
158  double duration; /* estimated duration of the frame */
159  int64_t pos; /* byte position of the frame in the input file */
160  int width;
161  int height;
162  int format;
164  int uploaded;
165  int flip_v;
166 } Frame;
167 
168 typedef struct FrameQueue {
170  int rindex;
171  int windex;
172  int size;
173  int max_size;
176  SDL_mutex *mutex;
177  SDL_cond *cond;
179 } FrameQueue;
180 
181 enum {
182  AV_SYNC_AUDIO_MASTER, /* default choice */
184  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
185 };
186 
187 typedef struct Decoder {
192  int finished;
194  SDL_cond *empty_queue_cond;
195  int64_t start_pts;
197  int64_t next_pts;
199  SDL_Thread *decoder_tid;
200 } Decoder;
201 
202 typedef struct VideoState {
203  SDL_Thread *read_tid;
207  int paused;
210  int seek_req;
212  int64_t seek_pos;
213  int64_t seek_rel;
216  int realtime;
217 
221 
225 
229 
231 
233 
234  double audio_clock;
236  double audio_diff_cum; /* used for AV difference average computation */
243  uint8_t *audio_buf;
244  uint8_t *audio_buf1;
245  unsigned int audio_buf_size; /* in bytes */
246  unsigned int audio_buf1_size;
247  int audio_buf_index; /* in bytes */
250  int muted;
252 #if CONFIG_AVFILTER
253  struct AudioParams audio_filter_src;
254 #endif
259 
260  enum ShowMode {
262  } show_mode;
269  int xpos;
271  SDL_Texture *vis_texture;
272  SDL_Texture *sub_texture;
273  SDL_Texture *vid_texture;
274 
278 
279  double frame_timer;
285  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
288  int eof;
289 
290  char *filename;
292  int step;
293 
294 #if CONFIG_AVFILTER
295  int vfilter_idx;
296  AVFilterContext *in_video_filter; // the first filter in the video chain
297  AVFilterContext *out_video_filter; // the last filter in the video chain
298  AVFilterContext *in_audio_filter; // the first filter in the audio chain
299  AVFilterContext *out_audio_filter; // the last filter in the audio chain
300  AVFilterGraph *agraph; // audio filter graph
301 #endif
302 
304 
306 } VideoState;
307 
308 /* options specified by the user */
310 static const char *input_filename;
311 static const char *window_title;
312 static int default_width = 640;
313 static int default_height = 480;
314 static int screen_width = 0;
315 static int screen_height = 0;
316 static int screen_left = SDL_WINDOWPOS_CENTERED;
317 static int screen_top = SDL_WINDOWPOS_CENTERED;
318 static int audio_disable;
319 static int video_disable;
320 static int subtitle_disable;
321 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
322 static int seek_by_bytes = -1;
323 static float seek_interval = 10;
324 static int display_disable;
325 static int borderless;
326 static int alwaysontop;
327 static int startup_volume = 100;
328 static int show_status = -1;
330 static int64_t start_time = AV_NOPTS_VALUE;
331 static int64_t duration = AV_NOPTS_VALUE;
332 static int fast = 0;
333 static int genpts = 0;
334 static int lowres = 0;
335 static int decoder_reorder_pts = -1;
336 static int autoexit;
337 static int exit_on_keydown;
338 static int exit_on_mousedown;
339 static int loop = 1;
340 static int framedrop = -1;
341 static int infinite_buffer = -1;
342 static enum ShowMode show_mode = SHOW_MODE_NONE;
343 static const char *audio_codec_name;
344 static const char *subtitle_codec_name;
345 static const char *video_codec_name;
346 double rdftspeed = 0.02;
347 static int64_t cursor_last_shown;
348 static int cursor_hidden = 0;
349 #if CONFIG_AVFILTER
350 static const char **vfilters_list = NULL;
351 static int nb_vfilters = 0;
352 static char *afilters = NULL;
353 #endif
354 static int autorotate = 1;
355 static int find_stream_info = 1;
356 static int filter_nbthreads = 0;
357 
358 /* current context */
359 static int is_full_screen;
360 static int64_t audio_callback_time;
361 
362 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
363 
364 static SDL_Window *window;
365 static SDL_Renderer *renderer;
366 static SDL_RendererInfo renderer_info = {0};
367 static SDL_AudioDeviceID audio_dev;
368 
369 static const struct TextureFormatEntry {
373  { AV_PIX_FMT_RGB8, SDL_PIXELFORMAT_RGB332 },
374  { AV_PIX_FMT_RGB444, SDL_PIXELFORMAT_RGB444 },
375  { AV_PIX_FMT_RGB555, SDL_PIXELFORMAT_RGB555 },
376  { AV_PIX_FMT_BGR555, SDL_PIXELFORMAT_BGR555 },
377  { AV_PIX_FMT_RGB565, SDL_PIXELFORMAT_RGB565 },
378  { AV_PIX_FMT_BGR565, SDL_PIXELFORMAT_BGR565 },
379  { AV_PIX_FMT_RGB24, SDL_PIXELFORMAT_RGB24 },
380  { AV_PIX_FMT_BGR24, SDL_PIXELFORMAT_BGR24 },
381  { AV_PIX_FMT_0RGB32, SDL_PIXELFORMAT_RGB888 },
382  { AV_PIX_FMT_0BGR32, SDL_PIXELFORMAT_BGR888 },
383  { AV_PIX_FMT_NE(RGB0, 0BGR), SDL_PIXELFORMAT_RGBX8888 },
384  { AV_PIX_FMT_NE(BGR0, 0RGB), SDL_PIXELFORMAT_BGRX8888 },
385  { AV_PIX_FMT_RGB32, SDL_PIXELFORMAT_ARGB8888 },
386  { AV_PIX_FMT_RGB32_1, SDL_PIXELFORMAT_RGBA8888 },
387  { AV_PIX_FMT_BGR32, SDL_PIXELFORMAT_ABGR8888 },
388  { AV_PIX_FMT_BGR32_1, SDL_PIXELFORMAT_BGRA8888 },
389  { AV_PIX_FMT_YUV420P, SDL_PIXELFORMAT_IYUV },
390  { AV_PIX_FMT_YUYV422, SDL_PIXELFORMAT_YUY2 },
391  { AV_PIX_FMT_UYVY422, SDL_PIXELFORMAT_UYVY },
392  { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_UNKNOWN },
393 };
394 
395 #if CONFIG_AVFILTER
static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
{
    /* Command-line handler for adding one video filtergraph string.
     * GROW_ARRAY (from cmdutils) enlarges vfilters_list and increments
     * nb_vfilters; the new slot stores arg directly (no copy is made, so
     * the option string must outlive the list).  optctx and opt are part
     * of the option-callback signature and are unused here. */
    GROW_ARRAY(vfilters_list, nb_vfilters);
    vfilters_list[nb_vfilters - 1] = arg;
    return 0;
}
402 #endif
403 
404 static inline
405 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
406  enum AVSampleFormat fmt2, int64_t channel_count2)
407 {
408  /* If channel count == 1, planar and non-planar formats are the same */
409  if (channel_count1 == 1 && channel_count2 == 1)
411  else
412  return channel_count1 != channel_count2 || fmt1 != fmt2;
413 }
414 
static inline
int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
{
    /* Return channel_layout only when it is non-zero and its implied
     * channel count matches the given one; otherwise return 0 so the
     * caller falls back to a default layout. */
    int layout_matches = channel_layout &&
                         av_get_channel_layout_nb_channels(channel_layout) == channels;

    return layout_matches ? channel_layout : 0;
}
423 
425 {
426  MyAVPacketList pkt1;
427 
428  if (q->abort_request)
429  return -1;
430 
431  if (av_fifo_space(q->pkt_list) < sizeof(pkt1)) {
432  if (av_fifo_grow(q->pkt_list, sizeof(pkt1)) < 0)
433  return -1;
434  }
435 
436  pkt1.pkt = pkt;
437  pkt1.serial = q->serial;
438 
439  av_fifo_generic_write(q->pkt_list, &pkt1, sizeof(pkt1), NULL);
440  q->nb_packets++;
441  q->size += pkt1.pkt->size + sizeof(pkt1);
442  q->duration += pkt1.pkt->duration;
443  /* XXX: should duplicate packet data in DV case */
444  SDL_CondSignal(q->cond);
445  return 0;
446 }
447 
449 {
450  AVPacket *pkt1;
451  int ret;
452 
453  pkt1 = av_packet_alloc();
454  if (!pkt1) {
456  return -1;
457  }
458  av_packet_move_ref(pkt1, pkt);
459 
460  SDL_LockMutex(q->mutex);
461  ret = packet_queue_put_private(q, pkt1);
462  SDL_UnlockMutex(q->mutex);
463 
464  if (ret < 0)
465  av_packet_free(&pkt1);
466 
467  return ret;
468 }
469 
static int packet_queue_put_nullpacket(PacketQueue *q, AVPacket *pkt, int stream_index)
{
    /* Enqueue an empty packet tagged with the given stream index
     * (presumably used to signal end-of-stream to the decoder thread —
     * confirm against callers).  Returns packet_queue_put()'s result:
     * 0 on success, < 0 on failure. */
    pkt->stream_index = stream_index;
    return packet_queue_put(q, pkt);
}
475 
476 /* packet queue handling */
478 {
479  memset(q, 0, sizeof(PacketQueue));
480  q->pkt_list = av_fifo_alloc(sizeof(MyAVPacketList));
481  if (!q->pkt_list)
482  return AVERROR(ENOMEM);
483  q->mutex = SDL_CreateMutex();
484  if (!q->mutex) {
485  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
486  return AVERROR(ENOMEM);
487  }
488  q->cond = SDL_CreateCond();
489  if (!q->cond) {
490  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
491  return AVERROR(ENOMEM);
492  }
493  q->abort_request = 1;
494  return 0;
495 }
496 
498 {
499  MyAVPacketList pkt1;
500 
501  SDL_LockMutex(q->mutex);
502  while (av_fifo_size(q->pkt_list) >= sizeof(pkt1)) {
503  av_fifo_generic_read(q->pkt_list, &pkt1, sizeof(pkt1), NULL);
504  av_packet_free(&pkt1.pkt);
505  }
506  q->nb_packets = 0;
507  q->size = 0;
508  q->duration = 0;
509  q->serial++;
510  SDL_UnlockMutex(q->mutex);
511 }
512 
514 {
516  av_fifo_freep(&q->pkt_list);
517  SDL_DestroyMutex(q->mutex);
518  SDL_DestroyCond(q->cond);
519 }
520 
522 {
523  SDL_LockMutex(q->mutex);
524 
525  q->abort_request = 1;
526 
527  SDL_CondSignal(q->cond);
528 
529  SDL_UnlockMutex(q->mutex);
530 }
531 
533 {
534  SDL_LockMutex(q->mutex);
535  q->abort_request = 0;
536  q->serial++;
537  SDL_UnlockMutex(q->mutex);
538 }
539 
540 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
541 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
542 {
543  MyAVPacketList pkt1;
544  int ret;
545 
546  SDL_LockMutex(q->mutex);
547 
548  for (;;) {
549  if (q->abort_request) {
550  ret = -1;
551  break;
552  }
553 
554  if (av_fifo_size(q->pkt_list) >= sizeof(pkt1)) {
555  av_fifo_generic_read(q->pkt_list, &pkt1, sizeof(pkt1), NULL);
556  q->nb_packets--;
557  q->size -= pkt1.pkt->size + sizeof(pkt1);
558  q->duration -= pkt1.pkt->duration;
559  av_packet_move_ref(pkt, pkt1.pkt);
560  if (serial)
561  *serial = pkt1.serial;
562  av_packet_free(&pkt1.pkt);
563  ret = 1;
564  break;
565  } else if (!block) {
566  ret = 0;
567  break;
568  } else {
569  SDL_CondWait(q->cond, q->mutex);
570  }
571  }
572  SDL_UnlockMutex(q->mutex);
573  return ret;
574 }
575 
576 static int decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
577  memset(d, 0, sizeof(Decoder));
578  d->pkt = av_packet_alloc();
579  if (!d->pkt)
580  return AVERROR(ENOMEM);
581  d->avctx = avctx;
582  d->queue = queue;
583  d->empty_queue_cond = empty_queue_cond;
584  d->start_pts = AV_NOPTS_VALUE;
585  d->pkt_serial = -1;
586  return 0;
587 }
588 
590  int ret = AVERROR(EAGAIN);
591 
592  for (;;) {
593  if (d->queue->serial == d->pkt_serial) {
594  do {
595  if (d->queue->abort_request)
596  return -1;
597 
598  switch (d->avctx->codec_type) {
599  case AVMEDIA_TYPE_VIDEO:
600  ret = avcodec_receive_frame(d->avctx, frame);
601  if (ret >= 0) {
602  if (decoder_reorder_pts == -1) {
603  frame->pts = frame->best_effort_timestamp;
604  } else if (!decoder_reorder_pts) {
605  frame->pts = frame->pkt_dts;
606  }
607  }
608  break;
609  case AVMEDIA_TYPE_AUDIO:
610  ret = avcodec_receive_frame(d->avctx, frame);
611  if (ret >= 0) {
612  AVRational tb = (AVRational){1, frame->sample_rate};
613  if (frame->pts != AV_NOPTS_VALUE)
614  frame->pts = av_rescale_q(frame->pts, d->avctx->pkt_timebase, tb);
615  else if (d->next_pts != AV_NOPTS_VALUE)
616  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
617  if (frame->pts != AV_NOPTS_VALUE) {
618  d->next_pts = frame->pts + frame->nb_samples;
619  d->next_pts_tb = tb;
620  }
621  }
622  break;
623  }
624  if (ret == AVERROR_EOF) {
625  d->finished = d->pkt_serial;
626  avcodec_flush_buffers(d->avctx);
627  return 0;
628  }
629  if (ret >= 0)
630  return 1;
631  } while (ret != AVERROR(EAGAIN));
632  }
633 
634  do {
635  if (d->queue->nb_packets == 0)
636  SDL_CondSignal(d->empty_queue_cond);
637  if (d->packet_pending) {
638  d->packet_pending = 0;
639  } else {
640  int old_serial = d->pkt_serial;
641  if (packet_queue_get(d->queue, d->pkt, 1, &d->pkt_serial) < 0)
642  return -1;
643  if (old_serial != d->pkt_serial) {
644  avcodec_flush_buffers(d->avctx);
645  d->finished = 0;
646  d->next_pts = d->start_pts;
647  d->next_pts_tb = d->start_pts_tb;
648  }
649  }
650  if (d->queue->serial == d->pkt_serial)
651  break;
652  av_packet_unref(d->pkt);
653  } while (1);
654 
655  if (d->avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
656  int got_frame = 0;
657  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, d->pkt);
658  if (ret < 0) {
659  ret = AVERROR(EAGAIN);
660  } else {
661  if (got_frame && !d->pkt->data) {
662  d->packet_pending = 1;
663  }
664  ret = got_frame ? 0 : (d->pkt->data ? AVERROR(EAGAIN) : AVERROR_EOF);
665  }
666  av_packet_unref(d->pkt);
667  } else {
668  if (avcodec_send_packet(d->avctx, d->pkt) == AVERROR(EAGAIN)) {
669  av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
670  d->packet_pending = 1;
671  } else {
672  av_packet_unref(d->pkt);
673  }
674  }
675  }
676 }
677 
678 static void decoder_destroy(Decoder *d) {
679  av_packet_free(&d->pkt);
680  avcodec_free_context(&d->avctx);
681 }
682 
684 {
685  av_frame_unref(vp->frame);
686  avsubtitle_free(&vp->sub);
687 }
688 
689 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
690 {
691  int i;
692  memset(f, 0, sizeof(FrameQueue));
693  if (!(f->mutex = SDL_CreateMutex())) {
694  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
695  return AVERROR(ENOMEM);
696  }
697  if (!(f->cond = SDL_CreateCond())) {
698  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
699  return AVERROR(ENOMEM);
700  }
701  f->pktq = pktq;
702  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
703  f->keep_last = !!keep_last;
704  for (i = 0; i < f->max_size; i++)
705  if (!(f->queue[i].frame = av_frame_alloc()))
706  return AVERROR(ENOMEM);
707  return 0;
708 }
709 
711 {
712  int i;
713  for (i = 0; i < f->max_size; i++) {
714  Frame *vp = &f->queue[i];
716  av_frame_free(&vp->frame);
717  }
718  SDL_DestroyMutex(f->mutex);
719  SDL_DestroyCond(f->cond);
720 }
721 
723 {
724  SDL_LockMutex(f->mutex);
725  SDL_CondSignal(f->cond);
726  SDL_UnlockMutex(f->mutex);
727 }
728 
730 {
731  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
732 }
733 
735 {
736  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
737 }
738 
740 {
741  return &f->queue[f->rindex];
742 }
743 
745 {
746  /* wait until we have space to put a new frame */
747  SDL_LockMutex(f->mutex);
748  while (f->size >= f->max_size &&
749  !f->pktq->abort_request) {
750  SDL_CondWait(f->cond, f->mutex);
751  }
752  SDL_UnlockMutex(f->mutex);
753 
754  if (f->pktq->abort_request)
755  return NULL;
756 
757  return &f->queue[f->windex];
758 }
759 
761 {
762  /* wait until we have a readable a new frame */
763  SDL_LockMutex(f->mutex);
764  while (f->size - f->rindex_shown <= 0 &&
765  !f->pktq->abort_request) {
766  SDL_CondWait(f->cond, f->mutex);
767  }
768  SDL_UnlockMutex(f->mutex);
769 
770  if (f->pktq->abort_request)
771  return NULL;
772 
773  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
774 }
775 
777 {
778  if (++f->windex == f->max_size)
779  f->windex = 0;
780  SDL_LockMutex(f->mutex);
781  f->size++;
782  SDL_CondSignal(f->cond);
783  SDL_UnlockMutex(f->mutex);
784 }
785 
787 {
788  if (f->keep_last && !f->rindex_shown) {
789  f->rindex_shown = 1;
790  return;
791  }
792  frame_queue_unref_item(&f->queue[f->rindex]);
793  if (++f->rindex == f->max_size)
794  f->rindex = 0;
795  SDL_LockMutex(f->mutex);
796  f->size--;
797  SDL_CondSignal(f->cond);
798  SDL_UnlockMutex(f->mutex);
799 }
800 
801 /* return the number of undisplayed frames in the queue */
803 {
804  return f->size - f->rindex_shown;
805 }
806 
807 /* return last shown position */
809 {
810  Frame *fp = &f->queue[f->rindex];
811  if (f->rindex_shown && fp->serial == f->pktq->serial)
812  return fp->pos;
813  else
814  return -1;
815 }
816 
static void decoder_abort(Decoder *d, FrameQueue *fq)
{
    /* Shut down a decoder thread: mark its packet queue aborted (which
     * also wakes queue waiters), wake any consumer blocked on the frame
     * queue, join the thread, then drop all pending packets.  The flush
     * happens last, after the thread has exited, so no producer can
     * refill the queue concurrently.  NOTE(review): statement order here
     * appears deliberate — keep it. */
    packet_queue_abort(d->queue);
    frame_queue_signal(fq);
    SDL_WaitThread(d->decoder_tid, NULL);
    d->decoder_tid = NULL;
    packet_queue_flush(d->queue);
}
825 
826 static inline void fill_rectangle(int x, int y, int w, int h)
827 {
828  SDL_Rect rect;
829  rect.x = x;
830  rect.y = y;
831  rect.w = w;
832  rect.h = h;
833  if (w && h)
834  SDL_RenderFillRect(renderer, &rect);
835 }
836 
837 static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
838 {
839  Uint32 format;
840  int access, w, h;
841  if (!*texture || SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
842  void *pixels;
843  int pitch;
844  if (*texture)
845  SDL_DestroyTexture(*texture);
846  if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
847  return -1;
848  if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
849  return -1;
850  if (init_texture) {
851  if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
852  return -1;
853  memset(pixels, 0, pitch * new_height);
854  SDL_UnlockTexture(*texture);
855  }
856  av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n", new_width, new_height, SDL_GetPixelFormatName(new_format));
857  }
858  return 0;
859 }
860 
861 static void calculate_display_rect(SDL_Rect *rect,
862  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
863  int pic_width, int pic_height, AVRational pic_sar)
864 {
865  AVRational aspect_ratio = pic_sar;
866  int64_t width, height, x, y;
867 
868  if (av_cmp_q(aspect_ratio, av_make_q(0, 1)) <= 0)
869  aspect_ratio = av_make_q(1, 1);
870 
871  aspect_ratio = av_mul_q(aspect_ratio, av_make_q(pic_width, pic_height));
872 
873  /* XXX: we suppose the screen has a 1.0 pixel ratio */
874  height = scr_height;
875  width = av_rescale(height, aspect_ratio.num, aspect_ratio.den) & ~1;
876  if (width > scr_width) {
877  width = scr_width;
878  height = av_rescale(width, aspect_ratio.den, aspect_ratio.num) & ~1;
879  }
880  x = (scr_width - width) / 2;
881  y = (scr_height - height) / 2;
882  rect->x = scr_xleft + x;
883  rect->y = scr_ytop + y;
884  rect->w = FFMAX((int)width, 1);
885  rect->h = FFMAX((int)height, 1);
886 }
887 
888 static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
889 {
890  int i;
891  *sdl_blendmode = SDL_BLENDMODE_NONE;
892  *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN;
893  if (format == AV_PIX_FMT_RGB32 ||
897  *sdl_blendmode = SDL_BLENDMODE_BLEND;
898  for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; i++) {
900  *sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt;
901  return;
902  }
903  }
904 }
905 
906 static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext **img_convert_ctx) {
907  int ret = 0;
908  Uint32 sdl_pix_fmt;
909  SDL_BlendMode sdl_blendmode;
910  get_sdl_pix_fmt_and_blendmode(frame->format, &sdl_pix_fmt, &sdl_blendmode);
911  if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
912  return -1;
913  switch (sdl_pix_fmt) {
914  case SDL_PIXELFORMAT_UNKNOWN:
915  /* This should only happen if we are not using avfilter... */
916  *img_convert_ctx = sws_getCachedContext(*img_convert_ctx,
917  frame->width, frame->height, frame->format, frame->width, frame->height,
919  if (*img_convert_ctx != NULL) {
920  uint8_t *pixels[4];
921  int pitch[4];
922  if (!SDL_LockTexture(*tex, NULL, (void **)pixels, pitch)) {
923  sws_scale(*img_convert_ctx, (const uint8_t * const *)frame->data, frame->linesize,
924  0, frame->height, pixels, pitch);
925  SDL_UnlockTexture(*tex);
926  }
927  } else {
928  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
929  ret = -1;
930  }
931  break;
932  case SDL_PIXELFORMAT_IYUV:
933  if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
934  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0],
935  frame->data[1], frame->linesize[1],
936  frame->data[2], frame->linesize[2]);
937  } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
938  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0],
939  frame->data[1] + frame->linesize[1] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[1],
940  frame->data[2] + frame->linesize[2] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[2]);
941  } else {
942  av_log(NULL, AV_LOG_ERROR, "Mixed negative and positive linesizes are not supported.\n");
943  return -1;
944  }
945  break;
946  default:
947  if (frame->linesize[0] < 0) {
948  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
949  } else {
950  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0], frame->linesize[0]);
951  }
952  break;
953  }
954  return ret;
955 }
956 
958 {
959 #if SDL_VERSION_ATLEAST(2,0,8)
960  SDL_YUV_CONVERSION_MODE mode = SDL_YUV_CONVERSION_AUTOMATIC;
961  if (frame && (frame->format == AV_PIX_FMT_YUV420P || frame->format == AV_PIX_FMT_YUYV422 || frame->format == AV_PIX_FMT_UYVY422)) {
962  if (frame->color_range == AVCOL_RANGE_JPEG)
963  mode = SDL_YUV_CONVERSION_JPEG;
964  else if (frame->colorspace == AVCOL_SPC_BT709)
965  mode = SDL_YUV_CONVERSION_BT709;
966  else if (frame->colorspace == AVCOL_SPC_BT470BG || frame->colorspace == AVCOL_SPC_SMPTE170M)
967  mode = SDL_YUV_CONVERSION_BT601;
968  }
969  SDL_SetYUVConversionMode(mode); /* FIXME: no support for linear transfer */
970 #endif
971 }
972 
974 {
975  Frame *vp;
976  Frame *sp = NULL;
977  SDL_Rect rect;
978 
979  vp = frame_queue_peek_last(&is->pictq);
980  if (is->subtitle_st) {
981  if (frame_queue_nb_remaining(&is->subpq) > 0) {
982  sp = frame_queue_peek(&is->subpq);
983 
984  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
985  if (!sp->uploaded) {
986  uint8_t* pixels[4];
987  int pitch[4];
988  int i;
989  if (!sp->width || !sp->height) {
990  sp->width = vp->width;
991  sp->height = vp->height;
992  }
993  if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
994  return;
995 
996  for (i = 0; i < sp->sub.num_rects; i++) {
997  AVSubtitleRect *sub_rect = sp->sub.rects[i];
998 
999  sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
1000  sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
1001  sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x);
1002  sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
1003 
1004  is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
1005  sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8,
1006  sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA,
1007  0, NULL, NULL, NULL);
1008  if (!is->sub_convert_ctx) {
1009  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1010  return;
1011  }
1012  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) {
1013  sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
1014  0, sub_rect->h, pixels, pitch);
1015  SDL_UnlockTexture(is->sub_texture);
1016  }
1017  }
1018  sp->uploaded = 1;
1019  }
1020  } else
1021  sp = NULL;
1022  }
1023  }
1024 
1025  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
1026 
1027  if (!vp->uploaded) {
1028  if (upload_texture(&is->vid_texture, vp->frame, &is->img_convert_ctx) < 0)
1029  return;
1030  vp->uploaded = 1;
1031  vp->flip_v = vp->frame->linesize[0] < 0;
1032  }
1033 
1035  SDL_RenderCopyEx(renderer, is->vid_texture, NULL, &rect, 0, NULL, vp->flip_v ? SDL_FLIP_VERTICAL : 0);
1037  if (sp) {
1038 #if USE_ONEPASS_SUBTITLE_RENDER
1039  SDL_RenderCopy(renderer, is->sub_texture, NULL, &rect);
1040 #else
1041  int i;
1042  double xratio = (double)rect.w / (double)sp->width;
1043  double yratio = (double)rect.h / (double)sp->height;
1044  for (i = 0; i < sp->sub.num_rects; i++) {
1045  SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
1046  SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
1047  .y = rect.y + sub_rect->y * yratio,
1048  .w = sub_rect->w * xratio,
1049  .h = sub_rect->h * yratio};
1050  SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
1051  }
1052 #endif
1053  }
1054 }
1055 
/* Mathematical modulo: returns a mod b in the range [0, b) for b > 0.
 * Fixes an edge case in the old expression (a < 0 ? a%b + b : a%b), which
 * returned b instead of 0 whenever a was a negative exact multiple of b —
 * one past the valid range, and an out-of-bounds index when the result is
 * used against SAMPLE_ARRAY_SIZE. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r < 0 ? r + b : r;
}
1060 
1062 {
1063  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1064  int ch, channels, h, h2;
1065  int64_t time_diff;
1066  int rdft_bits, nb_freq;
1067 
1068  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1069  ;
1070  nb_freq = 1 << (rdft_bits - 1);
1071 
1072  /* compute display index : center on currently output samples */
1073  channels = s->audio_tgt.channels;
1074  nb_display_channels = channels;
1075  if (!s->paused) {
1076  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1077  n = 2 * channels;
1078  delay = s->audio_write_buf_size;
1079  delay /= n;
1080 
1081  /* to be more precise, we take into account the time spent since
1082  the last buffer computation */
1083  if (audio_callback_time) {
1084  time_diff = av_gettime_relative() - audio_callback_time;
1085  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1086  }
1087 
1088  delay += 2 * data_used;
1089  if (delay < data_used)
1090  delay = data_used;
1091 
1092  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1093  if (s->show_mode == SHOW_MODE_WAVES) {
1094  h = INT_MIN;
1095  for (i = 0; i < 1000; i += channels) {
1096  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1097  int a = s->sample_array[idx];
1098  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1099  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1100  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1101  int score = a - d;
1102  if (h < score && (b ^ c) < 0) {
1103  h = score;
1104  i_start = idx;
1105  }
1106  }
1107  }
1108 
1109  s->last_i_start = i_start;
1110  } else {
1111  i_start = s->last_i_start;
1112  }
1113 
1114  if (s->show_mode == SHOW_MODE_WAVES) {
1115  SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
1116 
1117  /* total height for one channel */
1118  h = s->height / nb_display_channels;
1119  /* graph height / 2 */
1120  h2 = (h * 9) / 20;
1121  for (ch = 0; ch < nb_display_channels; ch++) {
1122  i = i_start + ch;
1123  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1124  for (x = 0; x < s->width; x++) {
1125  y = (s->sample_array[i] * h2) >> 15;
1126  if (y < 0) {
1127  y = -y;
1128  ys = y1 - y;
1129  } else {
1130  ys = y1;
1131  }
1132  fill_rectangle(s->xleft + x, ys, 1, y);
1133  i += channels;
1134  if (i >= SAMPLE_ARRAY_SIZE)
1135  i -= SAMPLE_ARRAY_SIZE;
1136  }
1137  }
1138 
1139  SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
1140 
1141  for (ch = 1; ch < nb_display_channels; ch++) {
1142  y = s->ytop + ch * h;
1143  fill_rectangle(s->xleft, y, s->width, 1);
1144  }
1145  } else {
1146  if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
1147  return;
1148 
1149  if (s->xpos >= s->width)
1150  s->xpos = 0;
1151  nb_display_channels= FFMIN(nb_display_channels, 2);
1152  if (rdft_bits != s->rdft_bits) {
1153  av_rdft_end(s->rdft);
1154  av_free(s->rdft_data);
1155  s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
1156  s->rdft_bits = rdft_bits;
1157  s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data));
1158  }
1159  if (!s->rdft || !s->rdft_data){
1160  av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1161  s->show_mode = SHOW_MODE_WAVES;
1162  } else {
1163  FFTSample *data[2];
1164  SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
1165  uint32_t *pixels;
1166  int pitch;
1167  for (ch = 0; ch < nb_display_channels; ch++) {
1168  data[ch] = s->rdft_data + 2 * nb_freq * ch;
1169  i = i_start + ch;
1170  for (x = 0; x < 2 * nb_freq; x++) {
1171  double w = (x-nb_freq) * (1.0 / nb_freq);
1172  data[ch][x] = s->sample_array[i] * (1.0 - w * w);
1173  i += channels;
1174  if (i >= SAMPLE_ARRAY_SIZE)
1175  i -= SAMPLE_ARRAY_SIZE;
1176  }
1177  av_rdft_calc(s->rdft, data[ch]);
1178  }
1179  /* Least efficient way to do this, we should of course
1180  * directly access it but it is more than fast enough. */
1181  if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
1182  pitch >>= 2;
1183  pixels += pitch * s->height;
1184  for (y = 0; y < s->height; y++) {
1185  double w = 1 / sqrt(nb_freq);
1186  int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
1187  int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][2 * y + 0], data[1][2 * y + 1]))
1188  : a;
1189  a = FFMIN(a, 255);
1190  b = FFMIN(b, 255);
1191  pixels -= pitch;
1192  *pixels = (a << 16) + (b << 8) + ((a+b) >> 1);
1193  }
1194  SDL_UnlockTexture(s->vis_texture);
1195  }
1196  SDL_RenderCopy(renderer, s->vis_texture, NULL, NULL);
1197  }
1198  if (!s->paused)
1199  s->xpos++;
1200  }
1201 }
1202 
1203 static void stream_component_close(VideoState *is, int stream_index)
1204 {
1205  AVFormatContext *ic = is->ic;
1206  AVCodecParameters *codecpar;
1207 
1208  if (stream_index < 0 || stream_index >= ic->nb_streams)
1209  return;
1210  codecpar = ic->streams[stream_index]->codecpar;
1211 
1212  switch (codecpar->codec_type) {
1213  case AVMEDIA_TYPE_AUDIO:
1214  decoder_abort(&is->auddec, &is->sampq);
1215  SDL_CloseAudioDevice(audio_dev);
1216  decoder_destroy(&is->auddec);
1217  swr_free(&is->swr_ctx);
1218  av_freep(&is->audio_buf1);
1219  is->audio_buf1_size = 0;
1220  is->audio_buf = NULL;
1221 
1222  if (is->rdft) {
1223  av_rdft_end(is->rdft);
1224  av_freep(&is->rdft_data);
1225  is->rdft = NULL;
1226  is->rdft_bits = 0;
1227  }
1228  break;
1229  case AVMEDIA_TYPE_VIDEO:
1230  decoder_abort(&is->viddec, &is->pictq);
1231  decoder_destroy(&is->viddec);
1232  break;
1233  case AVMEDIA_TYPE_SUBTITLE:
1234  decoder_abort(&is->subdec, &is->subpq);
1235  decoder_destroy(&is->subdec);
1236  break;
1237  default:
1238  break;
1239  }
1240 
1241  ic->streams[stream_index]->discard = AVDISCARD_ALL;
1242  switch (codecpar->codec_type) {
1243  case AVMEDIA_TYPE_AUDIO:
1244  is->audio_st = NULL;
1245  is->audio_stream = -1;
1246  break;
1247  case AVMEDIA_TYPE_VIDEO:
1248  is->video_st = NULL;
1249  is->video_stream = -1;
1250  break;
1251  case AVMEDIA_TYPE_SUBTITLE:
1252  is->subtitle_st = NULL;
1253  is->subtitle_stream = -1;
1254  break;
1255  default:
1256  break;
1257  }
1258 }
1259 
/* Stop the read thread, close every open stream component and release all
 * resources owned by the VideoState, including the state object itself. */
{
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->read_tid, NULL);

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);

    avformat_close_input(&is->ic);

    /* Queues must be destroyed after the components that feed them are closed. */
    packet_queue_destroy(&is->videoq);
    packet_queue_destroy(&is->audioq);
    packet_queue_destroy(&is->subtitleq);

    /* free all pictures */
    frame_queue_destory(&is->pictq);
    frame_queue_destory(&is->sampq);
    frame_queue_destory(&is->subpq);
    SDL_DestroyCond(is->continue_read_thread);
    sws_freeContext(is->img_convert_ctx);
    sws_freeContext(is->sub_convert_ctx);
    av_free(is->filename);
    if (is->vis_texture)
        SDL_DestroyTexture(is->vis_texture);
    if (is->vid_texture)
        SDL_DestroyTexture(is->vid_texture);
    if (is->sub_texture)
        SDL_DestroyTexture(is->sub_texture);
    av_free(is);
}
1296 
/* Free the player state (if any) and all global SDL resources, then
 * terminate the process.  Never returns. */
static void do_exit(VideoState *is)
{
    if (is) {
        stream_close(is);
    }
    if (renderer)
        SDL_DestroyRenderer(renderer);
    if (window)
        SDL_DestroyWindow(window);
    uninit_opts();
#if CONFIG_AVFILTER
    av_freep(&vfilters_list);
#endif
    if (show_status)
        printf("\n");
    SDL_Quit();
    /* Silence the logger before leaving so nothing prints after the status line. */
    av_log(NULL, AV_LOG_QUIET, "%s", "");
    exit(0);
}
1317 
/* Signal handler: terminate immediately with a distinctive exit status.
 * NOTE(review): exit() is not async-signal-safe (it runs atexit handlers
 * and flushes stdio); this matches the historical behavior here, but
 * _exit() would be the strictly safe choice — confirm before changing. */
static void sigterm_handler(int sig)
{
    exit(123);
}
1322 
1324 {
1325  SDL_Rect rect;
1326  int max_width = screen_width ? screen_width : INT_MAX;
1327  int max_height = screen_height ? screen_height : INT_MAX;
1328  if (max_width == INT_MAX && max_height == INT_MAX)
1329  max_height = height;
1330  calculate_display_rect(&rect, 0, 0, max_width, max_height, width, height, sar);
1331  default_width = rect.w;
1332  default_height = rect.h;
1333 }
1334 
1336 {
1337  int w,h;
1338 
1341 
1342  if (!window_title)
1344  SDL_SetWindowTitle(window, window_title);
1345 
1346  SDL_SetWindowSize(window, w, h);
1347  SDL_SetWindowPosition(window, screen_left, screen_top);
1348  if (is_full_screen)
1349  SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP);
1350  SDL_ShowWindow(window);
1351 
1352  is->width = w;
1353  is->height = h;
1354 
1355  return 0;
1356 }
1357 
1358 /* display the current picture, if any */
1360 {
1361  if (!is->width)
1362  video_open(is);
1363 
1364  SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1365  SDL_RenderClear(renderer);
1366  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1368  else if (is->video_st)
1370  SDL_RenderPresent(renderer);
1371 }
1372 
1373 static double get_clock(Clock *c)
1374 {
1375  if (*c->queue_serial != c->serial)
1376  return NAN;
1377  if (c->paused) {
1378  return c->pts;
1379  } else {
1380  double time = av_gettime_relative() / 1000000.0;
1381  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1382  }
1383 }
1384 
1385 static void set_clock_at(Clock *c, double pts, int serial, double time)
1386 {
1387  c->pts = pts;
1388  c->last_updated = time;
1389  c->pts_drift = c->pts - time;
1390  c->serial = serial;
1391 }
1392 
1393 static void set_clock(Clock *c, double pts, int serial)
1394 {
1395  double time = av_gettime_relative() / 1000000.0;
1396  set_clock_at(c, pts, serial, time);
1397 }
1398 
/* Change the playback speed of clock c.  The clock is re-anchored to its
 * current value first so the speed change does not cause a jump. */
static void set_clock_speed(Clock *c, double speed)
{
    set_clock(c, get_clock(c), c->serial);
    c->speed = speed;
}
1404 
1405 static void init_clock(Clock *c, int *queue_serial)
1406 {
1407  c->speed = 1.0;
1408  c->paused = 0;
1409  c->queue_serial = queue_serial;
1410  set_clock(c, NAN, -1);
1411 }
1412 
1413 static void sync_clock_to_slave(Clock *c, Clock *slave)
1414 {
1415  double clock = get_clock(c);
1416  double slave_clock = get_clock(slave);
1417  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1418  set_clock(c, slave_clock, slave->serial);
1419 }
1420 
1422  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1423  if (is->video_st)
1424  return AV_SYNC_VIDEO_MASTER;
1425  else
1426  return AV_SYNC_AUDIO_MASTER;
1427  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1428  if (is->audio_st)
1429  return AV_SYNC_AUDIO_MASTER;
1430  else
1431  return AV_SYNC_EXTERNAL_CLOCK;
1432  } else {
1433  return AV_SYNC_EXTERNAL_CLOCK;
1434  }
1435 }
1436 
1437 /* get the current master clock value */
1439 {
1440  double val;
1441 
1442  switch (get_master_sync_type(is)) {
1443  case AV_SYNC_VIDEO_MASTER:
1444  val = get_clock(&is->vidclk);
1445  break;
1446  case AV_SYNC_AUDIO_MASTER:
1447  val = get_clock(&is->audclk);
1448  break;
1449  default:
1450  val = get_clock(&is->extclk);
1451  break;
1452  }
1453  return val;
1454 }
1455 
1457  if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES ||
1458  is->audio_stream >= 0 && is->audioq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES) {
1460  } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) &&
1461  (is->audio_stream < 0 || is->audioq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES)) {
1463  } else {
1464  double speed = is->extclk.speed;
1465  if (speed != 1.0)
1466  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1467  }
1468 }
1469 
1470 /* seek in the stream */
1471 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1472 {
1473  if (!is->seek_req) {
1474  is->seek_pos = pos;
1475  is->seek_rel = rel;
1476  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1477  if (seek_by_bytes)
1478  is->seek_flags |= AVSEEK_FLAG_BYTE;
1479  is->seek_req = 1;
1480  SDL_CondSignal(is->continue_read_thread);
1481  }
1482 }
1483 
1484 /* pause or resume the video */
1486 {
1487  if (is->paused) {
1488  is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1489  if (is->read_pause_return != AVERROR(ENOSYS)) {
1490  is->vidclk.paused = 0;
1491  }
1492  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1493  }
1494  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1495  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1496 }
1497 
1499 {
1501  is->step = 0;
1502 }
1503 
1505 {
1506  is->muted = !is->muted;
1507 }
1508 
1509 static void update_volume(VideoState *is, int sign, double step)
1510 {
1511  double volume_level = is->audio_volume ? (20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)) : -1000.0;
1512  int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (volume_level + sign * step) / 20.0));
1513  is->audio_volume = av_clip(is->audio_volume == new_volume ? (is->audio_volume + sign) : new_volume, 0, SDL_MIX_MAXVOLUME);
1514 }
1515 
1517 {
1518  /* if the stream is paused unpause it, then step */
1519  if (is->paused)
1521  is->step = 1;
1522 }
1523 
1524 static double compute_target_delay(double delay, VideoState *is)
1525 {
1526  double sync_threshold, diff = 0;
1527 
1528  /* update delay to follow master synchronisation source */
1530  /* if video is slave, we try to correct big delays by
1531  duplicating or deleting a frame */
1532  diff = get_clock(&is->vidclk) - get_master_clock(is);
1533 
1534  /* skip or repeat frame. We take into account the
1535  delay to compute the threshold. I still don't know
1536  if it is the best guess */
1537  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1538  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1539  if (diff <= -sync_threshold)
1540  delay = FFMAX(0, delay + diff);
1541  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1542  delay = delay + diff;
1543  else if (diff >= sync_threshold)
1544  delay = 2 * delay;
1545  }
1546  }
1547 
1548  av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
1549  delay, -diff);
1550 
1551  return delay;
1552 }
1553 
1554 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1555  if (vp->serial == nextvp->serial) {
1556  double duration = nextvp->pts - vp->pts;
1557  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1558  return vp->duration;
1559  else
1560  return duration;
1561  } else {
1562  return 0.0;
1563  }
1564 }
1565 
/* Publish a displayed frame's pts on the video clock and drag the external
 * clock along if it has drifted too far.  `pos` is currently unused. */
static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
    /* update current video pts */
    set_clock(&is->vidclk, pts, serial);
    sync_clock_to_slave(&is->extclk, &is->vidclk);
}
1571 
1572 /* called to display each frame */
1573 static void video_refresh(void *opaque, double *remaining_time)
1574 {
1575  VideoState *is = opaque;
1576  double time;
1577 
1578  Frame *sp, *sp2;
1579 
1580  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1582 
1583  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1584  time = av_gettime_relative() / 1000000.0;
1585  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1586  video_display(is);
1587  is->last_vis_time = time;
1588  }
1589  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1590  }
1591 
1592  if (is->video_st) {
1593 retry:
1594  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1595  // nothing to do, no picture to display in the queue
1596  } else {
1597  double last_duration, duration, delay;
1598  Frame *vp, *lastvp;
1599 
1600  /* dequeue the picture */
1601  lastvp = frame_queue_peek_last(&is->pictq);
1602  vp = frame_queue_peek(&is->pictq);
1603 
1604  if (vp->serial != is->videoq.serial) {
1605  frame_queue_next(&is->pictq);
1606  goto retry;
1607  }
1608 
1609  if (lastvp->serial != vp->serial)
1610  is->frame_timer = av_gettime_relative() / 1000000.0;
1611 
1612  if (is->paused)
1613  goto display;
1614 
1615  /* compute nominal last_duration */
1616  last_duration = vp_duration(is, lastvp, vp);
1617  delay = compute_target_delay(last_duration, is);
1618 
1619  time= av_gettime_relative()/1000000.0;
1620  if (time < is->frame_timer + delay) {
1621  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1622  goto display;
1623  }
1624 
1625  is->frame_timer += delay;
1626  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1627  is->frame_timer = time;
1628 
1629  SDL_LockMutex(is->pictq.mutex);
1630  if (!isnan(vp->pts))
1631  update_video_pts(is, vp->pts, vp->pos, vp->serial);
1632  SDL_UnlockMutex(is->pictq.mutex);
1633 
1634  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1635  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1636  duration = vp_duration(is, vp, nextvp);
1637  if(!is->step && (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1638  is->frame_drops_late++;
1639  frame_queue_next(&is->pictq);
1640  goto retry;
1641  }
1642  }
1643 
1644  if (is->subtitle_st) {
1645  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1646  sp = frame_queue_peek(&is->subpq);
1647 
1648  if (frame_queue_nb_remaining(&is->subpq) > 1)
1649  sp2 = frame_queue_peek_next(&is->subpq);
1650  else
1651  sp2 = NULL;
1652 
1653  if (sp->serial != is->subtitleq.serial
1654  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1655  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1656  {
1657  if (sp->uploaded) {
1658  int i;
1659  for (i = 0; i < sp->sub.num_rects; i++) {
1660  AVSubtitleRect *sub_rect = sp->sub.rects[i];
1661  uint8_t *pixels;
1662  int pitch, j;
1663 
1664  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
1665  for (j = 0; j < sub_rect->h; j++, pixels += pitch)
1666  memset(pixels, 0, sub_rect->w << 2);
1667  SDL_UnlockTexture(is->sub_texture);
1668  }
1669  }
1670  }
1671  frame_queue_next(&is->subpq);
1672  } else {
1673  break;
1674  }
1675  }
1676  }
1677 
1678  frame_queue_next(&is->pictq);
1679  is->force_refresh = 1;
1680 
1681  if (is->step && !is->paused)
1683  }
1684 display:
1685  /* display picture */
1686  if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
1687  video_display(is);
1688  }
1689  is->force_refresh = 0;
1690  if (show_status) {
1691  AVBPrint buf;
1692  static int64_t last_time;
1693  int64_t cur_time;
1694  int aqsize, vqsize, sqsize;
1695  double av_diff;
1696 
1697  cur_time = av_gettime_relative();
1698  if (!last_time || (cur_time - last_time) >= 30000) {
1699  aqsize = 0;
1700  vqsize = 0;
1701  sqsize = 0;
1702  if (is->audio_st)
1703  aqsize = is->audioq.size;
1704  if (is->video_st)
1705  vqsize = is->videoq.size;
1706  if (is->subtitle_st)
1707  sqsize = is->subtitleq.size;
1708  av_diff = 0;
1709  if (is->audio_st && is->video_st)
1710  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1711  else if (is->video_st)
1712  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1713  else if (is->audio_st)
1714  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1715 
1717  av_bprintf(&buf,
1718  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1720  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1721  av_diff,
1722  is->frame_drops_early + is->frame_drops_late,
1723  aqsize / 1024,
1724  vqsize / 1024,
1725  sqsize,
1726  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_dts : 0,
1727  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_pts : 0);
1728 
1729  if (show_status == 1 && AV_LOG_INFO > av_log_get_level())
1730  fprintf(stderr, "%s", buf.str);
1731  else
1732  av_log(NULL, AV_LOG_INFO, "%s", buf.str);
1733 
1734  fflush(stderr);
1735  av_bprint_finalize(&buf, NULL);
1736 
1737  last_time = cur_time;
1738  }
1739  }
1740 }
1741 
1742 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1743 {
1744  Frame *vp;
1745 
1746 #if defined(DEBUG_SYNC)
1747  printf("frame_type=%c pts=%0.3f\n",
1748  av_get_picture_type_char(src_frame->pict_type), pts);
1749 #endif
1750 
1751  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1752  return -1;
1753 
1754  vp->sar = src_frame->sample_aspect_ratio;
1755  vp->uploaded = 0;
1756 
1757  vp->width = src_frame->width;
1758  vp->height = src_frame->height;
1759  vp->format = src_frame->format;
1760 
1761  vp->pts = pts;
1762  vp->duration = duration;
1763  vp->pos = pos;
1764  vp->serial = serial;
1765 
1766  set_default_window_size(vp->width, vp->height, vp->sar);
1767 
1768  av_frame_move_ref(vp->frame, src_frame);
1769  frame_queue_push(&is->pictq);
1770  return 0;
1771 }
1772 
1774 {
1775  int got_picture;
1776 
1777  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1778  return -1;
1779 
1780  if (got_picture) {
1781  double dpts = NAN;
1782 
1783  if (frame->pts != AV_NOPTS_VALUE)
1784  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1785 
1786  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1787 
1789  if (frame->pts != AV_NOPTS_VALUE) {
1790  double diff = dpts - get_master_clock(is);
1791  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1792  diff - is->frame_last_filter_delay < 0 &&
1793  is->viddec.pkt_serial == is->vidclk.serial &&
1794  is->videoq.nb_packets) {
1795  is->frame_drops_early++;
1797  got_picture = 0;
1798  }
1799  }
1800  }
1801  }
1802 
1803  return got_picture;
1804 }
1805 
1806 #if CONFIG_AVFILTER
1807 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1808  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1809 {
1810  int ret, i;
1811  int nb_filters = graph->nb_filters;
1813 
1814  if (filtergraph) {
1817  if (!outputs || !inputs) {
1818  ret = AVERROR(ENOMEM);
1819  goto fail;
1820  }
1821 
1822  outputs->name = av_strdup("in");
1823  outputs->filter_ctx = source_ctx;
1824  outputs->pad_idx = 0;
1825  outputs->next = NULL;
1826 
1827  inputs->name = av_strdup("out");
1828  inputs->filter_ctx = sink_ctx;
1829  inputs->pad_idx = 0;
1830  inputs->next = NULL;
1831 
1832  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1833  goto fail;
1834  } else {
1835  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1836  goto fail;
1837  }
1838 
1839  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1840  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1841  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1842 
1844 fail:
1847  return ret;
1848 }
1849 
1850 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1851 {
1853  char sws_flags_str[512] = "";
1854  char buffersrc_args[256];
1855  int ret;
1856  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1857  AVCodecParameters *codecpar = is->video_st->codecpar;
1858  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1859  AVDictionaryEntry *e = NULL;
1860  int nb_pix_fmts = 0;
1861  int i, j;
1862 
1863  for (i = 0; i < renderer_info.num_texture_formats; i++) {
1864  for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; j++) {
1865  if (renderer_info.texture_formats[i] == sdl_texture_format_map[j].texture_fmt) {
1866  pix_fmts[nb_pix_fmts++] = sdl_texture_format_map[j].format;
1867  break;
1868  }
1869  }
1870  }
1871  pix_fmts[nb_pix_fmts] = AV_PIX_FMT_NONE;
1872 
1873  while ((e = av_dict_get(sws_dict, "", e, AV_DICT_IGNORE_SUFFIX))) {
1874  if (!strcmp(e->key, "sws_flags")) {
1875  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1876  } else
1877  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value);
1878  }
1879  if (strlen(sws_flags_str))
1880  sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1881 
1882  graph->scale_sws_opts = av_strdup(sws_flags_str);
1883 
1884  snprintf(buffersrc_args, sizeof(buffersrc_args),
1885  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1886  frame->width, frame->height, frame->format,
1887  is->video_st->time_base.num, is->video_st->time_base.den,
1888  codecpar->sample_aspect_ratio.num, FFMAX(codecpar->sample_aspect_ratio.den, 1));
1889  if (fr.num && fr.den)
1890  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1891 
1892  if ((ret = avfilter_graph_create_filter(&filt_src,
1893  avfilter_get_by_name("buffer"),
1894  "ffplay_buffer", buffersrc_args, NULL,
1895  graph)) < 0)
1896  goto fail;
1897 
1898  ret = avfilter_graph_create_filter(&filt_out,
1899  avfilter_get_by_name("buffersink"),
1900  "ffplay_buffersink", NULL, NULL, graph);
1901  if (ret < 0)
1902  goto fail;
1903 
1904  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1905  goto fail;
1906 
1907  last_filter = filt_out;
1908 
1909 /* Note: this macro adds a filter before the lastly added filter, so the
1910  * processing order of the filters is in reverse */
1911 #define INSERT_FILT(name, arg) do { \
1912  AVFilterContext *filt_ctx; \
1913  \
1914  ret = avfilter_graph_create_filter(&filt_ctx, \
1915  avfilter_get_by_name(name), \
1916  "ffplay_" name, arg, NULL, graph); \
1917  if (ret < 0) \
1918  goto fail; \
1919  \
1920  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1921  if (ret < 0) \
1922  goto fail; \
1923  \
1924  last_filter = filt_ctx; \
1925 } while (0)
1926 
1927  if (autorotate) {
1928  int32_t *displaymatrix = (int32_t *)av_stream_get_side_data(is->video_st, AV_PKT_DATA_DISPLAYMATRIX, NULL);
1929  double theta = get_rotation(displaymatrix);
1930 
1931  if (fabs(theta - 90) < 1.0) {
1932  INSERT_FILT("transpose", "clock");
1933  } else if (fabs(theta - 180) < 1.0) {
1934  INSERT_FILT("hflip", NULL);
1935  INSERT_FILT("vflip", NULL);
1936  } else if (fabs(theta - 270) < 1.0) {
1937  INSERT_FILT("transpose", "cclock");
1938  } else if (fabs(theta) > 1.0) {
1939  char rotate_buf[64];
1940  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1941  INSERT_FILT("rotate", rotate_buf);
1942  }
1943  }
1944 
1945  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
1946  goto fail;
1947 
1948  is->in_video_filter = filt_src;
1949  is->out_video_filter = filt_out;
1950 
1951 fail:
1952  return ret;
1953 }
1954 
1955 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1956 {
1958  int sample_rates[2] = { 0, -1 };
1959  int64_t channel_layouts[2] = { 0, -1 };
1960  int channels[2] = { 0, -1 };
1961  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1962  char aresample_swr_opts[512] = "";
1963  AVDictionaryEntry *e = NULL;
1964  char asrc_args[256];
1965  int ret;
1966 
1967  avfilter_graph_free(&is->agraph);
1968  if (!(is->agraph = avfilter_graph_alloc()))
1969  return AVERROR(ENOMEM);
1970  is->agraph->nb_threads = filter_nbthreads;
1971 
1972  while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
1973  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
1974  if (strlen(aresample_swr_opts))
1975  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
1976  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
1977 
1978  ret = snprintf(asrc_args, sizeof(asrc_args),
1979  "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
1980  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
1981  is->audio_filter_src.channels,
1982  1, is->audio_filter_src.freq);
1983  if (is->audio_filter_src.channel_layout)
1984  snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
1985  ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
1986 
1987  ret = avfilter_graph_create_filter(&filt_asrc,
1988  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1989  asrc_args, NULL, is->agraph);
1990  if (ret < 0)
1991  goto end;
1992 
1993 
1994  ret = avfilter_graph_create_filter(&filt_asink,
1995  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
1996  NULL, NULL, is->agraph);
1997  if (ret < 0)
1998  goto end;
1999 
2000  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
2001  goto end;
2002  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
2003  goto end;
2004 
2005  if (force_output_format) {
2006  channel_layouts[0] = is->audio_tgt.channel_layout;
2007  channels [0] = is->audio_tgt.channel_layout ? -1 : is->audio_tgt.channels;
2008  sample_rates [0] = is->audio_tgt.freq;
2009  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
2010  goto end;
2011  if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2012  goto end;
2013  if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2014  goto end;
2015  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2016  goto end;
2017  }
2018 
2019 
2020  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
2021  goto end;
2022 
2023  is->in_audio_filter = filt_asrc;
2024  is->out_audio_filter = filt_asink;
2025 
2026 end:
2027  if (ret < 0)
2028  avfilter_graph_free(&is->agraph);
2029  return ret;
2030 }
2031 #endif /* CONFIG_AVFILTER */
2032 
2033 static int audio_thread(void *arg)
2034 {
2035  VideoState *is = arg;
2037  Frame *af;
2038 #if CONFIG_AVFILTER
2039  int last_serial = -1;
2040  int64_t dec_channel_layout;
2041  int reconfigure;
2042 #endif
2043  int got_frame = 0;
2044  AVRational tb;
2045  int ret = 0;
2046 
2047  if (!frame)
2048  return AVERROR(ENOMEM);
2049 
2050  do {
2051  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2052  goto the_end;
2053 
2054  if (got_frame) {
2055  tb = (AVRational){1, frame->sample_rate};
2056 
2057 #if CONFIG_AVFILTER
2058  dec_channel_layout = get_valid_channel_layout(frame->channel_layout, frame->channels);
2059 
2060  reconfigure =
2061  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2062  frame->format, frame->channels) ||
2063  is->audio_filter_src.channel_layout != dec_channel_layout ||
2064  is->audio_filter_src.freq != frame->sample_rate ||
2065  is->auddec.pkt_serial != last_serial;
2066 
2067  if (reconfigure) {
2068  char buf1[1024], buf2[1024];
2069  av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2070  av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2072  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2073  is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2074  frame->sample_rate, frame->channels, av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2075 
2076  is->audio_filter_src.fmt = frame->format;
2077  is->audio_filter_src.channels = frame->channels;
2078  is->audio_filter_src.channel_layout = dec_channel_layout;
2079  is->audio_filter_src.freq = frame->sample_rate;
2080  last_serial = is->auddec.pkt_serial;
2081 
2082  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2083  goto the_end;
2084  }
2085 
2086  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2087  goto the_end;
2088 
2089  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2090  tb = av_buffersink_get_time_base(is->out_audio_filter);
2091 #endif
2092  if (!(af = frame_queue_peek_writable(&is->sampq)))
2093  goto the_end;
2094 
2095  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2096  af->pos = frame->pkt_pos;
2097  af->serial = is->auddec.pkt_serial;
2098  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2099 
2101  frame_queue_push(&is->sampq);
2102 
2103 #if CONFIG_AVFILTER
2104  if (is->audioq.serial != is->auddec.pkt_serial)
2105  break;
2106  }
2107  if (ret == AVERROR_EOF)
2108  is->auddec.finished = is->auddec.pkt_serial;
2109 #endif
2110  }
2111  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2112  the_end:
2113 #if CONFIG_AVFILTER
2114  avfilter_graph_free(&is->agraph);
2115 #endif
2116  av_frame_free(&frame);
2117  return ret;
2118 }
2119 
2120 static int decoder_start(Decoder *d, int (*fn)(void *), const char *thread_name, void* arg)
2121 {
2122  packet_queue_start(d->queue);
2123  d->decoder_tid = SDL_CreateThread(fn, thread_name, arg);
2124  if (!d->decoder_tid) {
2125  av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
2126  return AVERROR(ENOMEM);
2127  }
2128  return 0;
2129 }
2130 
2131 static int video_thread(void *arg)
2132 {
2133  VideoState *is = arg;
2135  double pts;
2136  double duration;
2137  int ret;
2138  AVRational tb = is->video_st->time_base;
2139  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2140 
2141 #if CONFIG_AVFILTER
2143  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2144  int last_w = 0;
2145  int last_h = 0;
2146  enum AVPixelFormat last_format = -2;
2147  int last_serial = -1;
2148  int last_vfilter_idx = 0;
2149 #endif
2150 
2151  if (!frame)
2152  return AVERROR(ENOMEM);
2153 
2154  for (;;) {
2156  if (ret < 0)
2157  goto the_end;
2158  if (!ret)
2159  continue;
2160 
2161 #if CONFIG_AVFILTER
2162  if ( last_w != frame->width
2163  || last_h != frame->height
2164  || last_format != frame->format
2165  || last_serial != is->viddec.pkt_serial
2166  || last_vfilter_idx != is->vfilter_idx) {
2168  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2169  last_w, last_h,
2170  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2171  frame->width, frame->height,
2172  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2175  if (!graph) {
2176  ret = AVERROR(ENOMEM);
2177  goto the_end;
2178  }
2179  graph->nb_threads = filter_nbthreads;
2180  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2181  SDL_Event event;
2182  event.type = FF_QUIT_EVENT;
2183  event.user.data1 = is;
2184  SDL_PushEvent(&event);
2185  goto the_end;
2186  }
2187  filt_in = is->in_video_filter;
2188  filt_out = is->out_video_filter;
2189  last_w = frame->width;
2190  last_h = frame->height;
2191  last_format = frame->format;
2192  last_serial = is->viddec.pkt_serial;
2193  last_vfilter_idx = is->vfilter_idx;
2194  frame_rate = av_buffersink_get_frame_rate(filt_out);
2195  }
2196 
2197  ret = av_buffersrc_add_frame(filt_in, frame);
2198  if (ret < 0)
2199  goto the_end;
2200 
2201  while (ret >= 0) {
2202  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2203 
2204  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2205  if (ret < 0) {
2206  if (ret == AVERROR_EOF)
2207  is->viddec.finished = is->viddec.pkt_serial;
2208  ret = 0;
2209  break;
2210  }
2211 
2212  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2213  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2214  is->frame_last_filter_delay = 0;
2215  tb = av_buffersink_get_time_base(filt_out);
2216 #endif
2217  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2218  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2219  ret = queue_picture(is, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial);
2221 #if CONFIG_AVFILTER
2222  if (is->videoq.serial != is->viddec.pkt_serial)
2223  break;
2224  }
2225 #endif
2226 
2227  if (ret < 0)
2228  goto the_end;
2229  }
2230  the_end:
2231 #if CONFIG_AVFILTER
2233 #endif
2234  av_frame_free(&frame);
2235  return 0;
2236 }
2237 
2238 static int subtitle_thread(void *arg)
2239 {
2240  VideoState *is = arg;
2241  Frame *sp;
2242  int got_subtitle;
2243  double pts;
2244 
2245  for (;;) {
2246  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2247  return 0;
2248 
2249  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2250  break;
2251 
2252  pts = 0;
2253 
2254  if (got_subtitle && sp->sub.format == 0) {
2255  if (sp->sub.pts != AV_NOPTS_VALUE)
2256  pts = sp->sub.pts / (double)AV_TIME_BASE;
2257  sp->pts = pts;
2258  sp->serial = is->subdec.pkt_serial;
2259  sp->width = is->subdec.avctx->width;
2260  sp->height = is->subdec.avctx->height;
2261  sp->uploaded = 0;
2262 
2263  /* now we can update the picture count */
2264  frame_queue_push(&is->subpq);
2265  } else if (got_subtitle) {
2266  avsubtitle_free(&sp->sub);
2267  }
2268  }
2269  return 0;
2270 }
2271 
2272 /* copy samples for viewing in editor window */
2273 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2274 {
2275  int size, len;
2276 
2277  size = samples_size / sizeof(short);
2278  while (size > 0) {
2279  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2280  if (len > size)
2281  len = size;
2282  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2283  samples += len;
2284  is->sample_array_index += len;
2285  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2286  is->sample_array_index = 0;
2287  size -= len;
2288  }
2289 }
2290 
2291 /* return the wanted number of samples to get better sync if sync_type is video
2292  * or external master clock */
2293 static int synchronize_audio(VideoState *is, int nb_samples)
2294 {
2295  int wanted_nb_samples = nb_samples;
2296 
2297  /* if not master, then we try to remove or add samples to correct the clock */
2299  double diff, avg_diff;
2300  int min_nb_samples, max_nb_samples;
2301 
2302  diff = get_clock(&is->audclk) - get_master_clock(is);
2303 
2304  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2305  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2306  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2307  /* not enough measures to have a correct estimate */
2308  is->audio_diff_avg_count++;
2309  } else {
2310  /* estimate the A-V difference */
2311  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2312 
2313  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2314  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2315  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2316  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2317  wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2318  }
2319  av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2320  diff, avg_diff, wanted_nb_samples - nb_samples,
2321  is->audio_clock, is->audio_diff_threshold);
2322  }
2323  } else {
2324  /* too big difference : may be initial PTS errors, so
2325  reset A-V filter */
2326  is->audio_diff_avg_count = 0;
2327  is->audio_diff_cum = 0;
2328  }
2329  }
2330 
2331  return wanted_nb_samples;
2332 }
2333 
2334 /**
2335  * Decode one audio frame and return its uncompressed size.
2336  *
2337  * The processed audio frame is decoded, converted if required, and
2338  * stored in is->audio_buf, with size in bytes given by the return
2339  * value.
2340  */
2342 {
2343  int data_size, resampled_data_size;
2344  int64_t dec_channel_layout;
2345  av_unused double audio_clock0;
2346  int wanted_nb_samples;
2347  Frame *af;
2348 
2349  if (is->paused)
2350  return -1;
2351 
2352  do {
2353 #if defined(_WIN32)
2354  while (frame_queue_nb_remaining(&is->sampq) == 0) {
2355  if ((av_gettime_relative() - audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
2356  return -1;
2357  av_usleep (1000);
2358  }
2359 #endif
2360  if (!(af = frame_queue_peek_readable(&is->sampq)))
2361  return -1;
2362  frame_queue_next(&is->sampq);
2363  } while (af->serial != is->audioq.serial);
2364 
2365  data_size = av_samples_get_buffer_size(NULL, af->frame->channels,
2366  af->frame->nb_samples,
2367  af->frame->format, 1);
2368 
2369  dec_channel_layout =
2372  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2373 
2374  if (af->frame->format != is->audio_src.fmt ||
2375  dec_channel_layout != is->audio_src.channel_layout ||
2376  af->frame->sample_rate != is->audio_src.freq ||
2377  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2378  swr_free(&is->swr_ctx);
2379  is->swr_ctx = swr_alloc_set_opts(NULL,
2380  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2381  dec_channel_layout, af->frame->format, af->frame->sample_rate,
2382  0, NULL);
2383  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2385  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2387  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2388  swr_free(&is->swr_ctx);
2389  return -1;
2390  }
2391  is->audio_src.channel_layout = dec_channel_layout;
2392  is->audio_src.channels = af->frame->channels;
2393  is->audio_src.freq = af->frame->sample_rate;
2394  is->audio_src.fmt = af->frame->format;
2395  }
2396 
2397  if (is->swr_ctx) {
2398  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2399  uint8_t **out = &is->audio_buf1;
2400  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2401  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2402  int len2;
2403  if (out_size < 0) {
2404  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2405  return -1;
2406  }
2407  if (wanted_nb_samples != af->frame->nb_samples) {
2408  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2409  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2410  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2411  return -1;
2412  }
2413  }
2414  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2415  if (!is->audio_buf1)
2416  return AVERROR(ENOMEM);
2417  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2418  if (len2 < 0) {
2419  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2420  return -1;
2421  }
2422  if (len2 == out_count) {
2423  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2424  if (swr_init(is->swr_ctx) < 0)
2425  swr_free(&is->swr_ctx);
2426  }
2427  is->audio_buf = is->audio_buf1;
2428  resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2429  } else {
2430  is->audio_buf = af->frame->data[0];
2431  resampled_data_size = data_size;
2432  }
2433 
2434  audio_clock0 = is->audio_clock;
2435  /* update the audio clock with the pts */
2436  if (!isnan(af->pts))
2437  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2438  else
2439  is->audio_clock = NAN;
2440  is->audio_clock_serial = af->serial;
2441 #ifdef DEBUG
2442  {
2443  static double last_clock;
2444  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2445  is->audio_clock - last_clock,
2446  is->audio_clock, audio_clock0);
2447  last_clock = is->audio_clock;
2448  }
2449 #endif
2450  return resampled_data_size;
2451 }
2452 
2453 /* prepare a new audio buffer */
2454 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2455 {
2456  VideoState *is = opaque;
2457  int audio_size, len1;
2458 
2460 
2461  while (len > 0) {
2462  if (is->audio_buf_index >= is->audio_buf_size) {
2463  audio_size = audio_decode_frame(is);
2464  if (audio_size < 0) {
2465  /* if error, just output silence */
2466  is->audio_buf = NULL;
2467  is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2468  } else {
2469  if (is->show_mode != SHOW_MODE_VIDEO)
2470  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2471  is->audio_buf_size = audio_size;
2472  }
2473  is->audio_buf_index = 0;
2474  }
2475  len1 = is->audio_buf_size - is->audio_buf_index;
2476  if (len1 > len)
2477  len1 = len;
2478  if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
2479  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2480  else {
2481  memset(stream, 0, len1);
2482  if (!is->muted && is->audio_buf)
2483  SDL_MixAudioFormat(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, is->audio_volume);
2484  }
2485  len -= len1;
2486  stream += len1;
2487  is->audio_buf_index += len1;
2488  }
2489  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2490  /* Let's assume the audio driver that is used by SDL has two periods. */
2491  if (!isnan(is->audio_clock)) {
2492  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2493  sync_clock_to_slave(&is->extclk, &is->audclk);
2494  }
2495 }
2496 
2497 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2498 {
2499  SDL_AudioSpec wanted_spec, spec;
2500  const char *env;
2501  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2502  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2503  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2504 
2505  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2506  if (env) {
2507  wanted_nb_channels = atoi(env);
2508  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2509  }
2510  if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2511  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2512  wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2513  }
2514  wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2515  wanted_spec.channels = wanted_nb_channels;
2516  wanted_spec.freq = wanted_sample_rate;
2517  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2518  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2519  return -1;
2520  }
2521  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2522  next_sample_rate_idx--;
2523  wanted_spec.format = AUDIO_S16SYS;
2524  wanted_spec.silence = 0;
2525  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
2526  wanted_spec.callback = sdl_audio_callback;
2527  wanted_spec.userdata = opaque;
2528  while (!(audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE))) {
2529  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2530  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2531  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2532  if (!wanted_spec.channels) {
2533  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2534  wanted_spec.channels = wanted_nb_channels;
2535  if (!wanted_spec.freq) {
2537  "No more combinations to try, audio open failed\n");
2538  return -1;
2539  }
2540  }
2541  wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2542  }
2543  if (spec.format != AUDIO_S16SYS) {
2545  "SDL advised audio format %d is not supported!\n", spec.format);
2546  return -1;
2547  }
2548  if (spec.channels != wanted_spec.channels) {
2549  wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2550  if (!wanted_channel_layout) {
2552  "SDL advised channel count %d is not supported!\n", spec.channels);
2553  return -1;
2554  }
2555  }
2556 
2557  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2558  audio_hw_params->freq = spec.freq;
2559  audio_hw_params->channel_layout = wanted_channel_layout;
2560  audio_hw_params->channels = spec.channels;
2561  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
2562  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2563  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2564  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2565  return -1;
2566  }
2567  return spec.size;
2568 }
2569 
2570 /* open a given stream. Return 0 if OK */
2571 static int stream_component_open(VideoState *is, int stream_index)
2572 {
2573  AVFormatContext *ic = is->ic;
2574  AVCodecContext *avctx;
2575  const AVCodec *codec;
2576  const char *forced_codec_name = NULL;
2577  AVDictionary *opts = NULL;
2578  AVDictionaryEntry *t = NULL;
2579  int sample_rate, nb_channels;
2580  int64_t channel_layout;
2581  int ret = 0;
2582  int stream_lowres = lowres;
2583 
2584  if (stream_index < 0 || stream_index >= ic->nb_streams)
2585  return -1;
2586 
2587  avctx = avcodec_alloc_context3(NULL);
2588  if (!avctx)
2589  return AVERROR(ENOMEM);
2590 
2591  ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
2592  if (ret < 0)
2593  goto fail;
2594  avctx->pkt_timebase = ic->streams[stream_index]->time_base;
2595 
2596  codec = avcodec_find_decoder(avctx->codec_id);
2597 
2598  switch(avctx->codec_type){
2599  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2600  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2601  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2602  }
2603  if (forced_codec_name)
2604  codec = avcodec_find_decoder_by_name(forced_codec_name);
2605  if (!codec) {
2606  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2607  "No codec could be found with name '%s'\n", forced_codec_name);
2608  else av_log(NULL, AV_LOG_WARNING,
2609  "No decoder could be found for codec %s\n", avcodec_get_name(avctx->codec_id));
2610  ret = AVERROR(EINVAL);
2611  goto fail;
2612  }
2613 
2614  avctx->codec_id = codec->id;
2615  if (stream_lowres > codec->max_lowres) {
2616  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2617  codec->max_lowres);
2618  stream_lowres = codec->max_lowres;
2619  }
2620  avctx->lowres = stream_lowres;
2621 
2622  if (fast)
2623  avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2624 
2625  opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2626  if (!av_dict_get(opts, "threads", NULL, 0))
2627  av_dict_set(&opts, "threads", "auto", 0);
2628  if (stream_lowres)
2629  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2630  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2631  goto fail;
2632  }
2633  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2634  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2636  goto fail;
2637  }
2638 
2639  is->eof = 0;
2640  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2641  switch (avctx->codec_type) {
2642  case AVMEDIA_TYPE_AUDIO:
2643 #if CONFIG_AVFILTER
2644  {
2645  AVFilterContext *sink;
2646 
2647  is->audio_filter_src.freq = avctx->sample_rate;
2648  is->audio_filter_src.channels = avctx->channels;
2649  is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2650  is->audio_filter_src.fmt = avctx->sample_fmt;
2651  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2652  goto fail;
2653  sink = is->out_audio_filter;
2656  channel_layout = av_buffersink_get_channel_layout(sink);
2657  }
2658 #else
2659  sample_rate = avctx->sample_rate;
2660  nb_channels = avctx->channels;
2661  channel_layout = avctx->channel_layout;
2662 #endif
2663 
2664  /* prepare audio output */
2665  if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2666  goto fail;
2667  is->audio_hw_buf_size = ret;
2668  is->audio_src = is->audio_tgt;
2669  is->audio_buf_size = 0;
2670  is->audio_buf_index = 0;
2671 
2672  /* init averaging filter */
2673  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2674  is->audio_diff_avg_count = 0;
2675  /* since we do not have a precise anough audio FIFO fullness,
2676  we correct audio sync only if larger than this threshold */
2677  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2678 
2679  is->audio_stream = stream_index;
2680  is->audio_st = ic->streams[stream_index];
2681 
2682  if ((ret = decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread)) < 0)
2683  goto fail;
2684  if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
2685  is->auddec.start_pts = is->audio_st->start_time;
2686  is->auddec.start_pts_tb = is->audio_st->time_base;
2687  }
2688  if ((ret = decoder_start(&is->auddec, audio_thread, "audio_decoder", is)) < 0)
2689  goto out;
2690  SDL_PauseAudioDevice(audio_dev, 0);
2691  break;
2692  case AVMEDIA_TYPE_VIDEO:
2693  is->video_stream = stream_index;
2694  is->video_st = ic->streams[stream_index];
2695 
2696  if ((ret = decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread)) < 0)
2697  goto fail;
2698  if ((ret = decoder_start(&is->viddec, video_thread, "video_decoder", is)) < 0)
2699  goto out;
2700  is->queue_attachments_req = 1;
2701  break;
2702  case AVMEDIA_TYPE_SUBTITLE:
2703  is->subtitle_stream = stream_index;
2704  is->subtitle_st = ic->streams[stream_index];
2705 
2706  if ((ret = decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread)) < 0)
2707  goto fail;
2708  if ((ret = decoder_start(&is->subdec, subtitle_thread, "subtitle_decoder", is)) < 0)
2709  goto out;
2710  break;
2711  default:
2712  break;
2713  }
2714  goto out;
2715 
2716 fail:
2717  avcodec_free_context(&avctx);
2718 out:
2719  av_dict_free(&opts);
2720 
2721  return ret;
2722 }
2723 
2724 static int decode_interrupt_cb(void *ctx)
2725 {
2726  VideoState *is = ctx;
2727  return is->abort_request;
2728 }
2729 
2730 static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue) {
2731  return stream_id < 0 ||
2732  queue->abort_request ||
2734  queue->nb_packets > MIN_FRAMES && (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0);
2735 }
2736 
2738 {
2739  if( !strcmp(s->iformat->name, "rtp")
2740  || !strcmp(s->iformat->name, "rtsp")
2741  || !strcmp(s->iformat->name, "sdp")
2742  )
2743  return 1;
2744 
2745  if(s->pb && ( !strncmp(s->url, "rtp:", 4)
2746  || !strncmp(s->url, "udp:", 4)
2747  )
2748  )
2749  return 1;
2750  return 0;
2751 }
2752 
2753 /* this thread gets the stream from the disk or the network */
2754 static int read_thread(void *arg)
2755 {
2756  VideoState *is = arg;
2757  AVFormatContext *ic = NULL;
2758  int err, i, ret;
2759  int st_index[AVMEDIA_TYPE_NB];
2760  AVPacket *pkt = NULL;
2761  int64_t stream_start_time;
2762  int pkt_in_play_range = 0;
2763  AVDictionaryEntry *t;
2764  SDL_mutex *wait_mutex = SDL_CreateMutex();
2765  int scan_all_pmts_set = 0;
2766  int64_t pkt_ts;
2767 
2768  if (!wait_mutex) {
2769  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
2770  ret = AVERROR(ENOMEM);
2771  goto fail;
2772  }
2773 
2774  memset(st_index, -1, sizeof(st_index));
2775  is->eof = 0;
2776 
2777  pkt = av_packet_alloc();
2778  if (!pkt) {
2779  av_log(NULL, AV_LOG_FATAL, "Could not allocate packet.\n");
2780  ret = AVERROR(ENOMEM);
2781  goto fail;
2782  }
2783  ic = avformat_alloc_context();
2784  if (!ic) {
2785  av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2786  ret = AVERROR(ENOMEM);
2787  goto fail;
2788  }
2791  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2792  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2793  scan_all_pmts_set = 1;
2794  }
2795  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2796  if (err < 0) {
2797  print_error(is->filename, err);
2798  ret = -1;
2799  goto fail;
2800  }
2801  if (scan_all_pmts_set)
2802  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2803 
2805  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2807  goto fail;
2808  }
2809  is->ic = ic;
2810 
2811  if (genpts)
2812  ic->flags |= AVFMT_FLAG_GENPTS;
2813 
2815 
2816  if (find_stream_info) {
2818  int orig_nb_streams = ic->nb_streams;
2819 
2820  err = avformat_find_stream_info(ic, opts);
2821 
2822  for (i = 0; i < orig_nb_streams; i++)
2823  av_dict_free(&opts[i]);
2824  av_freep(&opts);
2825 
2826  if (err < 0) {
2828  "%s: could not find codec parameters\n", is->filename);
2829  ret = -1;
2830  goto fail;
2831  }
2832  }
2833 
2834  if (ic->pb)
2835  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2836 
2837  if (seek_by_bytes < 0)
2838  seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
2839 
2840  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2841 
2842  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2843  window_title = av_asprintf("%s - %s", t->value, input_filename);
2844 
2845  /* if seeking requested, we execute it */
2846  if (start_time != AV_NOPTS_VALUE) {
2847  int64_t timestamp;
2848 
2849  timestamp = start_time;
2850  /* add the stream start time */
2851  if (ic->start_time != AV_NOPTS_VALUE)
2852  timestamp += ic->start_time;
2853  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2854  if (ret < 0) {
2855  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2856  is->filename, (double)timestamp / AV_TIME_BASE);
2857  }
2858  }
2859 
2860  is->realtime = is_realtime(ic);
2861 
2862  if (show_status)
2863  av_dump_format(ic, 0, is->filename, 0);
2864 
2865  for (i = 0; i < ic->nb_streams; i++) {
2866  AVStream *st = ic->streams[i];
2867  enum AVMediaType type = st->codecpar->codec_type;
2868  st->discard = AVDISCARD_ALL;
2869  if (type >= 0 && wanted_stream_spec[type] && st_index[type] == -1)
2871  st_index[type] = i;
2872  }
2873  for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
2874  if (wanted_stream_spec[i] && st_index[i] == -1) {
2875  av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
2876  st_index[i] = INT_MAX;
2877  }
2878  }
2879 
2880  if (!video_disable)
2881  st_index[AVMEDIA_TYPE_VIDEO] =
2883  st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2884  if (!audio_disable)
2885  st_index[AVMEDIA_TYPE_AUDIO] =
2887  st_index[AVMEDIA_TYPE_AUDIO],
2888  st_index[AVMEDIA_TYPE_VIDEO],
2889  NULL, 0);
2891  st_index[AVMEDIA_TYPE_SUBTITLE] =
2893  st_index[AVMEDIA_TYPE_SUBTITLE],
2894  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2895  st_index[AVMEDIA_TYPE_AUDIO] :
2896  st_index[AVMEDIA_TYPE_VIDEO]),
2897  NULL, 0);
2898 
2899  is->show_mode = show_mode;
2900  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2901  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2902  AVCodecParameters *codecpar = st->codecpar;
2904  if (codecpar->width)
2905  set_default_window_size(codecpar->width, codecpar->height, sar);
2906  }
2907 
2908  /* open the streams */
2909  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2911  }
2912 
2913  ret = -1;
2914  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2916  }
2917  if (is->show_mode == SHOW_MODE_NONE)
2918  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2919 
2920  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2922  }
2923 
2924  if (is->video_stream < 0 && is->audio_stream < 0) {
2925  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
2926  is->filename);
2927  ret = -1;
2928  goto fail;
2929  }
2930 
2931  if (infinite_buffer < 0 && is->realtime)
2932  infinite_buffer = 1;
2933 
2934  for (;;) {
2935  if (is->abort_request)
2936  break;
2937  if (is->paused != is->last_paused) {
2938  is->last_paused = is->paused;
2939  if (is->paused)
2940  is->read_pause_return = av_read_pause(ic);
2941  else
2942  av_read_play(ic);
2943  }
2944 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2945  if (is->paused &&
2946  (!strcmp(ic->iformat->name, "rtsp") ||
2947  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2948  /* wait 10 ms to avoid trying to get another packet */
2949  /* XXX: horrible */
2950  SDL_Delay(10);
2951  continue;
2952  }
2953 #endif
2954  if (is->seek_req) {
2955  int64_t seek_target = is->seek_pos;
2956  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2957  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2958 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2959 // of the seek_pos/seek_rel variables
2960 
2961  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2962  if (ret < 0) {
2964  "%s: error while seeking\n", is->ic->url);
2965  } else {
2966  if (is->audio_stream >= 0)
2967  packet_queue_flush(&is->audioq);
2968  if (is->subtitle_stream >= 0)
2969  packet_queue_flush(&is->subtitleq);
2970  if (is->video_stream >= 0)
2971  packet_queue_flush(&is->videoq);
2972  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
2973  set_clock(&is->extclk, NAN, 0);
2974  } else {
2975  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
2976  }
2977  }
2978  is->seek_req = 0;
2979  is->queue_attachments_req = 1;
2980  is->eof = 0;
2981  if (is->paused)
2983  }
2984  if (is->queue_attachments_req) {
2985  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
2986  if ((ret = av_packet_ref(pkt, &is->video_st->attached_pic)) < 0)
2987  goto fail;
2988  packet_queue_put(&is->videoq, pkt);
2989  packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
2990  }
2991  is->queue_attachments_req = 0;
2992  }
2993 
2994  /* if the queue are full, no need to read more */
2995  if (infinite_buffer<1 &&
2996  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2997  || (stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq) &&
2998  stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq) &&
2999  stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq)))) {
3000  /* wait 10 ms */
3001  SDL_LockMutex(wait_mutex);
3002  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3003  SDL_UnlockMutex(wait_mutex);
3004  continue;
3005  }
3006  if (!is->paused &&
3007  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
3008  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
3009  if (loop != 1 && (!loop || --loop)) {
3011  } else if (autoexit) {
3012  ret = AVERROR_EOF;
3013  goto fail;
3014  }
3015  }
3016  ret = av_read_frame(ic, pkt);
3017  if (ret < 0) {
3018  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
3019  if (is->video_stream >= 0)
3020  packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
3021  if (is->audio_stream >= 0)
3022  packet_queue_put_nullpacket(&is->audioq, pkt, is->audio_stream);
3023  if (is->subtitle_stream >= 0)
3024  packet_queue_put_nullpacket(&is->subtitleq, pkt, is->subtitle_stream);
3025  is->eof = 1;
3026  }
3027  if (ic->pb && ic->pb->error) {
3028  if (autoexit)
3029  goto fail;
3030  else
3031  break;
3032  }
3033  SDL_LockMutex(wait_mutex);
3034  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3035  SDL_UnlockMutex(wait_mutex);
3036  continue;
3037  } else {
3038  is->eof = 0;
3039  }
3040  /* check if packet is in play range specified by user, then queue, otherwise discard */
3041  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3042  pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
3043  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3044  (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3046  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3047  <= ((double)duration / 1000000);
3048  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3049  packet_queue_put(&is->audioq, pkt);
3050  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3051  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3052  packet_queue_put(&is->videoq, pkt);
3053  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3054  packet_queue_put(&is->subtitleq, pkt);
3055  } else {
3057  }
3058  }
3059 
3060  ret = 0;
3061  fail:
3062  if (ic && !is->ic)
3063  avformat_close_input(&ic);
3064 
3065  av_packet_free(&pkt);
3066  if (ret != 0) {
3067  SDL_Event event;
3068 
3069  event.type = FF_QUIT_EVENT;
3070  event.user.data1 = is;
3071  SDL_PushEvent(&event);
3072  }
3073  SDL_DestroyMutex(wait_mutex);
3074  return 0;
3075 }
3076 
3077 static VideoState *stream_open(const char *filename,
3078  const AVInputFormat *iformat)
3079 {
3080  VideoState *is;
3081 
3082  is = av_mallocz(sizeof(VideoState));
3083  if (!is)
3084  return NULL;
3085  is->last_video_stream = is->video_stream = -1;
3086  is->last_audio_stream = is->audio_stream = -1;
3087  is->last_subtitle_stream = is->subtitle_stream = -1;
3088  is->filename = av_strdup(filename);
3089  if (!is->filename)
3090  goto fail;
3091  is->iformat = iformat;
3092  is->ytop = 0;
3093  is->xleft = 0;
3094 
3095  /* start video display */
3096  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3097  goto fail;
3098  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3099  goto fail;
3100  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3101  goto fail;
3102 
3103  if (packet_queue_init(&is->videoq) < 0 ||
3104  packet_queue_init(&is->audioq) < 0 ||
3105  packet_queue_init(&is->subtitleq) < 0)
3106  goto fail;
3107 
3108  if (!(is->continue_read_thread = SDL_CreateCond())) {
3109  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
3110  goto fail;
3111  }
3112 
3113  init_clock(&is->vidclk, &is->videoq.serial);
3114  init_clock(&is->audclk, &is->audioq.serial);
3115  init_clock(&is->extclk, &is->extclk.serial);
3116  is->audio_clock_serial = -1;
3117  if (startup_volume < 0)
3118  av_log(NULL, AV_LOG_WARNING, "-volume=%d < 0, setting to 0\n", startup_volume);
3119  if (startup_volume > 100)
3120  av_log(NULL, AV_LOG_WARNING, "-volume=%d > 100, setting to 100\n", startup_volume);
3122  startup_volume = av_clip(SDL_MIX_MAXVOLUME * startup_volume / 100, 0, SDL_MIX_MAXVOLUME);
3123  is->audio_volume = startup_volume;
3124  is->muted = 0;
3125  is->av_sync_type = av_sync_type;
3126  is->read_tid = SDL_CreateThread(read_thread, "read_thread", is);
3127  if (!is->read_tid) {
3128  av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
3129 fail:
3130  stream_close(is);
3131  return NULL;
3132  }
3133  return is;
3134 }
3135 
3137 {
3138  AVFormatContext *ic = is->ic;
3139  int start_index, stream_index;
3140  int old_index;
3141  AVStream *st;
3142  AVProgram *p = NULL;
3143  int nb_streams = is->ic->nb_streams;
3144 
3145  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3146  start_index = is->last_video_stream;
3147  old_index = is->video_stream;
3148  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3149  start_index = is->last_audio_stream;
3150  old_index = is->audio_stream;
3151  } else {
3152  start_index = is->last_subtitle_stream;
3153  old_index = is->subtitle_stream;
3154  }
3155  stream_index = start_index;
3156 
3157  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3158  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3159  if (p) {
3161  for (start_index = 0; start_index < nb_streams; start_index++)
3162  if (p->stream_index[start_index] == stream_index)
3163  break;
3164  if (start_index == nb_streams)
3165  start_index = -1;
3166  stream_index = start_index;
3167  }
3168  }
3169 
3170  for (;;) {
3171  if (++stream_index >= nb_streams)
3172  {
3174  {
3175  stream_index = -1;
3176  is->last_subtitle_stream = -1;
3177  goto the_end;
3178  }
3179  if (start_index == -1)
3180  return;
3181  stream_index = 0;
3182  }
3183  if (stream_index == start_index)
3184  return;
3185  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3186  if (st->codecpar->codec_type == codec_type) {
3187  /* check that parameters are OK */
3188  switch (codec_type) {
3189  case AVMEDIA_TYPE_AUDIO:
3190  if (st->codecpar->sample_rate != 0 &&
3191  st->codecpar->channels != 0)
3192  goto the_end;
3193  break;
3194  case AVMEDIA_TYPE_VIDEO:
3195  case AVMEDIA_TYPE_SUBTITLE:
3196  goto the_end;
3197  default:
3198  break;
3199  }
3200  }
3201  }
3202  the_end:
3203  if (p && stream_index != -1)
3204  stream_index = p->stream_index[stream_index];
3205  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3207  old_index,
3208  stream_index);
3209 
3210  stream_component_close(is, old_index);
3211  stream_component_open(is, stream_index);
3212 }
3213 
3214 
3216 {
3218  SDL_SetWindowFullscreen(window, is_full_screen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0);
3219 }
3220 
3222 {
3223  int next = is->show_mode;
3224  do {
3225  next = (next + 1) % SHOW_MODE_NB;
3226  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3227  if (is->show_mode != next) {
3228  is->force_refresh = 1;
3229  is->show_mode = next;
3230  }
3231 }
3232 
3233 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3234  double remaining_time = 0.0;
3235  SDL_PumpEvents();
3236  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
3238  SDL_ShowCursor(0);
3239  cursor_hidden = 1;
3240  }
3241  if (remaining_time > 0.0)
3242  av_usleep((int64_t)(remaining_time * 1000000.0));
3243  remaining_time = REFRESH_RATE;
3244  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3245  video_refresh(is, &remaining_time);
3246  SDL_PumpEvents();
3247  }
3248 }
3249 
3250 static void seek_chapter(VideoState *is, int incr)
3251 {
3252  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3253  int i;
3254 
3255  if (!is->ic->nb_chapters)
3256  return;
3257 
3258  /* find the current chapter */
3259  for (i = 0; i < is->ic->nb_chapters; i++) {
3260  AVChapter *ch = is->ic->chapters[i];
3261  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3262  i--;
3263  break;
3264  }
3265  }
3266 
3267  i += incr;
3268  i = FFMAX(i, 0);
3269  if (i >= is->ic->nb_chapters)
3270  return;
3271 
3272  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3273  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3274  AV_TIME_BASE_Q), 0, 0);
3275 }
3276 
3277 /* handle an event sent by the GUI */
3278 static void event_loop(VideoState *cur_stream)
3279 {
3280  SDL_Event event;
3281  double incr, pos, frac;
3282 
3283  for (;;) {
3284  double x;
3285  refresh_loop_wait_event(cur_stream, &event);
3286  switch (event.type) {
3287  case SDL_KEYDOWN:
3288  if (exit_on_keydown || event.key.keysym.sym == SDLK_ESCAPE || event.key.keysym.sym == SDLK_q) {
3289  do_exit(cur_stream);
3290  break;
3291  }
3292  // If we don't yet have a window, skip all key events, because read_thread might still be initializing...
3293  if (!cur_stream->width)
3294  continue;
3295  switch (event.key.keysym.sym) {
3296  case SDLK_f:
3297  toggle_full_screen(cur_stream);
3298  cur_stream->force_refresh = 1;
3299  break;
3300  case SDLK_p:
3301  case SDLK_SPACE:
3302  toggle_pause(cur_stream);
3303  break;
3304  case SDLK_m:
3305  toggle_mute(cur_stream);
3306  break;
3307  case SDLK_KP_MULTIPLY:
3308  case SDLK_0:
3309  update_volume(cur_stream, 1, SDL_VOLUME_STEP);
3310  break;
3311  case SDLK_KP_DIVIDE:
3312  case SDLK_9:
3313  update_volume(cur_stream, -1, SDL_VOLUME_STEP);
3314  break;
3315  case SDLK_s: // S: Step to next frame
3316  step_to_next_frame(cur_stream);
3317  break;
3318  case SDLK_a:
3320  break;
3321  case SDLK_v:
3323  break;
3324  case SDLK_c:
3328  break;
3329  case SDLK_t:
3331  break;
3332  case SDLK_w:
3333 #if CONFIG_AVFILTER
3334  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3335  if (++cur_stream->vfilter_idx >= nb_vfilters)
3336  cur_stream->vfilter_idx = 0;
3337  } else {
3338  cur_stream->vfilter_idx = 0;
3339  toggle_audio_display(cur_stream);
3340  }
3341 #else
3342  toggle_audio_display(cur_stream);
3343 #endif
3344  break;
3345  case SDLK_PAGEUP:
3346  if (cur_stream->ic->nb_chapters <= 1) {
3347  incr = 600.0;
3348  goto do_seek;
3349  }
3350  seek_chapter(cur_stream, 1);
3351  break;
3352  case SDLK_PAGEDOWN:
3353  if (cur_stream->ic->nb_chapters <= 1) {
3354  incr = -600.0;
3355  goto do_seek;
3356  }
3357  seek_chapter(cur_stream, -1);
3358  break;
3359  case SDLK_LEFT:
3360  incr = seek_interval ? -seek_interval : -10.0;
3361  goto do_seek;
3362  case SDLK_RIGHT:
3363  incr = seek_interval ? seek_interval : 10.0;
3364  goto do_seek;
3365  case SDLK_UP:
3366  incr = 60.0;
3367  goto do_seek;
3368  case SDLK_DOWN:
3369  incr = -60.0;
3370  do_seek:
3371  if (seek_by_bytes) {
3372  pos = -1;
3373  if (pos < 0 && cur_stream->video_stream >= 0)
3374  pos = frame_queue_last_pos(&cur_stream->pictq);
3375  if (pos < 0 && cur_stream->audio_stream >= 0)
3376  pos = frame_queue_last_pos(&cur_stream->sampq);
3377  if (pos < 0)
3378  pos = avio_tell(cur_stream->ic->pb);
3379  if (cur_stream->ic->bit_rate)
3380  incr *= cur_stream->ic->bit_rate / 8.0;
3381  else
3382  incr *= 180000.0;
3383  pos += incr;
3384  stream_seek(cur_stream, pos, incr, 1);
3385  } else {
3386  pos = get_master_clock(cur_stream);
3387  if (isnan(pos))
3388  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3389  pos += incr;
3390  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3391  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3392  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3393  }
3394  break;
3395  default:
3396  break;
3397  }
3398  break;
3399  case SDL_MOUSEBUTTONDOWN:
3400  if (exit_on_mousedown) {
3401  do_exit(cur_stream);
3402  break;
3403  }
3404  if (event.button.button == SDL_BUTTON_LEFT) {
3405  static int64_t last_mouse_left_click = 0;
3406  if (av_gettime_relative() - last_mouse_left_click <= 500000) {
3407  toggle_full_screen(cur_stream);
3408  cur_stream->force_refresh = 1;
3409  last_mouse_left_click = 0;
3410  } else {
3411  last_mouse_left_click = av_gettime_relative();
3412  }
3413  }
3414  case SDL_MOUSEMOTION:
3415  if (cursor_hidden) {
3416  SDL_ShowCursor(1);
3417  cursor_hidden = 0;
3418  }
3420  if (event.type == SDL_MOUSEBUTTONDOWN) {
3421  if (event.button.button != SDL_BUTTON_RIGHT)
3422  break;
3423  x = event.button.x;
3424  } else {
3425  if (!(event.motion.state & SDL_BUTTON_RMASK))
3426  break;
3427  x = event.motion.x;
3428  }
3429  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3430  uint64_t size = avio_size(cur_stream->ic->pb);
3431  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3432  } else {
3433  int64_t ts;
3434  int ns, hh, mm, ss;
3435  int tns, thh, tmm, tss;
3436  tns = cur_stream->ic->duration / 1000000LL;
3437  thh = tns / 3600;
3438  tmm = (tns % 3600) / 60;
3439  tss = (tns % 60);
3440  frac = x / cur_stream->width;
3441  ns = frac * tns;
3442  hh = ns / 3600;
3443  mm = (ns % 3600) / 60;
3444  ss = (ns % 60);
3446  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3447  hh, mm, ss, thh, tmm, tss);
3448  ts = frac * cur_stream->ic->duration;
3449  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3450  ts += cur_stream->ic->start_time;
3451  stream_seek(cur_stream, ts, 0, 0);
3452  }
3453  break;
3454  case SDL_WINDOWEVENT:
3455  switch (event.window.event) {
3456  case SDL_WINDOWEVENT_SIZE_CHANGED:
3457  screen_width = cur_stream->width = event.window.data1;
3458  screen_height = cur_stream->height = event.window.data2;
3459  if (cur_stream->vis_texture) {
3460  SDL_DestroyTexture(cur_stream->vis_texture);
3461  cur_stream->vis_texture = NULL;
3462  }
3463  case SDL_WINDOWEVENT_EXPOSED:
3464  cur_stream->force_refresh = 1;
3465  }
3466  break;
3467  case SDL_QUIT:
3468  case FF_QUIT_EVENT:
3469  do_exit(cur_stream);
3470  break;
3471  default:
3472  break;
3473  }
3474  }
3475 }
3476 
3477 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3478 {
3479  av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3480  return opt_default(NULL, "video_size", arg);
3481 }
3482 
3483 static int opt_width(void *optctx, const char *opt, const char *arg)
3484 {
3485  screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3486  return 0;
3487 }
3488 
3489 static int opt_height(void *optctx, const char *opt, const char *arg)
3490 {
3491  screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3492  return 0;
3493 }
3494 
3495 static int opt_format(void *optctx, const char *opt, const char *arg)
3496 {
3498  if (!file_iformat) {
3499  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3500  return AVERROR(EINVAL);
3501  }
3502  return 0;
3503 }
3504 
3505 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3506 {
3507  av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3508  return opt_default(NULL, "pixel_format", arg);
3509 }
3510 
3511 static int opt_sync(void *optctx, const char *opt, const char *arg)
3512 {
3513  if (!strcmp(arg, "audio"))
3515  else if (!strcmp(arg, "video"))
3517  else if (!strcmp(arg, "ext"))
3519  else {
3520  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3521  exit(1);
3522  }
3523  return 0;
3524 }
3525 
3526 static int opt_seek(void *optctx, const char *opt, const char *arg)
3527 {
3528  start_time = parse_time_or_die(opt, arg, 1);
3529  return 0;
3530 }
3531 
3532 static int opt_duration(void *optctx, const char *opt, const char *arg)
3533 {
3534  duration = parse_time_or_die(opt, arg, 1);
3535  return 0;
3536 }
3537 
3538 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3539 {
3540  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3541  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3542  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3543  parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3544  return 0;
3545 }
3546 
3547 static void opt_input_file(void *optctx, const char *filename)
3548 {
3549  if (input_filename) {
3551  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3552  filename, input_filename);
3553  exit(1);
3554  }
3555  if (!strcmp(filename, "-"))
3556  filename = "pipe:";
3557  input_filename = filename;
3558 }
3559 
3560 static int opt_codec(void *optctx, const char *opt, const char *arg)
3561 {
3562  const char *spec = strchr(opt, ':');
3563  if (!spec) {
3565  "No media specifier was specified in '%s' in option '%s'\n",
3566  arg, opt);
3567  return AVERROR(EINVAL);
3568  }
3569  spec++;
3570  switch (spec[0]) {
3571  case 'a' : audio_codec_name = arg; break;
3572  case 's' : subtitle_codec_name = arg; break;
3573  case 'v' : video_codec_name = arg; break;
3574  default:
3576  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3577  return AVERROR(EINVAL);
3578  }
3579  return 0;
3580 }
3581 
static int dummy; /* write-only sink for the no-op "-i" option in the table below */
3583 
3584 static const OptionDef options[] = {
3586  { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3587  { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3588  { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3589  { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3590  { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3591  { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3592  { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3593  { "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3594  { "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3595  { "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3596  { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3597  { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3598  { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3599  { "seek_interval", OPT_FLOAT | HAS_ARG, { &seek_interval }, "set seek interval for left/right keys, in seconds", "seconds" },
3600  { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3601  { "noborder", OPT_BOOL, { &borderless }, "borderless window" },
3602  { "alwaysontop", OPT_BOOL, { &alwaysontop }, "window always on top" },
3603  { "volume", OPT_INT | HAS_ARG, { &startup_volume}, "set startup volume 0=min 100=max", "volume" },
3604  { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3605  { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3606  { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3607  { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3608  { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3609  { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3610  { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3611  { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3612  { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3613  { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3614  { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3615  { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3616  { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3617  { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3618  { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3619  { "left", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_left }, "set the x position for the left of the window", "x pos" },
3620  { "top", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_top }, "set the y position for the top of the window", "y pos" },
3621 #if CONFIG_AVFILTER
3622  { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3623  { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3624 #endif
3625  { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3626  { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3627  { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3628  { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3629  { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3630  { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3631  { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3632  { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3633  { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
3634  { "find_stream_info", OPT_BOOL | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
3635  "read and decode the streams to fill missing information with heuristics" },
3636  { "filter_threads", HAS_ARG | OPT_INT | OPT_EXPERT, { &filter_nbthreads }, "number of filter threads per graph" },
3637  { NULL, },
3638 };
3639 
/* Print a one-line program description and invocation synopsis via av_log. */
static void show_usage(void)
{
    av_log(NULL, AV_LOG_INFO, "Simple media player\n");
    av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
    av_log(NULL, AV_LOG_INFO, "\n");
}
3646 
3647 void show_help_default(const char *opt, const char *arg)
3648 {
3650  show_usage();
3651  show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3652  show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3653  printf("\n");
3656 #if !CONFIG_AVFILTER
3658 #else
3660 #endif
3661  printf("\nWhile playing:\n"
3662  "q, ESC quit\n"
3663  "f toggle full screen\n"
3664  "p, SPC pause\n"
3665  "m toggle mute\n"
3666  "9, 0 decrease and increase volume respectively\n"
3667  "/, * decrease and increase volume respectively\n"
3668  "a cycle audio channel in the current program\n"
3669  "v cycle video channel\n"
3670  "t cycle subtitle channel in the current program\n"
3671  "c cycle program\n"
3672  "w cycle video filters or show modes\n"
3673  "s activate frame-step mode\n"
3674  "left/right seek backward/forward 10 seconds or to custom interval if -seek_interval is set\n"
3675  "down/up seek backward/forward 1 minute\n"
3676  "page down/page up seek backward/forward 10 minutes\n"
3677  "right mouse click seek to percentage in file corresponding to fraction of width\n"
3678  "left double-click toggle full screen\n"
3679  );
3680 }
3681 
3682 /* Called from the main */
3683 int main(int argc, char **argv)
3684 {
3685  int flags;
3686  VideoState *is;
3687 
3688  init_dynload();
3689 
3691  parse_loglevel(argc, argv, options);
3692 
3693  /* register all codecs, demux and protocols */
3694 #if CONFIG_AVDEVICE
3696 #endif
3698 
3699  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3700  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3701 
3702  show_banner(argc, argv, options);
3703 
3704  parse_options(NULL, argc, argv, options, opt_input_file);
3705 
3706  if (!input_filename) {
3707  show_usage();
3708  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3710  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3711  exit(1);
3712  }
3713 
3714  if (display_disable) {
3715  video_disable = 1;
3716  }
3717  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3718  if (audio_disable)
3719  flags &= ~SDL_INIT_AUDIO;
3720  else {
3721  /* Try to work around an occasional ALSA buffer underflow issue when the
3722  * period size is NPOT due to ALSA resampling by forcing the buffer size. */
3723  if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
3724  SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1);
3725  }
3726  if (display_disable)
3727  flags &= ~SDL_INIT_VIDEO;
3728  if (SDL_Init (flags)) {
3729  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3730  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3731  exit(1);
3732  }
3733 
3734  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3735  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3736 
3737  if (!display_disable) {
3738  int flags = SDL_WINDOW_HIDDEN;
3739  if (alwaysontop)
3740 #if SDL_VERSION_ATLEAST(2,0,5)
3741  flags |= SDL_WINDOW_ALWAYS_ON_TOP;
3742 #else
3743  av_log(NULL, AV_LOG_WARNING, "Your SDL version doesn't support SDL_WINDOW_ALWAYS_ON_TOP. Feature will be inactive.\n");
3744 #endif
3745  if (borderless)
3746  flags |= SDL_WINDOW_BORDERLESS;
3747  else
3748  flags |= SDL_WINDOW_RESIZABLE;
3749  window = SDL_CreateWindow(program_name, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, default_width, default_height, flags);
3750  SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
3751  if (window) {
3752  renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
3753  if (!renderer) {
3754  av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
3755  renderer = SDL_CreateRenderer(window, -1, 0);
3756  }
3757  if (renderer) {
3758  if (!SDL_GetRendererInfo(renderer, &renderer_info))
3759  av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", renderer_info.name);
3760  }
3761  }
3762  if (!window || !renderer || !renderer_info.num_texture_formats) {
3763  av_log(NULL, AV_LOG_FATAL, "Failed to create window or renderer: %s", SDL_GetError());
3764  do_exit(NULL);
3765  }
3766  }
3767 
3769  if (!is) {
3770  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3771  do_exit(NULL);
3772  }
3773 
3774  event_loop(is);
3775 
3776  /* never returns */
3777 
3778  return 0;
3779 }
OPT_FLOAT
#define OPT_FLOAT
Definition: cmdutils.h:173
AVSubtitle
Definition: avcodec.h:2289
rect::w
int w
Definition: f_ebur128.c:77
sws_getCachedContext
struct SwsContext * sws_getCachedContext(struct SwsContext *context, int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Check if context can be reused, otherwise reallocate a new one.
Definition: utils.c:2408
do_exit
static void do_exit(VideoState *is)
Definition: ffplay.c:1297
VideoState::seek_rel
int64_t seek_rel
Definition: ffplay.c:213
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:417
AVCodec
AVCodec.
Definition: codec.h:202
opt_frame_size
static int opt_frame_size(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3477
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:204
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
VideoState::video_st
AVStream * video_st
Definition: ffplay.c:283
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
Frame::width
int width
Definition: ffplay.c:160
AVFMT_NO_BYTE_SEEK
#define AVFMT_NO_BYTE_SEEK
Format does not allow seeking by bytes.
Definition: avformat.h:475
av_clip
#define av_clip
Definition: common.h:96
AudioParams::fmt
enum AVSampleFormat fmt
Definition: ffplay.c:137
av_sync_type
static int av_sync_type
Definition: ffplay.c:329
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
renderer_info
static SDL_RendererInfo renderer_info
Definition: ffplay.c:366
configure_filtergraph
int configure_filtergraph(FilterGraph *fg)
Definition: ffmpeg_filter.c:978
frame_queue_nb_remaining
static int frame_queue_nb_remaining(FrameQueue *f)
Definition: ffplay.c:802
frame_queue_next
static void frame_queue_next(FrameQueue *f)
Definition: ffplay.c:786
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:56
Decoder::finished
int finished
Definition: ffplay.c:192
AVCodecContext::channel_layout
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:1043
av_compare_ts
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
Definition: mathematics.c:146
AVProgram::nb_stream_indexes
unsigned int nb_stream_indexes
Definition: avformat.h:1026
av_fifo_generic_write
int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int(*func)(void *, void *, int))
Feed data from a user-supplied callback to an AVFifoBuffer.
Definition: fifo.c:122
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:234
frame_queue_last_pos
static int64_t frame_queue_last_pos(FrameQueue *f)
Definition: ffplay.c:808
out
FILE * out
Definition: movenc.c:54
AVCodecContext::sample_rate
int sample_rate
samples per second
Definition: avcodec.h:992
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:68
video_thread
static int video_thread(void *arg)
Definition: ffplay.c:2131
VideoState::av_sync_type
int av_sync_type
Definition: ffplay.c:232
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
set_default_window_size
static void set_default_window_size(int width, int height, AVRational sar)
Definition: ffplay.c:1323
AV_PIX_FMT_BGR32
#define AV_PIX_FMT_BGR32
Definition: pixfmt.h:364
AV_NOSYNC_THRESHOLD
#define AV_NOSYNC_THRESHOLD
Definition: ffplay.c:86
AVFormatContext::nb_chapters
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1315
AVCodecParameters
This struct describes the properties of an encoded stream.
Definition: codec_par.h:52
AV_LOG_QUIET
#define AV_LOG_QUIET
Print no output.
Definition: log.h:162
FrameQueue::keep_last
int keep_last
Definition: ffplay.c:174
sub
static float sub(float src0, float src1)
Definition: dnn_backend_native_layer_mathbinary.c:31
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:948
VideoState::audio_hw_buf_size
int audio_hw_buf_size
Definition: ffplay.c:242
decoder_decode_frame
static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub)
Definition: ffplay.c:589
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AV_DISPOSITION_ATTACHED_PIC
#define AV_DISPOSITION_ATTACHED_PIC
The stream is stored in the file as an attached picture/"cover art" (e.g.
Definition: avformat.h:815
FrameQueue::cond
SDL_cond * cond
Definition: ffplay.c:177
av_fifo_grow
int av_fifo_grow(AVFifoBuffer *f, unsigned int size)
Enlarge an AVFifoBuffer.
Definition: fifo.c:107
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:140
AVStream::discard
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:894
av_find_program_from_stream
AVProgram * av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
Find the programs which belong to a given stream.
Definition: utils.c:485
display_disable
static int display_disable
Definition: ffplay.c:324
screen_width
static int screen_width
Definition: ffplay.c:314
DEBUG
#define DEBUG
Definition: vf_framerate.c:29
sws_dict
AVDictionary * sws_dict
Definition: cmdutils.c:69
swr_set_compensation
int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance)
Activate resampling compensation ("soft" compensation).
Definition: swresample.c:897
SAMPLE_ARRAY_SIZE
#define SAMPLE_ARRAY_SIZE
Definition: ffplay.c:104
rect
Definition: f_ebur128.c:77
update_volume
static void update_volume(VideoState *is, int sign, double step)
Definition: ffplay.c:1509
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
VideoState::auddec
Decoder auddec
Definition: ffplay.c:226
screen_left
static int screen_left
Definition: ffplay.c:316
av_opt_set_int_list
#define av_opt_set_int_list(obj, name, val, term, flags)
Set a binary option to an integer list.
Definition: opt.h:713
AudioParams::frame_size
int frame_size
Definition: ffplay.c:138
AVSubtitleRect
Definition: avcodec.h:2261
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:113
Decoder::next_pts
int64_t next_pts
Definition: ffplay.c:197
decoder_start
static int decoder_start(Decoder *d, int(*fn)(void *), const char *thread_name, void *arg)
Definition: ffplay.c:2120
rect::y
int y
Definition: f_ebur128.c:77
FrameQueue::size
int size
Definition: ffplay.c:172
avformat_get_class
const AVClass * avformat_get_class(void)
Get the AVClass for AVFormatContext.
Definition: options.c:188
av_unused
#define av_unused
Definition: attributes.h:131
Frame::sar
AVRational sar
Definition: ffplay.c:163
AudioParams::channel_layout
int64_t channel_layout
Definition: ffplay.c:136
out_size
int out_size
Definition: movenc.c:55
graph
fg outputs[0] graph
Definition: ffmpeg_filter.c:174
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:112
upload_texture
static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext **img_convert_ctx)
Definition: ffplay.c:906
VideoState::vis_texture
SDL_Texture * vis_texture
Definition: ffplay.c:271
queue_picture
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
Definition: ffplay.c:1742
av_get_channel_layout_string
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
Definition: channel_layout.c:217
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:303
AudioParams
Definition: ffplay.c:133
VideoState::subtitle_st
AVStream * subtitle_st
Definition: ffplay.c:276
pixdesc.h
AVFormatContext::streams
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1165
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
VideoState::frame_last_filter_delay
double frame_last_filter_delay
Definition: ffplay.c:281
AVFrame::width
int width
Definition: frame.h:361
VideoState::xleft
int xleft
Definition: ffplay.c:291
Frame::pts
double pts
Definition: ffplay.c:157
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:576
OPT_INPUT
#define OPT_INPUT
Definition: cmdutils.h:183
avcodec_decode_subtitle2
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:806
frame_queue_init
static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
Definition: ffplay.c:689
subtitle_codec_name
static const char * subtitle_codec_name
Definition: ffplay.c:344
EXTERNAL_CLOCK_MIN_FRAMES
#define EXTERNAL_CLOCK_MIN_FRAMES
Definition: ffplay.c:68
HAS_ARG
#define HAS_ARG
Definition: cmdutils.h:166
b
#define b
Definition: input.c:40
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:146
AVChapter::start
int64_t start
Definition: avformat.h:1059
Clock
Definition: ffplay.c:142
data
const char data[16]
Definition: mxf.c:143
frame_queue_destory
static void frame_queue_destory(FrameQueue *f)
Definition: ffplay.c:710
SAMPLE_QUEUE_SIZE
#define SAMPLE_QUEUE_SIZE
Definition: ffplay.c:130
program_name
const char program_name[]
program name, defined by the program for show_version().
Definition: ffplay.c:63
Decoder::queue
PacketQueue * queue
Definition: ffplay.c:189
format_opts
AVDictionary * format_opts
Definition: cmdutils.c:71
AVSEEK_FLAG_BYTE
#define AVSEEK_FLAG_BYTE
seeking based on position in bytes
Definition: avformat.h:2152
AVIOContext::error
int error
contains the error code or 0 if no error happened
Definition: avio.h:240
AV_DICT_IGNORE_SUFFIX
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key,...
Definition: dict.h:68
AV_PIX_FMT_RGB32_1
#define AV_PIX_FMT_RGB32_1
Definition: pixfmt.h:363
VideoState::audio_diff_avg_coef
double audio_diff_avg_coef
Definition: ffplay.c:237
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
CURSOR_HIDE_DELAY
#define CURSOR_HIDE_DELAY
Definition: ffplay.c:106
SDL_VOLUME_STEP
#define SDL_VOLUME_STEP
Definition: ffplay.c:77
show_help_children
void show_help_children(const AVClass *class, int flags)
Show help for all options with given flags in class and all its children.
Definition: cmdutils.c:198
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
AV_PIX_FMT_BGRA
@ AV_PIX_FMT_BGRA
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:95
autorotate
static int autorotate
Definition: ffplay.c:354
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:391
TextureFormatEntry::texture_fmt
int texture_fmt
Definition: ffplay.c:371
video_disable
static int video_disable
Definition: ffplay.c:319
Frame::uploaded
int uploaded
Definition: ffplay.c:164
mathematics.h
sws_scale
int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
Definition: swscale.c:1196
AVDictionary
Definition: dict.c:30
compute_target_delay
static double compute_target_delay(double delay, VideoState *is)
Definition: ffplay.c:1524
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
Frame
Definition: ffplay.c:153
stream_close
static void stream_close(VideoState *is)
Definition: ffplay.c:1260
av_read_frame
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: demux.c:1414
av_fifo_generic_read
int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void(*func)(void *, void *, int))
Feed data from an AVFifoBuffer to a user-supplied callback.
Definition: fifo.c:213
SDL_AUDIO_MAX_CALLBACKS_PER_SEC
#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC
Definition: ffplay.c:74
AVFMT_NOBINSEARCH
#define AVFMT_NOBINSEARCH
Format does not allow to fall back on binary search via read_timestamp.
Definition: avformat.h:473
VideoState::paused
int paused
Definition: ffplay.c:207
init_clock
static void init_clock(Clock *c, int *queue_serial)
Definition: ffplay.c:1405
codec_type
enum AVMediaType codec_type
Definition: rtp.c:37
AV_OPT_FLAG_FILTERING_PARAM
#define AV_OPT_FLAG_FILTERING_PARAM
a generic parameter which can be set by the user for filtering
Definition: opt.h:293
opt_seek
static int opt_seek(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3526
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:338
av_strlcatf
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:101
get_master_clock
static double get_master_clock(VideoState *is)
Definition: ffplay.c:1438
VideoState::width
int width
Definition: ffplay.c:291
file_iformat
static const AVInputFormat * file_iformat
Definition: ffplay.c:309
sample_rate
sample_rate
Definition: ffmpeg_filter.c:156
dummy
static int dummy
Definition: ffplay.c:3582
update_video_pts
static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial)
Definition: ffplay.c:1566
FF_QUIT_EVENT
#define FF_QUIT_EVENT
Definition: ffplay.c:362
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: avpacket.c:75
PacketQueue
Definition: ffplay.c:117
subtitle_thread
static int subtitle_thread(void *arg)
Definition: ffplay.c:2238
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:317
VideoState::last_subtitle_stream
int last_subtitle_stream
Definition: ffplay.c:303
VideoState::SHOW_MODE_NONE
@ SHOW_MODE_NONE
Definition: ffplay.c:261
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:121
OptionDef
Definition: cmdutils.h:163
audio_decode_frame
static int audio_decode_frame(VideoState *is)
Decode one audio frame and return its uncompressed size.
Definition: ffplay.c:2341
subtitle_disable
static int subtitle_disable
Definition: ffplay.c:320
VideoState::pictq
FrameQueue pictq
Definition: ffplay.c:222
genpts
static int genpts
Definition: ffplay.c:333
VideoState::swr_ctx
struct SwrContext * swr_ctx
Definition: ffplay.c:256
AVFifoBuffer
Definition: fifo.h:31
opt_sync
static int opt_sync(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3511
av_find_best_stream
int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type, int wanted_stream_nb, int related_stream, const AVCodec **decoder_ret, int flags)
Find the "best" stream in the file.
Definition: utils.c:500
step_to_next_frame
static void step_to_next_frame(VideoState *is)
Definition: ffplay.c:1516
VideoState::sampq
FrameQueue sampq
Definition: ffplay.c:224
TextureFormatEntry::format
enum AVPixelFormat format
Definition: ffplay.c:370
FrameQueue::rindex
int rindex
Definition: ffplay.c:170
video_display
static void video_display(VideoState *is)
Definition: ffplay.c:1359
AVCodec::max_lowres
uint8_t max_lowres
maximum value for lowres supported by the decoder
Definition: codec.h:222
avformat_close_input
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: demux.c:355
parse_number_or_die
double parse_number_or_die(const char *context, const char *numstr, int type, double min, double max)
Parse a string and return its corresponding value as a double.
Definition: cmdutils.c:136
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:508
AVFormatContext::interrupt_callback
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1364
SDL_AUDIO_MIN_BUFFER_SIZE
#define SDL_AUDIO_MIN_BUFFER_SIZE
Definition: ffplay.c:72
print_error
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:1087
startup_volume
static int startup_volume
Definition: ffplay.c:327
window
static SDL_Window * window
Definition: ffplay.c:364
avfilter_graph_create_filter
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
Definition: avfiltergraph.c:140
AVCodecParameters::channels
int channels
Audio only.
Definition: codec_par.h:166
fifo.h
toggle_full_screen
static void toggle_full_screen(VideoState *is)
Definition: ffplay.c:3215
Clock::queue_serial
int * queue_serial
Definition: ffplay.c:149
VideoState::extclk
Clock extclk
Definition: ffplay.c:220
VideoState::seek_flags
int seek_flags
Definition: ffplay.c:211
alwaysontop
static int alwaysontop
Definition: ffplay.c:326
VideoState::audio_st
AVStream * audio_st
Definition: ffplay.c:240
packet_queue_init
static int packet_queue_init(PacketQueue *q)
Definition: ffplay.c:477
AUDIO_DIFF_AVG_NB
#define AUDIO_DIFF_AVG_NB
Definition: ffplay.c:97
fail
#define fail()
Definition: checkasm.h:127
avfilter_graph_alloc
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:84
opt_duration
static int opt_duration(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3532
FrameQueue
Definition: ffplay.c:168
packet_queue_put
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:448
samplefmt.h
AVSubtitleRect::x
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:2262
VideoState::video_stream
int video_stream
Definition: ffplay.c:282
autoexit
static int autoexit
Definition: ffplay.c:336
avio_tell
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:486
AVERROR_OPTION_NOT_FOUND
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:63
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
AVChapter
Definition: avformat.h:1056
video_image_display
static void video_image_display(VideoState *is)
Definition: ffplay.c:973
val
static double val(void *priv, double ch)
Definition: aeval.c:76
AV_SYNC_AUDIO_MASTER
@ AV_SYNC_AUDIO_MASTER
Definition: ffplay.c:182
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
opt_show_mode
static int opt_show_mode(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3538
Decoder::empty_queue_cond
SDL_cond * empty_queue_cond
Definition: ffplay.c:194
pts
static int64_t pts
Definition: transcode_aac.c:653
set_clock_speed
static void set_clock_speed(Clock *c, double speed)
Definition: ffplay.c:1399
VideoState::audio_diff_threshold
double audio_diff_threshold
Definition: ffplay.c:238
sws_get_class
const AVClass * sws_get_class(void)
Get the AVClass for swsContext.
Definition: options.c:99
AV_PKT_DATA_DISPLAYMATRIX
@ AV_PKT_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: packet.h:108
av_opt_set
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:468
VideoState::audio_diff_cum
double audio_diff_cum
Definition: ffplay.c:236
VideoState::last_video_stream
int last_video_stream
Definition: ffplay.c:303
OPT_STRING
#define OPT_STRING
Definition: cmdutils.h:169
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:261
fast
static int fast
Definition: ffplay.c:332
fn
#define fn(a)
Definition: colorspacedsp_template.c:42
loop
static int loop
Definition: ffplay.c:339
AVRational::num
int num
Numerator.
Definition: rational.h:59
avformat_network_init
int avformat_network_init(void)
Do global initialization of network libraries.
Definition: utils.c:1252
VideoState::rdft_bits
int rdft_bits
Definition: ffplay.c:267
opt_height
static int opt_height(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3489
avsubtitle_free
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: avcodec.c:429
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:99
sdl_texture_format_map
static const struct TextureFormatEntry sdl_texture_format_map[]
AVFormatContext::bit_rate
int64_t bit_rate
Total stream bitrate in bit/s, 0 if not available.
Definition: avformat.h:1206
is_full_screen
static int is_full_screen
Definition: ffplay.c:359
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:212
set_sdl_yuv_conversion_mode
static void set_sdl_yuv_conversion_mode(AVFrame *frame)
Definition: ffplay.c:957
lrint
#define lrint
Definition: tablegen.h:53
Frame::flip_v
int flip_v
Definition: ffplay.c:165
av_guess_sample_aspect_ratio
AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
Guess the sample aspect ratio of a frame, based on both the stream and the frame aspect ratio.
Definition: utils.c:1315
AV_LOG_TRACE
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
Definition: log.h:206
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_fifo_space
int av_fifo_space(const AVFifoBuffer *f)
Return the amount of space in bytes in the AVFifoBuffer, that is the amount of data you can write int...
Definition: fifo.c:82
AVFormatContext::metadata
AVDictionary * metadata
Metadata that applies to the whole file.
Definition: avformat.h:1326
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
AVInputFormat
Definition: avformat.h:626
audio_thread
static int audio_thread(void *arg)
Definition: ffplay.c:2033
set_clock
static void set_clock(Clock *c, double pts, int serial)
Definition: ffplay.c:1393
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
av_dump_format
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate,...
Definition: dump.c:621
VideoState
Definition: ffplay.c:202
frame_queue_peek_next
static Frame * frame_queue_peek_next(FrameQueue *f)
Definition: ffplay.c:734
sdl_audio_callback
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
Definition: ffplay.c:2454
sync_clock_to_slave
static void sync_clock_to_slave(Clock *c, Clock *slave)
Definition: ffplay.c:1413
swr_init
av_cold int swr_init(struct SwrContext *s)
Initialize context after user parameters have been set.
Definition: swresample.c:152
opt_input_file
static void opt_input_file(void *optctx, const char *filename)
Definition: ffplay.c:3547
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
avformat_open_input
int avformat_open_input(AVFormatContext **ps, const char *url, const AVInputFormat *fmt, AVDictionary **options)
Open an input stream and read the header.
Definition: demux.c:207
frame_queue_signal
static void frame_queue_signal(FrameQueue *f)
Definition: ffplay.c:722
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
OPT_INT
#define OPT_INT
Definition: cmdutils.h:172
VideoState::img_convert_ctx
struct SwsContext * img_convert_ctx
Definition: ffplay.c:286
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:141
width
#define width
VideoState::ShowMode
ShowMode
Definition: ffplay.c:260
Decoder::avctx
AVCodecContext * avctx
Definition: ffplay.c:190
s
#define s(width, name)
Definition: cbs_vp9.c:257
AV_OPT_FLAG_ENCODING_PARAM
#define AV_OPT_FLAG_ENCODING_PARAM
a generic parameter which can be set by the user for muxing or encoding
Definition: opt.h:277
show_help_default
void show_help_default(const char *opt, const char *arg)
Per-fftool specific help handler.
Definition: ffplay.c:3647
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:51
default_height
static int default_height
Definition: ffplay.c:313
AVFormatContext::flags
int flags
Flags modifying the (de)muxer behaviour.
Definition: avformat.h:1215
AVCodecParameters::sample_aspect_ratio
AVRational sample_aspect_ratio
Video only.
Definition: codec_par.h:136
AVFrame::channels
int channels
number of audio channels, only used for audio.
Definition: frame.h:592
AVInputFormat::name
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:631
AVFormatContext::iformat
const struct AVInputFormat * iformat
The input container format.
Definition: avformat.h:1109
AV_PIX_FMT_0BGR32
#define AV_PIX_FMT_0BGR32
Definition: pixfmt.h:367
AVCOL_SPC_SMPTE170M
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
Definition: pixfmt.h:509
AVDictionaryEntry::key
char * key
Definition: dict.h:80
Clock::last_updated
double last_updated
Definition: ffplay.c:145
PacketQueue::duration
int64_t duration
Definition: ffplay.c:121
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
AVSubtitleRect::y
int y
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:2263
AVCodecParameters::width
int width
Video only.
Definition: codec_par.h:126
calculate_display_rect
static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, int pic_width, int pic_height, AVRational pic_sar)
Definition: ffplay.c:861
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
screen_height
static int screen_height
Definition: ffplay.c:315
EXTERNAL_CLOCK_SPEED_STEP
#define EXTERNAL_CLOCK_SPEED_STEP
Definition: ffplay.c:94
AV_CH_LAYOUT_STEREO_DOWNMIX
#define AV_CH_LAYOUT_STEREO_DOWNMIX
Definition: channel_layout.h:117
Decoder::pkt_serial
int pkt_serial
Definition: ffplay.c:191
opt_frame_pix_fmt
static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3505
avcodec_receive_frame
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
Definition: decode.c:643
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
AVMEDIA_TYPE_NB
@ AVMEDIA_TYPE_NB
Definition: avutil.h:206
outputs
static const AVFilterPad outputs[]
Definition: af_acontrast.c:192
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:290
AV_SYNC_EXTERNAL_CLOCK
@ AV_SYNC_EXTERNAL_CLOCK
Definition: ffplay.c:184
av_read_play
int av_read_play(AVFormatContext *s)
Start playing a network-based stream (e.g.
Definition: utils.c:568
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
opt_codec
static int opt_codec(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3560
Clock::pts_drift
double pts_drift
Definition: ffplay.c:144
swr_convert
int attribute_align_arg swr_convert(struct SwrContext *s, uint8_t *out_arg[SWR_CH_MAX], int out_count, const uint8_t *in_arg[SWR_CH_MAX], int in_count)
Definition: swresample.c:714
VideoState::videoq
PacketQueue videoq
Definition: ffplay.c:284
ctx
AVFormatContext * ctx
Definition: movenc.c:48
channels
channels
Definition: aptx.h:33
limits.h
REFRESH_RATE
#define REFRESH_RATE
Definition: ffplay.c:100
FrameQueue::rindex_shown
int rindex_shown
Definition: ffplay.c:175
nb_streams
static int nb_streams
Definition: ffprobe.c:289
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:141
VideoState::force_refresh
int force_refresh
Definition: ffplay.c:206
get_clock
static double get_clock(Clock *c)
Definition: ffplay.c:1373
screen_top
static int screen_top
Definition: ffplay.c:317
VideoState::audio_diff_avg_count
int audio_diff_avg_count
Definition: ffplay.c:239
EXTERNAL_CLOCK_SPEED_MIN
#define EXTERNAL_CLOCK_SPEED_MIN
Definition: ffplay.c:92
sws_flags
static unsigned sws_flags
Definition: ffplay.c:110
parse_options
void parse_options(void *optctx, int argc, char **argv, const OptionDef *options, void(*parse_arg_function)(void *, const char *))
Definition: cmdutils.c:374
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
av_get_sample_fmt_name
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:49
renderer
static SDL_Renderer * renderer
Definition: ffplay.c:365
av_usleep
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
SwrContext
The libswresample context.
Definition: swresample_internal.h:95
vp_duration
static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp)
Definition: ffplay.c:1554
swr_alloc_set_opts
struct SwrContext * swr_alloc_set_opts(struct SwrContext *s, int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate, int64_t in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate, int log_offset, void *log_ctx)
Allocate SwrContext if needed and set/reset common parameters.
Definition: swresample.c:59
NAN
#define NAN
Definition: mathematics.h:64
f
#define f(width, name)
Definition: cbs_vp9.c:255
AV_PIX_FMT_BGR32_1
#define AV_PIX_FMT_BGR32_1
Definition: pixfmt.h:365
VideoState::step
int step
Definition: ffplay.c:292
av_rdft_calc
void av_rdft_calc(RDFTContext *s, FFTSample *data)
synchronize_audio
static int synchronize_audio(VideoState *is, int nb_samples)
Definition: ffplay.c:2293
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:393
arg
const char * arg
Definition: jacosubdec.c:67
if
if(ret)
Definition: filter_design.txt:179
window_title
static const char * window_title
Definition: ffplay.c:311
Clock::speed
double speed
Definition: ffplay.c:146
VideoState::SHOW_MODE_VIDEO
@ SHOW_MODE_VIDEO
Definition: ffplay.c:261
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:54
AVFormatContext
Format I/O context.
Definition: avformat.h:1097
av_log_get_level
int av_log_get_level(void)
Get the current log level.
Definition: log.c:435
avfilter_get_by_name
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:557
init_dynload
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:112
opts
AVDictionary * opts
Definition: movenc.c:50
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:992
AVSubtitleRect::w
int w
width of pict, undefined when pict is not set
Definition: avcodec.h:2264
seek_chapter
static void seek_chapter(VideoState *is, int incr)
Definition: ffplay.c:3250
get_master_sync_type
static int get_master_sync_type(VideoState *is)
Definition: ffplay.c:1421
avcodec_get_class
const AVClass * avcodec_get_class(void)
Get the AVClass for AVCodecContext.
Definition: options.c:174
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
avfilter_graph_config
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
Definition: avfiltergraph.c:1160
AVStream::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avformat.h:868
NULL
#define NULL
Definition: coverity.c:32
avcodec_find_decoder_by_name
const AVCodec * avcodec_find_decoder_by_name(const char *name)
Find a registered decoder with the specified name.
Definition: allcodecs.c:945
FrameQueue::max_size
int max_size
Definition: ffplay.c:173
OPT_EXPERT
#define OPT_EXPERT
Definition: cmdutils.h:168
AV_PIX_FMT_YUYV422
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:67
Decoder
Definition: ffplay.c:187
AudioParams::freq
int freq
Definition: ffplay.c:134
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:156
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
isnan
#define isnan(x)
Definition: libm.h:340
stream_cycle_channel
static void stream_cycle_channel(VideoState *is, int codec_type)
Definition: ffplay.c:3136
VideoState::frame_drops_late
int frame_drops_late
Definition: ffplay.c:258
AV_CODEC_FLAG2_FAST
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:287
VideoState::rdft
RDFTContext * rdft
Definition: ffplay.c:266
AVFormatContext::pb
AVIOContext * pb
I/O context.
Definition: avformat.h:1139
av_log_set_flags
void av_log_set_flags(int arg)
Definition: log.c:445
parseutils.h