FFmpeg
ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include <stdint.h>
32 
33 #include "libavutil/avstring.h"
34 #include "libavutil/eval.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/dict.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/samplefmt.h"
41 #include "libavutil/avassert.h"
42 #include "libavutil/time.h"
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswscale/swscale.h"
46 #include "libavutil/opt.h"
47 #include "libavcodec/avfft.h"
48 #include "libswresample/swresample.h"
49 
50 #if CONFIG_AVFILTER
51 # include "libavfilter/avfilter.h"
52 # include "libavfilter/buffersink.h"
53 # include "libavfilter/buffersrc.h"
54 #endif
55 
56 #include <SDL.h>
57 #include <SDL_thread.h>
58 
59 #include "cmdutils.h"
60 
61 #include <assert.h>
62 
63 const char program_name[] = "ffplay";
64 const int program_birth_year = 2003;
65 
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_FRAMES 25
68 #define EXTERNAL_CLOCK_MIN_FRAMES 2
69 #define EXTERNAL_CLOCK_MAX_FRAMES 10
70 
71 /* Minimum SDL audio buffer size, in samples. */
72 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
73 /* Calculate the actual buffer size, keeping in mind not to cause too frequent audio callbacks */
74 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
75 
76 /* Step size for volume control in dB */
77 #define SDL_VOLUME_STEP (0.75)
78 
79 /* no AV sync correction is done if below the minimum AV sync threshold */
80 #define AV_SYNC_THRESHOLD_MIN 0.04
81 /* AV sync correction is done if above the maximum AV sync threshold */
82 #define AV_SYNC_THRESHOLD_MAX 0.1
83 /* If a frame duration is longer than this, it will not be duplicated to compensate AV sync */
84 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
85 /* no AV correction is done if too big error */
86 #define AV_NOSYNC_THRESHOLD 10.0
87 
88 /* maximum audio speed change to get correct sync */
89 #define SAMPLE_CORRECTION_PERCENT_MAX 10
90 
91 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
92 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
93 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
94 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
95 
96 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
97 #define AUDIO_DIFF_AVG_NB 20
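/* Editor's note (illustrative, not part of the original listing): later in
 * ffplay.c (outside this excerpt) the averaging coefficient is derived from
 * this constant as exp(log(0.01) / AUDIO_DIFF_AVG_NB), roughly 0.794 for 20,
 * so after about 20 measurements an old A-V difference contributes only ~1%
 * (0.794^20 ~= 0.01) to the running average. */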
98 
99 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
100 #define REFRESH_RATE 0.01
101 
102 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
103 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
104 #define SAMPLE_ARRAY_SIZE (8 * 65536)
105 
106 #define CURSOR_HIDE_DELAY 1000000
107 
108 #define USE_ONEPASS_SUBTITLE_RENDER 1
109 
110 static unsigned sws_flags = SWS_BICUBIC;
111 
112 typedef struct MyAVPacketList {
113  AVPacket pkt;
114  struct MyAVPacketList *next;
115  int serial;
116 } MyAVPacketList;
117 
118 typedef struct PacketQueue {
119  MyAVPacketList *first_pkt, *last_pkt;
120  int nb_packets;
121  int size;
122  int64_t duration;
123  int abort_request;
124  int serial;
125  SDL_mutex *mutex;
126  SDL_cond *cond;
127 } PacketQueue;
128 
129 #define VIDEO_PICTURE_QUEUE_SIZE 3
130 #define SUBPICTURE_QUEUE_SIZE 16
131 #define SAMPLE_QUEUE_SIZE 9
132 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
133 
134 typedef struct AudioParams {
135  int freq;
136  int channels;
137  int64_t channel_layout;
138  enum AVSampleFormat fmt;
139  int frame_size;
140  int bytes_per_sec;
141 } AudioParams;
142 
143 typedef struct Clock {
144  double pts; /* clock base */
145  double pts_drift; /* clock base minus time at which we updated the clock */
146  double last_updated;
147  double speed;
148  int serial; /* clock is based on a packet with this serial */
149  int paused;
150  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
151 } Clock;
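/* Editor's note (illustrative, not part of the original listing): set_clock_at()
 * later in this file stores pts_drift = pts - last_updated, and get_clock()
 * reads pts_drift + now - (now - last_updated) * (1.0 - speed). Worked example
 * with speed == 1.0: setting pts = 10.0 at now = 100.0 gives pts_drift = -90.0;
 * reading 0.5 s later returns -90.0 + 100.5 = 10.5, i.e. the clock advances in
 * real time without being updated on every frame. */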
152 
153 /* Common struct for handling all types of decoded data and allocated render buffers. */
154 typedef struct Frame {
155  AVFrame *frame;
156  AVSubtitle sub;
157  int serial;
158  double pts; /* presentation timestamp for the frame */
159  double duration; /* estimated duration of the frame */
160  int64_t pos; /* byte position of the frame in the input file */
161  int width;
162  int height;
163  int format;
164  AVRational sar;
165  int uploaded;
166  int flip_v;
167 } Frame;
168 
169 typedef struct FrameQueue {
170  Frame queue[FRAME_QUEUE_SIZE];
171  int rindex;
172  int windex;
173  int size;
174  int max_size;
175  int keep_last;
176  int rindex_shown;
177  SDL_mutex *mutex;
178  SDL_cond *cond;
179  PacketQueue *pktq;
180 } FrameQueue;
181 
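/* Editor's note (illustrative, not part of the original listing): queue[] is a
 * ring buffer indexed by windex (producer) and rindex (consumer). When
 * keep_last is set, the frame most recently handed to the display is not
 * released immediately: rindex_shown becomes 1 and the frame stays at
 * queue[rindex] so it can be re-rendered, which is why readers index with
 * (rindex + rindex_shown) % max_size and why frame_queue_nb_remaining()
 * returns size - rindex_shown. */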
182 enum {
183  AV_SYNC_AUDIO_MASTER, /* default choice */
184  AV_SYNC_VIDEO_MASTER,
185  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
186 };
187 
188 typedef struct Decoder {
189  AVPacket pkt;
190  PacketQueue *queue;
191  AVCodecContext *avctx;
192  int pkt_serial;
193  int finished;
194  int packet_pending;
195  SDL_cond *empty_queue_cond;
196  int64_t start_pts;
197  AVRational start_pts_tb;
198  int64_t next_pts;
199  AVRational next_pts_tb;
200  SDL_Thread *decoder_tid;
201 } Decoder;
202 
203 typedef struct VideoState {
204  SDL_Thread *read_tid;
205  AVInputFormat *iformat;
206  int abort_request;
207  int force_refresh;
208  int paused;
209  int last_paused;
210  int queue_attachments_req;
211  int seek_req;
212  int seek_flags;
213  int64_t seek_pos;
214  int64_t seek_rel;
215  int read_pause_return;
216  AVFormatContext *ic;
217  int realtime;
218 
219  Clock audclk;
220  Clock vidclk;
221  Clock extclk;
222 
223  FrameQueue pictq;
224  FrameQueue subpq;
225  FrameQueue sampq;
226 
227  Decoder auddec;
228  Decoder viddec;
229  Decoder subdec;
230 
231  int audio_stream;
232 
233  int av_sync_type;
234 
235  double audio_clock;
236  int audio_clock_serial;
237  double audio_diff_cum; /* used for AV difference average computation */
238  double audio_diff_avg_coef;
239  double audio_diff_threshold;
240  int audio_diff_avg_count;
241  AVStream *audio_st;
242  PacketQueue audioq;
243  int audio_hw_buf_size;
244  uint8_t *audio_buf;
245  uint8_t *audio_buf1;
246  unsigned int audio_buf_size; /* in bytes */
247  unsigned int audio_buf1_size;
248  int audio_buf_index; /* in bytes */
249  int audio_write_buf_size;
250  int audio_volume;
251  int muted;
252  struct AudioParams audio_src;
253 #if CONFIG_AVFILTER
254  struct AudioParams audio_filter_src;
255 #endif
256  struct AudioParams audio_tgt;
257  struct SwrContext *swr_ctx;
258  int frame_drops_early;
259  int frame_drops_late;
260 
261  enum ShowMode {
262  SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
263  } show_mode;
264  int16_t sample_array[SAMPLE_ARRAY_SIZE];
265  int sample_array_index;
266  int last_i_start;
267  RDFTContext *rdft;
268  int rdft_bits;
269  FFTSample *rdft_data;
270  int xpos;
271  double last_vis_time;
272  SDL_Texture *vis_texture;
273  SDL_Texture *sub_texture;
274  SDL_Texture *vid_texture;
275 
276  int subtitle_stream;
277  AVStream *subtitle_st;
278  PacketQueue subtitleq;
279 
280  double frame_timer;
281  double frame_last_returned_time;
282  double frame_last_filter_delay;
283  int video_stream;
284  AVStream *video_st;
285  PacketQueue videoq;
286  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
287  struct SwsContext *img_convert_ctx;
288  struct SwsContext *sub_convert_ctx;
289  int eof;
290 
291  char *filename;
292  int width, height, xleft, ytop;
293  int step;
294 
295 #if CONFIG_AVFILTER
296  int vfilter_idx;
297  AVFilterContext *in_video_filter; // the first filter in the video chain
298  AVFilterContext *out_video_filter; // the last filter in the video chain
299  AVFilterContext *in_audio_filter; // the first filter in the audio chain
300  AVFilterContext *out_audio_filter; // the last filter in the audio chain
301  AVFilterGraph *agraph; // audio filter graph
302 #endif
303 
304  int last_video_stream, last_audio_stream, last_subtitle_stream;
305 
306  SDL_cond *continue_read_thread;
307 } VideoState;
308 
309 /* options specified by the user */
310 static AVInputFormat *file_iformat;
311 static const char *input_filename;
312 static const char *window_title;
313 static int default_width = 640;
314 static int default_height = 480;
315 static int screen_width = 0;
316 static int screen_height = 0;
317 static int screen_left = SDL_WINDOWPOS_CENTERED;
318 static int screen_top = SDL_WINDOWPOS_CENTERED;
319 static int audio_disable;
320 static int video_disable;
321 static int subtitle_disable;
322 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
323 static int seek_by_bytes = -1;
324 static float seek_interval = 10;
325 static int display_disable;
326 static int borderless;
327 static int alwaysontop;
328 static int startup_volume = 100;
329 static int show_status = 1;
330 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
331 static int64_t start_time = AV_NOPTS_VALUE;
332 static int64_t duration = AV_NOPTS_VALUE;
333 static int fast = 0;
334 static int genpts = 0;
335 static int lowres = 0;
336 static int decoder_reorder_pts = -1;
337 static int autoexit;
338 static int exit_on_keydown;
339 static int exit_on_mousedown;
340 static int loop = 1;
341 static int framedrop = -1;
342 static int infinite_buffer = -1;
343 static enum ShowMode show_mode = SHOW_MODE_NONE;
344 static const char *audio_codec_name;
345 static const char *subtitle_codec_name;
346 static const char *video_codec_name;
347 double rdftspeed = 0.02;
348 static int64_t cursor_last_shown;
349 static int cursor_hidden = 0;
350 #if CONFIG_AVFILTER
351 static const char **vfilters_list = NULL;
352 static int nb_vfilters = 0;
353 static char *afilters = NULL;
354 #endif
355 static int autorotate = 1;
356 static int find_stream_info = 1;
357 static int filter_nbthreads = 0;
358 
359 /* current context */
360 static int is_full_screen;
361 static int64_t audio_callback_time;
362 
363 static AVPacket flush_pkt;
364 
365 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
366 
367 static SDL_Window *window;
368 static SDL_Renderer *renderer;
369 static SDL_RendererInfo renderer_info = {0};
370 static SDL_AudioDeviceID audio_dev;
371 
372 static const struct TextureFormatEntry {
373  enum AVPixelFormat format;
374  SDL_PixelFormatEnum texture_fmt;
375 } sdl_texture_format_map[] = {
376  { AV_PIX_FMT_RGB8, SDL_PIXELFORMAT_RGB332 },
377  { AV_PIX_FMT_RGB444, SDL_PIXELFORMAT_RGB444 },
378  { AV_PIX_FMT_RGB555, SDL_PIXELFORMAT_RGB555 },
379  { AV_PIX_FMT_BGR555, SDL_PIXELFORMAT_BGR555 },
380  { AV_PIX_FMT_RGB565, SDL_PIXELFORMAT_RGB565 },
381  { AV_PIX_FMT_BGR565, SDL_PIXELFORMAT_BGR565 },
382  { AV_PIX_FMT_RGB24, SDL_PIXELFORMAT_RGB24 },
383  { AV_PIX_FMT_BGR24, SDL_PIXELFORMAT_BGR24 },
384  { AV_PIX_FMT_0RGB32, SDL_PIXELFORMAT_RGB888 },
385  { AV_PIX_FMT_0BGR32, SDL_PIXELFORMAT_BGR888 },
386  { AV_PIX_FMT_NE(RGB0, 0BGR), SDL_PIXELFORMAT_RGBX8888 },
387  { AV_PIX_FMT_NE(BGR0, 0RGB), SDL_PIXELFORMAT_BGRX8888 },
388  { AV_PIX_FMT_RGB32, SDL_PIXELFORMAT_ARGB8888 },
389  { AV_PIX_FMT_RGB32_1, SDL_PIXELFORMAT_RGBA8888 },
390  { AV_PIX_FMT_BGR32, SDL_PIXELFORMAT_ABGR8888 },
391  { AV_PIX_FMT_BGR32_1, SDL_PIXELFORMAT_BGRA8888 },
392  { AV_PIX_FMT_YUV420P, SDL_PIXELFORMAT_IYUV },
393  { AV_PIX_FMT_YUYV422, SDL_PIXELFORMAT_YUY2 },
394  { AV_PIX_FMT_UYVY422, SDL_PIXELFORMAT_UYVY },
395  { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_UNKNOWN },
396 };
397 
398 #if CONFIG_AVFILTER
399 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
400 {
401  GROW_ARRAY(vfilters_list, nb_vfilters);
402  vfilters_list[nb_vfilters - 1] = arg;
403  return 0;
404 }
405 #endif
406 
407 static inline
408 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
409  enum AVSampleFormat fmt2, int64_t channel_count2)
410 {
411  /* If channel count == 1, planar and non-planar formats are the same */
412  if (channel_count1 == 1 && channel_count2 == 1)
413  return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
414  else
415  return channel_count1 != channel_count2 || fmt1 != fmt2;
416 }
417 
418 static inline
419 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
420 {
421  if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
422  return channel_layout;
423  else
424  return 0;
425 }
426 
427 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
428 {
429  MyAVPacketList *pkt1;
430 
431  if (q->abort_request)
432  return -1;
433 
434  pkt1 = av_malloc(sizeof(MyAVPacketList));
435  if (!pkt1)
436  return -1;
437  pkt1->pkt = *pkt;
438  pkt1->next = NULL;
439  if (pkt == &flush_pkt)
440  q->serial++;
441  pkt1->serial = q->serial;
442 
443  if (!q->last_pkt)
444  q->first_pkt = pkt1;
445  else
446  q->last_pkt->next = pkt1;
447  q->last_pkt = pkt1;
448  q->nb_packets++;
449  q->size += pkt1->pkt.size + sizeof(*pkt1);
450  q->duration += pkt1->pkt.duration;
451  /* XXX: should duplicate packet data in DV case */
452  SDL_CondSignal(q->cond);
453  return 0;
454 }
455 
456 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
457 {
458  int ret;
459 
460  SDL_LockMutex(q->mutex);
461  ret = packet_queue_put_private(q, pkt);
462  SDL_UnlockMutex(q->mutex);
463 
464  if (pkt != &flush_pkt && ret < 0)
465  av_packet_unref(pkt);
466 
467  return ret;
468 }
469 
470 static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
471 {
472  AVPacket pkt1, *pkt = &pkt1;
473  av_init_packet(pkt);
474  pkt->data = NULL;
475  pkt->size = 0;
476  pkt->stream_index = stream_index;
477  return packet_queue_put(q, pkt);
478 }
479 
480 /* packet queue handling */
481 static int packet_queue_init(PacketQueue *q)
482 {
483  memset(q, 0, sizeof(PacketQueue));
484  q->mutex = SDL_CreateMutex();
485  if (!q->mutex) {
486  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
487  return AVERROR(ENOMEM);
488  }
489  q->cond = SDL_CreateCond();
490  if (!q->cond) {
491  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
492  return AVERROR(ENOMEM);
493  }
494  q->abort_request = 1;
495  return 0;
496 }
497 
498 static void packet_queue_flush(PacketQueue *q)
499 {
500  MyAVPacketList *pkt, *pkt1;
501 
502  SDL_LockMutex(q->mutex);
503  for (pkt = q->first_pkt; pkt; pkt = pkt1) {
504  pkt1 = pkt->next;
505  av_packet_unref(&pkt->pkt);
506  av_freep(&pkt);
507  }
508  q->last_pkt = NULL;
509  q->first_pkt = NULL;
510  q->nb_packets = 0;
511  q->size = 0;
512  q->duration = 0;
513  SDL_UnlockMutex(q->mutex);
514 }
515 
516 static void packet_queue_destroy(PacketQueue *q)
517 {
518  packet_queue_flush(q);
519  SDL_DestroyMutex(q->mutex);
520  SDL_DestroyCond(q->cond);
521 }
522 
523 static void packet_queue_abort(PacketQueue *q)
524 {
525  SDL_LockMutex(q->mutex);
526 
527  q->abort_request = 1;
528 
529  SDL_CondSignal(q->cond);
530 
531  SDL_UnlockMutex(q->mutex);
532 }
533 
534 static void packet_queue_start(PacketQueue *q)
535 {
536  SDL_LockMutex(q->mutex);
537  q->abort_request = 0;
538  packet_queue_put_private(q, &flush_pkt);
539  SDL_UnlockMutex(q->mutex);
540 }
541 
542 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
543 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
544 {
545  MyAVPacketList *pkt1;
546  int ret;
547 
548  SDL_LockMutex(q->mutex);
549 
550  for (;;) {
551  if (q->abort_request) {
552  ret = -1;
553  break;
554  }
555 
556  pkt1 = q->first_pkt;
557  if (pkt1) {
558  q->first_pkt = pkt1->next;
559  if (!q->first_pkt)
560  q->last_pkt = NULL;
561  q->nb_packets--;
562  q->size -= pkt1->pkt.size + sizeof(*pkt1);
563  q->duration -= pkt1->pkt.duration;
564  *pkt = pkt1->pkt;
565  if (serial)
566  *serial = pkt1->serial;
567  av_free(pkt1);
568  ret = 1;
569  break;
570  } else if (!block) {
571  ret = 0;
572  break;
573  } else {
574  SDL_CondWait(q->cond, q->mutex);
575  }
576  }
577  SDL_UnlockMutex(q->mutex);
578  return ret;
579 }
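/* Editor's note (illustrative sketch, not part of ffplay.c): how a consumer
 * typically drains a PacketQueue and skips packets that predate the last
 * flush. The example_* name is hypothetical; the real consumers are the
 * decoder threads further down in this file. */
#if 0
static void example_consume(PacketQueue *q)
{
    AVPacket pkt;
    int serial;

    while (packet_queue_get(q, &pkt, 1, &serial) > 0) {
        if (serial != q->serial) {      /* queued before the last flush/seek */
            av_packet_unref(&pkt);
            continue;
        }
        /* ... feed pkt to the decoder here ... */
        av_packet_unref(&pkt);
    }
}
#endif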
580 
581 static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
582  memset(d, 0, sizeof(Decoder));
583  d->avctx = avctx;
584  d->queue = queue;
585  d->empty_queue_cond = empty_queue_cond;
586  d->start_pts = AV_NOPTS_VALUE;
587  d->pkt_serial = -1;
588 }
589 
590 static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
591  int ret = AVERROR(EAGAIN);
592 
593  for (;;) {
594  AVPacket pkt;
595 
596  if (d->queue->serial == d->pkt_serial) {
597  do {
598  if (d->queue->abort_request)
599  return -1;
600 
601  switch (d->avctx->codec_type) {
602  case AVMEDIA_TYPE_VIDEO:
603  ret = avcodec_receive_frame(d->avctx, frame);
604  if (ret >= 0) {
605  if (decoder_reorder_pts == -1) {
606  frame->pts = frame->best_effort_timestamp;
607  } else if (!decoder_reorder_pts) {
608  frame->pts = frame->pkt_dts;
609  }
610  }
611  break;
612  case AVMEDIA_TYPE_AUDIO:
613  ret = avcodec_receive_frame(d->avctx, frame);
614  if (ret >= 0) {
615  AVRational tb = (AVRational){1, frame->sample_rate};
616  if (frame->pts != AV_NOPTS_VALUE)
617  frame->pts = av_rescale_q(frame->pts, d->avctx->pkt_timebase, tb);
618  else if (d->next_pts != AV_NOPTS_VALUE)
619  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
620  if (frame->pts != AV_NOPTS_VALUE) {
621  d->next_pts = frame->pts + frame->nb_samples;
622  d->next_pts_tb = tb;
623  }
624  }
625  break;
626  }
627  if (ret == AVERROR_EOF) {
628  d->finished = d->pkt_serial;
629  avcodec_flush_buffers(d->avctx);
630  return 0;
631  }
632  if (ret >= 0)
633  return 1;
634  } while (ret != AVERROR(EAGAIN));
635  }
636 
637  do {
638  if (d->queue->nb_packets == 0)
639  SDL_CondSignal(d->empty_queue_cond);
640  if (d->packet_pending) {
641  av_packet_move_ref(&pkt, &d->pkt);
642  d->packet_pending = 0;
643  } else {
644  if (packet_queue_get(d->queue, &pkt, 1, &d->pkt_serial) < 0)
645  return -1;
646  }
647  if (d->queue->serial == d->pkt_serial)
648  break;
649  av_packet_unref(&pkt);
650  } while (1);
651 
652  if (pkt.data == flush_pkt.data) {
653  avcodec_flush_buffers(d->avctx);
654  d->finished = 0;
655  d->next_pts = d->start_pts;
656  d->next_pts_tb = d->start_pts_tb;
657  } else {
658  if (d->avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
659  int got_frame = 0;
660  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &pkt);
661  if (ret < 0) {
662  ret = AVERROR(EAGAIN);
663  } else {
664  if (got_frame && !pkt.data) {
665  d->packet_pending = 1;
666  av_packet_move_ref(&d->pkt, &pkt);
667  }
668  ret = got_frame ? 0 : (pkt.data ? AVERROR(EAGAIN) : AVERROR_EOF);
669  }
670  } else {
671  if (avcodec_send_packet(d->avctx, &pkt) == AVERROR(EAGAIN)) {
672  av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
673  d->packet_pending = 1;
674  av_packet_move_ref(&d->pkt, &pkt);
675  }
676  }
677  av_packet_unref(&pkt);
678  }
679  }
680 }
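/* Editor's note (illustrative sketch, not part of ffplay.c): the return value
 * convention of decoder_decode_frame() as used by the decoder threads below:
 * 1 delivers a decoded frame, 0 means end of stream for the current serial,
 * and a negative value means the packet queue was aborted. The example_* name
 * is hypothetical. */
#if 0
static int example_decode_loop(Decoder *d)
{
    AVFrame *frame = av_frame_alloc();
    int got;

    if (!frame)
        return AVERROR(ENOMEM);
    while ((got = decoder_decode_frame(d, frame, NULL)) >= 0) {
        if (got > 0) {
            /* ... push *frame into a FrameQueue here ... */
            av_frame_unref(frame);
        }
        /* got == 0: EOF for this serial; a later flush packet restarts decoding */
    }
    av_frame_free(&frame);
    return got;
}
#endif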
681 
682 static void decoder_destroy(Decoder *d) {
683  av_packet_unref(&d->pkt);
684  avcodec_free_context(&d->avctx);
685 }
686 
687 static void frame_queue_unref_item(Frame *vp)
688 {
689  av_frame_unref(vp->frame);
690  avsubtitle_free(&vp->sub);
691 }
692 
693 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
694 {
695  int i;
696  memset(f, 0, sizeof(FrameQueue));
697  if (!(f->mutex = SDL_CreateMutex())) {
698  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
699  return AVERROR(ENOMEM);
700  }
701  if (!(f->cond = SDL_CreateCond())) {
702  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
703  return AVERROR(ENOMEM);
704  }
705  f->pktq = pktq;
706  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
707  f->keep_last = !!keep_last;
708  for (i = 0; i < f->max_size; i++)
709  if (!(f->queue[i].frame = av_frame_alloc()))
710  return AVERROR(ENOMEM);
711  return 0;
712 }
713 
714 static void frame_queue_destory(FrameQueue *f)
715 {
716  int i;
717  for (i = 0; i < f->max_size; i++) {
718  Frame *vp = &f->queue[i];
719  frame_queue_unref_item(vp);
720  av_frame_free(&vp->frame);
721  }
722  SDL_DestroyMutex(f->mutex);
723  SDL_DestroyCond(f->cond);
724 }
725 
726 static void frame_queue_signal(FrameQueue *f)
727 {
728  SDL_LockMutex(f->mutex);
729  SDL_CondSignal(f->cond);
730  SDL_UnlockMutex(f->mutex);
731 }
732 
733 static Frame *frame_queue_peek(FrameQueue *f)
734 {
735  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
736 }
737 
738 static Frame *frame_queue_peek_next(FrameQueue *f)
739 {
740  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
741 }
742 
743 static Frame *frame_queue_peek_last(FrameQueue *f)
744 {
745  return &f->queue[f->rindex];
746 }
747 
748 static Frame *frame_queue_peek_writable(FrameQueue *f)
749 {
750  /* wait until we have space to put a new frame */
751  SDL_LockMutex(f->mutex);
752  while (f->size >= f->max_size &&
753  !f->pktq->abort_request) {
754  SDL_CondWait(f->cond, f->mutex);
755  }
756  SDL_UnlockMutex(f->mutex);
757 
758  if (f->pktq->abort_request)
759  return NULL;
760 
761  return &f->queue[f->windex];
762 }
763 
764 static Frame *frame_queue_peek_readable(FrameQueue *f)
765 {
766  /* wait until we have a readable new frame */
767  SDL_LockMutex(f->mutex);
768  while (f->size - f->rindex_shown <= 0 &&
769  !f->pktq->abort_request) {
770  SDL_CondWait(f->cond, f->mutex);
771  }
772  SDL_UnlockMutex(f->mutex);
773 
774  if (f->pktq->abort_request)
775  return NULL;
776 
777  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
778 }
779 
780 static void frame_queue_push(FrameQueue *f)
781 {
782  if (++f->windex == f->max_size)
783  f->windex = 0;
784  SDL_LockMutex(f->mutex);
785  f->size++;
786  SDL_CondSignal(f->cond);
787  SDL_UnlockMutex(f->mutex);
788 }
789 
790 static void frame_queue_next(FrameQueue *f)
791 {
792  if (f->keep_last && !f->rindex_shown) {
793  f->rindex_shown = 1;
794  return;
795  }
796  frame_queue_unref_item(&f->queue[f->rindex]);
797  if (++f->rindex == f->max_size)
798  f->rindex = 0;
799  SDL_LockMutex(f->mutex);
800  f->size--;
801  SDL_CondSignal(f->cond);
802  SDL_UnlockMutex(f->mutex);
803 }
804 
805 /* return the number of undisplayed frames in the queue */
806 static int frame_queue_nb_remaining(FrameQueue *f)
807 {
808  return f->size - f->rindex_shown;
809 }
810 
811 /* return last shown position */
812 static int64_t frame_queue_last_pos(FrameQueue *f)
813 {
814  Frame *fp = &f->queue[f->rindex];
815  if (f->rindex_shown && fp->serial == f->pktq->serial)
816  return fp->pos;
817  else
818  return -1;
819 }
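/* Editor's note (illustrative, not part of ffplay.c): the usual reader pattern
 * for this queue is
 *     Frame *vp = frame_queue_peek_readable(f);   // blocks until a frame exists
 *     ... render vp ...
 *     frame_queue_next(f);                        // release, keep vp as "last shown"
 * With keep_last set, the displayed frame stays at queue[rindex] while
 * rindex_shown is 1, so frame_queue_peek_last() can return it for repaints and
 * frame_queue_nb_remaining() does not count it as pending. */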
820 
821 static void decoder_abort(Decoder *d, FrameQueue *fq)
822 {
823  packet_queue_abort(d->queue);
824  frame_queue_signal(fq);
825  SDL_WaitThread(d->decoder_tid, NULL);
826  d->decoder_tid = NULL;
827  packet_queue_flush(d->queue);
828 }
829 
830 static inline void fill_rectangle(int x, int y, int w, int h)
831 {
832  SDL_Rect rect;
833  rect.x = x;
834  rect.y = y;
835  rect.w = w;
836  rect.h = h;
837  if (w && h)
838  SDL_RenderFillRect(renderer, &rect);
839 }
840 
841 static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
842 {
843  Uint32 format;
844  int access, w, h;
845  if (!*texture || SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
846  void *pixels;
847  int pitch;
848  if (*texture)
849  SDL_DestroyTexture(*texture);
850  if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
851  return -1;
852  if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
853  return -1;
854  if (init_texture) {
855  if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
856  return -1;
857  memset(pixels, 0, pitch * new_height);
858  SDL_UnlockTexture(*texture);
859  }
860  av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n", new_width, new_height, SDL_GetPixelFormatName(new_format));
861  }
862  return 0;
863 }
864 
865 static void calculate_display_rect(SDL_Rect *rect,
866  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
867  int pic_width, int pic_height, AVRational pic_sar)
868 {
869  AVRational aspect_ratio = pic_sar;
870  int64_t width, height, x, y;
871 
872  if (av_cmp_q(aspect_ratio, av_make_q(0, 1)) <= 0)
873  aspect_ratio = av_make_q(1, 1);
874 
875  aspect_ratio = av_mul_q(aspect_ratio, av_make_q(pic_width, pic_height));
876 
877  /* XXX: we suppose the screen has a 1.0 pixel ratio */
878  height = scr_height;
879  width = av_rescale(height, aspect_ratio.num, aspect_ratio.den) & ~1;
880  if (width > scr_width) {
881  width = scr_width;
882  height = av_rescale(width, aspect_ratio.den, aspect_ratio.num) & ~1;
883  }
884  x = (scr_width - width) / 2;
885  y = (scr_height - height) / 2;
886  rect->x = scr_xleft + x;
887  rect->y = scr_ytop + y;
888  rect->w = FFMAX((int)width, 1);
889  rect->h = FFMAX((int)height, 1);
890 }
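/* Editor's note (illustrative, not part of ffplay.c): worked example for the
 * function above. A 1920x1080 picture with a 1:1 sample aspect ratio inside an
 * 800x600 area: aspect_ratio becomes 16/9, the first pass gives
 * width = av_rescale(600, 16, 9) & ~1 = 1066, which exceeds 800, so width is
 * clamped to 800 and height = av_rescale(800, 9, 16) & ~1 = 450; the result is
 * an 800x450 rectangle centered with x = 0 and y = 75. */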
891 
892 static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
893 {
894  int i;
895  *sdl_blendmode = SDL_BLENDMODE_NONE;
896  *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN;
897  if (format == AV_PIX_FMT_RGB32 ||
898  format == AV_PIX_FMT_RGB32_1 ||
899  format == AV_PIX_FMT_BGR32 ||
900  format == AV_PIX_FMT_BGR32_1)
901  *sdl_blendmode = SDL_BLENDMODE_BLEND;
902  for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; i++) {
903  if (format == sdl_texture_format_map[i].format) {
904  *sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt;
905  return;
906  }
907  }
908 }
909 
910 static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext **img_convert_ctx) {
911  int ret = 0;
912  Uint32 sdl_pix_fmt;
913  SDL_BlendMode sdl_blendmode;
914  get_sdl_pix_fmt_and_blendmode(frame->format, &sdl_pix_fmt, &sdl_blendmode);
915  if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
916  return -1;
917  switch (sdl_pix_fmt) {
918  case SDL_PIXELFORMAT_UNKNOWN:
919  /* This should only happen if we are not using avfilter... */
920  *img_convert_ctx = sws_getCachedContext(*img_convert_ctx,
921  frame->width, frame->height, frame->format, frame->width, frame->height,
922  AV_PIX_FMT_BGRA, sws_flags, NULL, NULL, NULL);
923  if (*img_convert_ctx != NULL) {
924  uint8_t *pixels[4];
925  int pitch[4];
926  if (!SDL_LockTexture(*tex, NULL, (void **)pixels, pitch)) {
927  sws_scale(*img_convert_ctx, (const uint8_t * const *)frame->data, frame->linesize,
928  0, frame->height, pixels, pitch);
929  SDL_UnlockTexture(*tex);
930  }
931  } else {
932  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
933  ret = -1;
934  }
935  break;
936  case SDL_PIXELFORMAT_IYUV:
937  if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
938  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0],
939  frame->data[1], frame->linesize[1],
940  frame->data[2], frame->linesize[2]);
941  } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
942  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0],
943  frame->data[1] + frame->linesize[1] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[1],
944  frame->data[2] + frame->linesize[2] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[2]);
945  } else {
946  av_log(NULL, AV_LOG_ERROR, "Mixed negative and positive linesizes are not supported.\n");
947  return -1;
948  }
949  break;
950  default:
951  if (frame->linesize[0] < 0) {
952  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
953  } else {
954  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0], frame->linesize[0]);
955  }
956  break;
957  }
958  return ret;
959 }
960 
961 static void set_sdl_yuv_conversion_mode(AVFrame *frame)
962 {
963 #if SDL_VERSION_ATLEAST(2,0,8)
964  SDL_YUV_CONVERSION_MODE mode = SDL_YUV_CONVERSION_AUTOMATIC;
965  if (frame && (frame->format == AV_PIX_FMT_YUV420P || frame->format == AV_PIX_FMT_YUYV422 || frame->format == AV_PIX_FMT_UYVY422)) {
966  if (frame->color_range == AVCOL_RANGE_JPEG)
967  mode = SDL_YUV_CONVERSION_JPEG;
968  else if (frame->colorspace == AVCOL_SPC_BT709)
969  mode = SDL_YUV_CONVERSION_BT709;
970  else if (frame->colorspace == AVCOL_SPC_BT470BG || frame->colorspace == AVCOL_SPC_SMPTE170M || frame->colorspace == AVCOL_SPC_SMPTE240M)
971  mode = SDL_YUV_CONVERSION_BT601;
972  }
973  SDL_SetYUVConversionMode(mode);
974 #endif
975 }
976 
977 static void video_image_display(VideoState *is)
978 {
979  Frame *vp;
980  Frame *sp = NULL;
981  SDL_Rect rect;
982 
983  vp = frame_queue_peek_last(&is->pictq);
984  if (is->subtitle_st) {
985  if (frame_queue_nb_remaining(&is->subpq) > 0) {
986  sp = frame_queue_peek(&is->subpq);
987 
988  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
989  if (!sp->uploaded) {
990  uint8_t* pixels[4];
991  int pitch[4];
992  int i;
993  if (!sp->width || !sp->height) {
994  sp->width = vp->width;
995  sp->height = vp->height;
996  }
997  if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
998  return;
999 
1000  for (i = 0; i < sp->sub.num_rects; i++) {
1001  AVSubtitleRect *sub_rect = sp->sub.rects[i];
1002 
1003  sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
1004  sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
1005  sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x);
1006  sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
1007 
1008  is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
1009  sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8,
1010  sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA,
1011  0, NULL, NULL, NULL);
1012  if (!is->sub_convert_ctx) {
1013  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1014  return;
1015  }
1016  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) {
1017  sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
1018  0, sub_rect->h, pixels, pitch);
1019  SDL_UnlockTexture(is->sub_texture);
1020  }
1021  }
1022  sp->uploaded = 1;
1023  }
1024  } else
1025  sp = NULL;
1026  }
1027  }
1028 
1029  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
1030 
1031  if (!vp->uploaded) {
1032  if (upload_texture(&is->vid_texture, vp->frame, &is->img_convert_ctx) < 0)
1033  return;
1034  vp->uploaded = 1;
1035  vp->flip_v = vp->frame->linesize[0] < 0;
1036  }
1037 
1038  set_sdl_yuv_conversion_mode(vp->frame);
1039  SDL_RenderCopyEx(renderer, is->vid_texture, NULL, &rect, 0, NULL, vp->flip_v ? SDL_FLIP_VERTICAL : 0);
1040  set_sdl_yuv_conversion_mode(NULL);
1041  if (sp) {
1042 #if USE_ONEPASS_SUBTITLE_RENDER
1043  SDL_RenderCopy(renderer, is->sub_texture, NULL, &rect);
1044 #else
1045  int i;
1046  double xratio = (double)rect.w / (double)sp->width;
1047  double yratio = (double)rect.h / (double)sp->height;
1048  for (i = 0; i < sp->sub.num_rects; i++) {
1049  SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
1050  SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
1051  .y = rect.y + sub_rect->y * yratio,
1052  .w = sub_rect->w * xratio,
1053  .h = sub_rect->h * yratio};
1054  SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
1055  }
1056 #endif
1057  }
1058 }
1059 
1060 static inline int compute_mod(int a, int b)
1061 {
1062  return a < 0 ? a%b + b : a%b;
1063 }
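/* Editor's note (illustrative, not part of ffplay.c): unlike C's %, this
 * always yields a value in [0, b): compute_mod(-3, SAMPLE_ARRAY_SIZE) wraps to
 * SAMPLE_ARRAY_SIZE - 3, which is what the waveform display below relies on
 * when stepping backwards through the sample ring buffer. */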
1064 
1065 static void video_audio_display(VideoState *s)
1066 {
1067  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1068  int ch, channels, h, h2;
1069  int64_t time_diff;
1070  int rdft_bits, nb_freq;
1071 
1072  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1073  ;
1074  nb_freq = 1 << (rdft_bits - 1);
1075 
1076  /* compute display index : center on currently output samples */
1077  channels = s->audio_tgt.channels;
1078  nb_display_channels = channels;
1079  if (!s->paused) {
1080  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1081  n = 2 * channels;
1082  delay = s->audio_write_buf_size;
1083  delay /= n;
1084 
1085  /* to be more precise, we take into account the time spent since
1086  the last buffer computation */
1087  if (audio_callback_time) {
1088  time_diff = av_gettime_relative() - audio_callback_time;
1089  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1090  }
1091 
1092  delay += 2 * data_used;
1093  if (delay < data_used)
1094  delay = data_used;
1095 
1096  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1097  if (s->show_mode == SHOW_MODE_WAVES) {
1098  h = INT_MIN;
1099  for (i = 0; i < 1000; i += channels) {
1100  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1101  int a = s->sample_array[idx];
1102  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1103  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1104  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1105  int score = a - d;
1106  if (h < score && (b ^ c) < 0) {
1107  h = score;
1108  i_start = idx;
1109  }
1110  }
1111  }
1112 
1113  s->last_i_start = i_start;
1114  } else {
1115  i_start = s->last_i_start;
1116  }
1117 
1118  if (s->show_mode == SHOW_MODE_WAVES) {
1119  SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
1120 
1121  /* total height for one channel */
1122  h = s->height / nb_display_channels;
1123  /* graph height / 2 */
1124  h2 = (h * 9) / 20;
1125  for (ch = 0; ch < nb_display_channels; ch++) {
1126  i = i_start + ch;
1127  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1128  for (x = 0; x < s->width; x++) {
1129  y = (s->sample_array[i] * h2) >> 15;
1130  if (y < 0) {
1131  y = -y;
1132  ys = y1 - y;
1133  } else {
1134  ys = y1;
1135  }
1136  fill_rectangle(s->xleft + x, ys, 1, y);
1137  i += channels;
1138  if (i >= SAMPLE_ARRAY_SIZE)
1139  i -= SAMPLE_ARRAY_SIZE;
1140  }
1141  }
1142 
1143  SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
1144 
1145  for (ch = 1; ch < nb_display_channels; ch++) {
1146  y = s->ytop + ch * h;
1147  fill_rectangle(s->xleft, y, s->width, 1);
1148  }
1149  } else {
1150  if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
1151  return;
1152 
1153  nb_display_channels= FFMIN(nb_display_channels, 2);
1154  if (rdft_bits != s->rdft_bits) {
1155  av_rdft_end(s->rdft);
1156  av_free(s->rdft_data);
1157  s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
1158  s->rdft_bits = rdft_bits;
1159  s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data));
1160  }
1161  if (!s->rdft || !s->rdft_data){
1162  av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1163  s->show_mode = SHOW_MODE_WAVES;
1164  } else {
1165  FFTSample *data[2];
1166  SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
1167  uint32_t *pixels;
1168  int pitch;
1169  for (ch = 0; ch < nb_display_channels; ch++) {
1170  data[ch] = s->rdft_data + 2 * nb_freq * ch;
1171  i = i_start + ch;
1172  for (x = 0; x < 2 * nb_freq; x++) {
1173  double w = (x-nb_freq) * (1.0 / nb_freq);
1174  data[ch][x] = s->sample_array[i] * (1.0 - w * w);
1175  i += channels;
1176  if (i >= SAMPLE_ARRAY_SIZE)
1177  i -= SAMPLE_ARRAY_SIZE;
1178  }
1179  av_rdft_calc(s->rdft, data[ch]);
1180  }
1181  /* Least efficient way to do this, we should of course
1182  * directly access it but it is more than fast enough. */
1183  if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
1184  pitch >>= 2;
1185  pixels += pitch * s->height;
1186  for (y = 0; y < s->height; y++) {
1187  double w = 1 / sqrt(nb_freq);
1188  int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
1189  int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][2 * y + 0], data[1][2 * y + 1]))
1190  : a;
1191  a = FFMIN(a, 255);
1192  b = FFMIN(b, 255);
1193  pixels -= pitch;
1194  *pixels = (a << 16) + (b << 8) + ((a+b) >> 1);
1195  }
1196  SDL_UnlockTexture(s->vis_texture);
1197  }
1198  SDL_RenderCopy(renderer, s->vis_texture, NULL, NULL);
1199  }
1200  if (!s->paused)
1201  s->xpos++;
1202  if (s->xpos >= s->width)
1203  s->xpos= s->xleft;
1204  }
1205 }
1206 
1207 static void stream_component_close(VideoState *is, int stream_index)
1208 {
1209  AVFormatContext *ic = is->ic;
1210  AVCodecParameters *codecpar;
1211 
1212  if (stream_index < 0 || stream_index >= ic->nb_streams)
1213  return;
1214  codecpar = ic->streams[stream_index]->codecpar;
1215 
1216  switch (codecpar->codec_type) {
1217  case AVMEDIA_TYPE_AUDIO:
1218  decoder_abort(&is->auddec, &is->sampq);
1219  SDL_CloseAudioDevice(audio_dev);
1220  decoder_destroy(&is->auddec);
1221  swr_free(&is->swr_ctx);
1222  av_freep(&is->audio_buf1);
1223  is->audio_buf1_size = 0;
1224  is->audio_buf = NULL;
1225 
1226  if (is->rdft) {
1227  av_rdft_end(is->rdft);
1228  av_freep(&is->rdft_data);
1229  is->rdft = NULL;
1230  is->rdft_bits = 0;
1231  }
1232  break;
1233  case AVMEDIA_TYPE_VIDEO:
1234  decoder_abort(&is->viddec, &is->pictq);
1235  decoder_destroy(&is->viddec);
1236  break;
1237  case AVMEDIA_TYPE_SUBTITLE:
1238  decoder_abort(&is->subdec, &is->subpq);
1239  decoder_destroy(&is->subdec);
1240  break;
1241  default:
1242  break;
1243  }
1244 
1245  ic->streams[stream_index]->discard = AVDISCARD_ALL;
1246  switch (codecpar->codec_type) {
1247  case AVMEDIA_TYPE_AUDIO:
1248  is->audio_st = NULL;
1249  is->audio_stream = -1;
1250  break;
1251  case AVMEDIA_TYPE_VIDEO:
1252  is->video_st = NULL;
1253  is->video_stream = -1;
1254  break;
1255  case AVMEDIA_TYPE_SUBTITLE:
1256  is->subtitle_st = NULL;
1257  is->subtitle_stream = -1;
1258  break;
1259  default:
1260  break;
1261  }
1262 }
1263 
1264 static void stream_close(VideoState *is)
1265 {
1266  /* XXX: use a special url_shutdown call to abort parse cleanly */
1267  is->abort_request = 1;
1268  SDL_WaitThread(is->read_tid, NULL);
1269 
1270  /* close each stream */
1271  if (is->audio_stream >= 0)
1272  stream_component_close(is, is->audio_stream);
1273  if (is->video_stream >= 0)
1274  stream_component_close(is, is->video_stream);
1275  if (is->subtitle_stream >= 0)
1276  stream_component_close(is, is->subtitle_stream);
1277 
1278  avformat_close_input(&is->ic);
1279 
1280  packet_queue_destroy(&is->videoq);
1281  packet_queue_destroy(&is->audioq);
1282  packet_queue_destroy(&is->subtitleq);
1283 
1284  /* free all pictures */
1285  frame_queue_destory(&is->pictq);
1286  frame_queue_destory(&is->sampq);
1287  frame_queue_destory(&is->subpq);
1288  SDL_DestroyCond(is->continue_read_thread);
1289  sws_freeContext(is->img_convert_ctx);
1290  sws_freeContext(is->sub_convert_ctx);
1291  av_free(is->filename);
1292  if (is->vis_texture)
1293  SDL_DestroyTexture(is->vis_texture);
1294  if (is->vid_texture)
1295  SDL_DestroyTexture(is->vid_texture);
1296  if (is->sub_texture)
1297  SDL_DestroyTexture(is->sub_texture);
1298  av_free(is);
1299 }
1300 
1301 static void do_exit(VideoState *is)
1302 {
1303  if (is) {
1304  stream_close(is);
1305  }
1306  if (renderer)
1307  SDL_DestroyRenderer(renderer);
1308  if (window)
1309  SDL_DestroyWindow(window);
1310  uninit_opts();
1311 #if CONFIG_AVFILTER
1312  av_freep(&vfilters_list);
1313 #endif
1314  avformat_network_deinit();
1315  if (show_status)
1316  printf("\n");
1317  SDL_Quit();
1318  av_log(NULL, AV_LOG_QUIET, "%s", "");
1319  exit(0);
1320 }
1321 
1322 static void sigterm_handler(int sig)
1323 {
1324  exit(123);
1325 }
1326 
1327 static void set_default_window_size(int width, int height, AVRational sar)
1328 {
1329  SDL_Rect rect;
1330  int max_width = screen_width ? screen_width : INT_MAX;
1331  int max_height = screen_height ? screen_height : INT_MAX;
1332  if (max_width == INT_MAX && max_height == INT_MAX)
1333  max_height = height;
1334  calculate_display_rect(&rect, 0, 0, max_width, max_height, width, height, sar);
1335  default_width = rect.w;
1336  default_height = rect.h;
1337 }
1338 
1339 static int video_open(VideoState *is)
1340 {
1341  int w,h;
1342 
1343  w = screen_width ? screen_width : default_width;
1344  h = screen_height ? screen_height : default_height;
1345 
1346  if (!window_title)
1347  window_title = input_filename;
1348  SDL_SetWindowTitle(window, window_title);
1349 
1350  SDL_SetWindowSize(window, w, h);
1351  SDL_SetWindowPosition(window, screen_left, screen_top);
1352  if (is_full_screen)
1353  SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP);
1354  SDL_ShowWindow(window);
1355 
1356  is->width = w;
1357  is->height = h;
1358 
1359  return 0;
1360 }
1361 
1362 /* display the current picture, if any */
1363 static void video_display(VideoState *is)
1364 {
1365  if (!is->width)
1366  video_open(is);
1367 
1368  SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1369  SDL_RenderClear(renderer);
1370  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1371  video_audio_display(is);
1372  else if (is->video_st)
1373  video_image_display(is);
1374  SDL_RenderPresent(renderer);
1375 }
1376 
1377 static double get_clock(Clock *c)
1378 {
1379  if (*c->queue_serial != c->serial)
1380  return NAN;
1381  if (c->paused) {
1382  return c->pts;
1383  } else {
1384  double time = av_gettime_relative() / 1000000.0;
1385  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1386  }
1387 }
1388 
1389 static void set_clock_at(Clock *c, double pts, int serial, double time)
1390 {
1391  c->pts = pts;
1392  c->last_updated = time;
1393  c->pts_drift = c->pts - time;
1394  c->serial = serial;
1395 }
1396 
1397 static void set_clock(Clock *c, double pts, int serial)
1398 {
1399  double time = av_gettime_relative() / 1000000.0;
1400  set_clock_at(c, pts, serial, time);
1401 }
1402 
1403 static void set_clock_speed(Clock *c, double speed)
1404 {
1405  set_clock(c, get_clock(c), c->serial);
1406  c->speed = speed;
1407 }
1408 
1409 static void init_clock(Clock *c, int *queue_serial)
1410 {
1411  c->speed = 1.0;
1412  c->paused = 0;
1413  c->queue_serial = queue_serial;
1414  set_clock(c, NAN, -1);
1415 }
1416 
1417 static void sync_clock_to_slave(Clock *c, Clock *slave)
1418 {
1419  double clock = get_clock(c);
1420  double slave_clock = get_clock(slave);
1421  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1422  set_clock(c, slave_clock, slave->serial);
1423 }
1424 
1425 static int get_master_sync_type(VideoState *is) {
1426  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1427  if (is->video_st)
1428  return AV_SYNC_VIDEO_MASTER;
1429  else
1430  return AV_SYNC_AUDIO_MASTER;
1431  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1432  if (is->audio_st)
1433  return AV_SYNC_AUDIO_MASTER;
1434  else
1435  return AV_SYNC_EXTERNAL_CLOCK;
1436  } else {
1437  return AV_SYNC_EXTERNAL_CLOCK;
1438  }
1439 }
1440 
1441 /* get the current master clock value */
1442 static double get_master_clock(VideoState *is)
1443 {
1444  double val;
1445 
1446  switch (get_master_sync_type(is)) {
1447  case AV_SYNC_VIDEO_MASTER:
1448  val = get_clock(&is->vidclk);
1449  break;
1450  case AV_SYNC_AUDIO_MASTER:
1451  val = get_clock(&is->audclk);
1452  break;
1453  default:
1454  val = get_clock(&is->extclk);
1455  break;
1456  }
1457  return val;
1458 }
1459 
1460 static void check_external_clock_speed(VideoState *is) {
1461  if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES ||
1462  is->audio_stream >= 0 && is->audioq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES) {
1463  set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
1464  } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) &&
1465  (is->audio_stream < 0 || is->audioq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES)) {
1466  set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
1467  } else {
1468  double speed = is->extclk.speed;
1469  if (speed != 1.0)
1470  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1471  }
1472 }
1473 
1474 /* seek in the stream */
1475 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1476 {
1477  if (!is->seek_req) {
1478  is->seek_pos = pos;
1479  is->seek_rel = rel;
1480  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1481  if (seek_by_bytes)
1482  is->seek_flags |= AVSEEK_FLAG_BYTE;
1483  is->seek_req = 1;
1484  SDL_CondSignal(is->continue_read_thread);
1485  }
1486 }
1487 
1488 /* pause or resume the video */
1489 static void stream_toggle_pause(VideoState *is)
1490 {
1491  if (is->paused) {
1492  is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1493  if (is->read_pause_return != AVERROR(ENOSYS)) {
1494  is->vidclk.paused = 0;
1495  }
1496  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1497  }
1498  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1499  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1500 }
1501 
1502 static void toggle_pause(VideoState *is)
1503 {
1504  stream_toggle_pause(is);
1505  is->step = 0;
1506 }
1507 
1508 static void toggle_mute(VideoState *is)
1509 {
1510  is->muted = !is->muted;
1511 }
1512 
1513 static void update_volume(VideoState *is, int sign, double step)
1514 {
1515  double volume_level = is->audio_volume ? (20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)) : -1000.0;
1516  int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (volume_level + sign * step) / 20.0));
1517  is->audio_volume = av_clip(is->audio_volume == new_volume ? (is->audio_volume + sign) : new_volume, 0, SDL_MIX_MAXVOLUME);
1518 }
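/* Editor's note (illustrative, not part of ffplay.c): worked example for the
 * function above, with SDL_MIX_MAXVOLUME == 128. A current volume of 64
 * corresponds to 20*log10(64/128) ~= -6.02 dB; one +0.75 dB step gives
 * lrint(128 * 10^(-5.27/20)) ~= 70, and the av_clip plus the "+ sign" fallback
 * guarantee the integer volume always moves by at least one unit per step. */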
1519 
1520 static void step_to_next_frame(VideoState *is)
1521 {
1522  /* if the stream is paused unpause it, then step */
1523  if (is->paused)
1524  stream_toggle_pause(is);
1525  is->step = 1;
1526 }
1527 
1528 static double compute_target_delay(double delay, VideoState *is)
1529 {
1530  double sync_threshold, diff = 0;
1531 
1532  /* update delay to follow master synchronisation source */
1533  if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1534  /* if video is slave, we try to correct big delays by
1535  duplicating or deleting a frame */
1536  diff = get_clock(&is->vidclk) - get_master_clock(is);
1537 
1538  /* skip or repeat frame. We take into account the
1539  delay to compute the threshold. I still don't know
1540  if it is the best guess */
1541  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1542  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1543  if (diff <= -sync_threshold)
1544  delay = FFMAX(0, delay + diff);
1545  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1546  delay = delay + diff;
1547  else if (diff >= sync_threshold)
1548  delay = 2 * delay;
1549  }
1550  }
1551 
1552  av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
1553  delay, -diff);
1554 
1555  return delay;
1556 }
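/* Editor's note (illustrative, not part of ffplay.c): worked example for the
 * function above, with the video clock slaved to audio. For a nominal frame
 * delay of 0.040 s the sync threshold clamps to 0.040 s. If the A-V diff is
 * -0.100 s (video late), delay becomes FFMAX(0, 0.040 - 0.100) = 0 and the
 * next frame is shown immediately; if the diff is +0.100 s (video early), the
 * 0.040 s delay is doubled to 0.080 s, letting the picture drift back into
 * sync without freezing the display. */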
1557 
1558 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1559  if (vp->serial == nextvp->serial) {
1560  double duration = nextvp->pts - vp->pts;
1561  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1562  return vp->duration;
1563  else
1564  return duration;
1565  } else {
1566  return 0.0;
1567  }
1568 }
1569 
1570 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1571  /* update current video pts */
1572  set_clock(&is->vidclk, pts, serial);
1573  sync_clock_to_slave(&is->extclk, &is->vidclk);
1574 }
1575 
1576 /* called to display each frame */
1577 static void video_refresh(void *opaque, double *remaining_time)
1578 {
1579  VideoState *is = opaque;
1580  double time;
1581 
1582  Frame *sp, *sp2;
1583 
1584  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1585  check_external_clock_speed(is);
1586 
1587  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1588  time = av_gettime_relative() / 1000000.0;
1589  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1590  video_display(is);
1591  is->last_vis_time = time;
1592  }
1593  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1594  }
1595 
1596  if (is->video_st) {
1597 retry:
1598  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1599  // nothing to do, no picture to display in the queue
1600  } else {
1601  double last_duration, duration, delay;
1602  Frame *vp, *lastvp;
1603 
1604  /* dequeue the picture */
1605  lastvp = frame_queue_peek_last(&is->pictq);
1606  vp = frame_queue_peek(&is->pictq);
1607 
1608  if (vp->serial != is->videoq.serial) {
1609  frame_queue_next(&is->pictq);
1610  goto retry;
1611  }
1612 
1613  if (lastvp->serial != vp->serial)
1614  is->frame_timer = av_gettime_relative() / 1000000.0;
1615 
1616  if (is->paused)
1617  goto display;
1618 
1619  /* compute nominal last_duration */
1620  last_duration = vp_duration(is, lastvp, vp);
1621  delay = compute_target_delay(last_duration, is);
1622 
1623  time= av_gettime_relative()/1000000.0;
1624  if (time < is->frame_timer + delay) {
1625  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1626  goto display;
1627  }
1628 
1629  is->frame_timer += delay;
1630  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1631  is->frame_timer = time;
1632 
1633  SDL_LockMutex(is->pictq.mutex);
1634  if (!isnan(vp->pts))
1635  update_video_pts(is, vp->pts, vp->pos, vp->serial);
1636  SDL_UnlockMutex(is->pictq.mutex);
1637 
1638  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1639  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1640  duration = vp_duration(is, vp, nextvp);
1641  if(!is->step && (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1642  is->frame_drops_late++;
1643  frame_queue_next(&is->pictq);
1644  goto retry;
1645  }
1646  }
1647 
1648  if (is->subtitle_st) {
1649  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1650  sp = frame_queue_peek(&is->subpq);
1651 
1652  if (frame_queue_nb_remaining(&is->subpq) > 1)
1653  sp2 = frame_queue_peek_next(&is->subpq);
1654  else
1655  sp2 = NULL;
1656 
1657  if (sp->serial != is->subtitleq.serial
1658  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1659  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1660  {
1661  if (sp->uploaded) {
1662  int i;
1663  for (i = 0; i < sp->sub.num_rects; i++) {
1664  AVSubtitleRect *sub_rect = sp->sub.rects[i];
1665  uint8_t *pixels;
1666  int pitch, j;
1667 
1668  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
1669  for (j = 0; j < sub_rect->h; j++, pixels += pitch)
1670  memset(pixels, 0, sub_rect->w << 2);
1671  SDL_UnlockTexture(is->sub_texture);
1672  }
1673  }
1674  }
1675  frame_queue_next(&is->subpq);
1676  } else {
1677  break;
1678  }
1679  }
1680  }
1681 
1682  frame_queue_next(&is->pictq);
1683  is->force_refresh = 1;
1684 
1685  if (is->step && !is->paused)
1686  stream_toggle_pause(is);
1687  }
1688 display:
1689  /* display picture */
1690  if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
1691  video_display(is);
1692  }
1693  is->force_refresh = 0;
1694  if (show_status) {
1695  static int64_t last_time;
1696  int64_t cur_time;
1697  int aqsize, vqsize, sqsize;
1698  double av_diff;
1699 
1700  cur_time = av_gettime_relative();
1701  if (!last_time || (cur_time - last_time) >= 30000) {
1702  aqsize = 0;
1703  vqsize = 0;
1704  sqsize = 0;
1705  if (is->audio_st)
1706  aqsize = is->audioq.size;
1707  if (is->video_st)
1708  vqsize = is->videoq.size;
1709  if (is->subtitle_st)
1710  sqsize = is->subtitleq.size;
1711  av_diff = 0;
1712  if (is->audio_st && is->video_st)
1713  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1714  else if (is->video_st)
1715  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1716  else if (is->audio_st)
1717  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1718  av_log(NULL, AV_LOG_INFO,
1719  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1720  get_master_clock(is),
1721  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1722  av_diff,
1723  is->frame_drops_early + is->frame_drops_late,
1724  aqsize / 1024,
1725  vqsize / 1024,
1726  sqsize,
1727  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_dts : 0,
1728  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_pts : 0);
1729  fflush(stdout);
1730  last_time = cur_time;
1731  }
1732  }
1733 }
1734 
1735 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1736 {
1737  Frame *vp;
1738 
1739 #if defined(DEBUG_SYNC)
1740  printf("frame_type=%c pts=%0.3f\n",
1741  av_get_picture_type_char(src_frame->pict_type), pts);
1742 #endif
1743 
1744  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1745  return -1;
1746 
1747  vp->sar = src_frame->sample_aspect_ratio;
1748  vp->uploaded = 0;
1749 
1750  vp->width = src_frame->width;
1751  vp->height = src_frame->height;
1752  vp->format = src_frame->format;
1753 
1754  vp->pts = pts;
1755  vp->duration = duration;
1756  vp->pos = pos;
1757  vp->serial = serial;
1758 
1759  set_default_window_size(vp->width, vp->height, vp->sar);
1760 
1761  av_frame_move_ref(vp->frame, src_frame);
1762  frame_queue_push(&is->pictq);
1763  return 0;
1764 }
1765 
1766 static int get_video_frame(VideoState *is, AVFrame *frame)
1767 {
1768  int got_picture;
1769 
1770  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1771  return -1;
1772 
1773  if (got_picture) {
1774  double dpts = NAN;
1775 
1776  if (frame->pts != AV_NOPTS_VALUE)
1777  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1778 
1779  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1780 
1781  if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1782  if (frame->pts != AV_NOPTS_VALUE) {
1783  double diff = dpts - get_master_clock(is);
1784  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1785  diff - is->frame_last_filter_delay < 0 &&
1786  is->viddec.pkt_serial == is->vidclk.serial &&
1787  is->videoq.nb_packets) {
1788  is->frame_drops_early++;
1789  av_frame_unref(frame);
1790  got_picture = 0;
1791  }
1792  }
1793  }
1794  }
1795 
1796  return got_picture;
1797 }
1798 
1799 #if CONFIG_AVFILTER
1800 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1801  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1802 {
1803  int ret, i;
1804  int nb_filters = graph->nb_filters;
1805  AVFilterInOut *outputs = NULL, *inputs = NULL;
1806 
1807  if (filtergraph) {
1808  outputs = avfilter_inout_alloc();
1809  inputs = avfilter_inout_alloc();
1810  if (!outputs || !inputs) {
1811  ret = AVERROR(ENOMEM);
1812  goto fail;
1813  }
1814 
1815  outputs->name = av_strdup("in");
1816  outputs->filter_ctx = source_ctx;
1817  outputs->pad_idx = 0;
1818  outputs->next = NULL;
1819 
1820  inputs->name = av_strdup("out");
1821  inputs->filter_ctx = sink_ctx;
1822  inputs->pad_idx = 0;
1823  inputs->next = NULL;
1824 
1825  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1826  goto fail;
1827  } else {
1828  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1829  goto fail;
1830  }
1831 
1832  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1833  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1834  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1835 
1836  ret = avfilter_graph_config(graph, NULL);
1837 fail:
1838  avfilter_inout_free(&outputs);
1839  avfilter_inout_free(&inputs);
1840  return ret;
1841 }
1842 
1843 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1844 {
1845  enum AVPixelFormat pix_fmts[FF_ARRAY_ELEMS(sdl_texture_format_map)];
1846  char sws_flags_str[512] = "";
1847  char buffersrc_args[256];
1848  int ret;
1849  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1850  AVCodecParameters *codecpar = is->video_st->codecpar;
1851  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1852  AVDictionaryEntry *e = NULL;
1853  int nb_pix_fmts = 0;
1854  int i, j;
1855 
1856  for (i = 0; i < renderer_info.num_texture_formats; i++) {
1857  for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; j++) {
1858  if (renderer_info.texture_formats[i] == sdl_texture_format_map[j].texture_fmt) {
1859  pix_fmts[nb_pix_fmts++] = sdl_texture_format_map[j].format;
1860  break;
1861  }
1862  }
1863  }
1864  pix_fmts[nb_pix_fmts] = AV_PIX_FMT_NONE;
1865 
1866  while ((e = av_dict_get(sws_dict, "", e, AV_DICT_IGNORE_SUFFIX))) {
1867  if (!strcmp(e->key, "sws_flags")) {
1868  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1869  } else
1870  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value);
1871  }
1872  if (strlen(sws_flags_str))
1873  sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1874 
1875  graph->scale_sws_opts = av_strdup(sws_flags_str);
1876 
1877  snprintf(buffersrc_args, sizeof(buffersrc_args),
1878  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1879  frame->width, frame->height, frame->format,
1880  is->video_st->time_base.num, is->video_st->time_base.den,
1881  codecpar->sample_aspect_ratio.num, FFMAX(codecpar->sample_aspect_ratio.den, 1));
1882  if (fr.num && fr.den)
1883  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1884 
1885  if ((ret = avfilter_graph_create_filter(&filt_src,
1886  avfilter_get_by_name("buffer"),
1887  "ffplay_buffer", buffersrc_args, NULL,
1888  graph)) < 0)
1889  goto fail;
1890 
1891  ret = avfilter_graph_create_filter(&filt_out,
1892  avfilter_get_by_name("buffersink"),
1893  "ffplay_buffersink", NULL, NULL, graph);
1894  if (ret < 0)
1895  goto fail;
1896 
1897  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1898  goto fail;
1899 
1900  last_filter = filt_out;
1901 
1902 /* Note: this macro adds a filter before the lastly added filter, so the
1903  * processing order of the filters is in reverse */
1904 #define INSERT_FILT(name, arg) do { \
1905  AVFilterContext *filt_ctx; \
1906  \
1907  ret = avfilter_graph_create_filter(&filt_ctx, \
1908  avfilter_get_by_name(name), \
1909  "ffplay_" name, arg, NULL, graph); \
1910  if (ret < 0) \
1911  goto fail; \
1912  \
1913  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1914  if (ret < 0) \
1915  goto fail; \
1916  \
1917  last_filter = filt_ctx; \
1918 } while (0)
1919 
1920  if (autorotate) {
1921  double theta = get_rotation(is->video_st);
1922 
1923  if (fabs(theta - 90) < 1.0) {
1924  INSERT_FILT("transpose", "clock");
1925  } else if (fabs(theta - 180) < 1.0) {
1926  INSERT_FILT("hflip", NULL);
1927  INSERT_FILT("vflip", NULL);
1928  } else if (fabs(theta - 270) < 1.0) {
1929  INSERT_FILT("transpose", "cclock");
1930  } else if (fabs(theta) > 1.0) {
1931  char rotate_buf[64];
1932  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1933  INSERT_FILT("rotate", rotate_buf);
1934  }
1935  }
1936 
1937  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
1938  goto fail;
1939 
1940  is->in_video_filter = filt_src;
1941  is->out_video_filter = filt_out;
1942 
1943 fail:
1944  return ret;
1945 }
1946 
1947 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1948 {
1949  static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
1950  int sample_rates[2] = { 0, -1 };
1951  int64_t channel_layouts[2] = { 0, -1 };
1952  int channels[2] = { 0, -1 };
1953  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1954  char aresample_swr_opts[512] = "";
1955  AVDictionaryEntry *e = NULL;
1956  char asrc_args[256];
1957  int ret;
1958 
1959  avfilter_graph_free(&is->agraph);
1960  if (!(is->agraph = avfilter_graph_alloc()))
1961  return AVERROR(ENOMEM);
1962  is->agraph->nb_threads = filter_nbthreads;
1963 
1964  while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
1965  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
1966  if (strlen(aresample_swr_opts))
1967  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
1968  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
1969 
1970  ret = snprintf(asrc_args, sizeof(asrc_args),
1971  "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
1972  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
1973  is->audio_filter_src.channels,
1974  1, is->audio_filter_src.freq);
1975  if (is->audio_filter_src.channel_layout)
1976  snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
1977  ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
1978 
1979  ret = avfilter_graph_create_filter(&filt_asrc,
1980  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1981  asrc_args, NULL, is->agraph);
1982  if (ret < 0)
1983  goto end;
1984 
1985 
1986  ret = avfilter_graph_create_filter(&filt_asink,
1987  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
1988  NULL, NULL, is->agraph);
1989  if (ret < 0)
1990  goto end;
1991 
1992  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1993  goto end;
1994  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1995  goto end;
1996 
1997  if (force_output_format) {
1998  channel_layouts[0] = is->audio_tgt.channel_layout;
1999  channels [0] = is->audio_tgt.channels;
2000  sample_rates [0] = is->audio_tgt.freq;
2001  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
2002  goto end;
2003  if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2004  goto end;
2005  if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2006  goto end;
2007  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2008  goto end;
2009  }
2010 
2011 
2012  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
2013  goto end;
2014 
2015  is->in_audio_filter = filt_asrc;
2016  is->out_audio_filter = filt_asink;
2017 
2018 end:
2019  if (ret < 0)
2020  avfilter_graph_free(&is->agraph);
2021  return ret;
2022 }
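/* Illustrative example (hypothetical values, not taken from the source): for a
 * 44100 Hz stereo FLTP decoder the abuffer argument string built above would be
 *
 *     sample_rate=44100:sample_fmt=fltp:channels=2:time_base=1/44100:channel_layout=0x3
 *
 * where 0x3 is AV_CH_LAYOUT_STEREO (front left | front right). */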
2023 #endif /* CONFIG_AVFILTER */
2024 
2025 static int audio_thread(void *arg)
2026 {
2027  VideoState *is = arg;
2028  AVFrame *frame = av_frame_alloc();
2029  Frame *af;
2030 #if CONFIG_AVFILTER
2031  int last_serial = -1;
2032  int64_t dec_channel_layout;
2033  int reconfigure;
2034 #endif
2035  int got_frame = 0;
2036  AVRational tb;
2037  int ret = 0;
2038 
2039  if (!frame)
2040  return AVERROR(ENOMEM);
2041 
2042  do {
2043  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2044  goto the_end;
2045 
2046  if (got_frame) {
2047  tb = (AVRational){1, frame->sample_rate};
2048 
2049 #if CONFIG_AVFILTER
2050  dec_channel_layout = get_valid_channel_layout(frame->channel_layout, frame->channels);
2051 
2052  reconfigure =
2053  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2054  frame->format, frame->channels) ||
2055  is->audio_filter_src.channel_layout != dec_channel_layout ||
2056  is->audio_filter_src.freq != frame->sample_rate ||
2057  is->auddec.pkt_serial != last_serial;
2058 
2059  if (reconfigure) {
2060  char buf1[1024], buf2[1024];
2061  av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2062  av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2064  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2065  is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2066  frame->sample_rate, frame->channels, av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2067 
2068  is->audio_filter_src.fmt = frame->format;
2069  is->audio_filter_src.channels = frame->channels;
2070  is->audio_filter_src.channel_layout = dec_channel_layout;
2071  is->audio_filter_src.freq = frame->sample_rate;
2072  last_serial = is->auddec.pkt_serial;
2073 
2074  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2075  goto the_end;
2076  }
2077 
2078  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2079  goto the_end;
2080 
2081  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2082  tb = av_buffersink_get_time_base(is->out_audio_filter);
2083 #endif
2084  if (!(af = frame_queue_peek_writable(&is->sampq)))
2085  goto the_end;
2086 
2087  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2088  af->pos = frame->pkt_pos;
2089  af->serial = is->auddec.pkt_serial;
2090  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2091 
2092  av_frame_move_ref(af->frame, frame);
2093  frame_queue_push(&is->sampq);
2094 
2095 #if CONFIG_AVFILTER
2096  if (is->audioq.serial != is->auddec.pkt_serial)
2097  break;
2098  }
2099  if (ret == AVERROR_EOF)
2100  is->auddec.finished = is->auddec.pkt_serial;
2101 #endif
2102  }
2103  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2104  the_end:
2105 #if CONFIG_AVFILTER
2106  avfilter_graph_free(&is->agraph);
2107 #endif
2108  av_frame_free(&frame);
2109  return ret;
2110 }
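/* Worked example (hypothetical numbers): with tb = 1/44100, a decoded frame
 * whose pts is 441000 gets af->pts = 441000 * (1/44100) = 10.0 s, and a
 * 1024-sample frame gets af->duration = 1024 / 44100 ~= 0.0232 s. */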
2111 
2112 static int decoder_start(Decoder *d, int (*fn)(void *), const char *thread_name, void* arg)
2113 {
2114  packet_queue_start(d->queue);
2115  d->decoder_tid = SDL_CreateThread(fn, thread_name, arg);
2116  if (!d->decoder_tid) {
2117  av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
2118  return AVERROR(ENOMEM);
2119  }
2120  return 0;
2121 }
2122 
2123 static int video_thread(void *arg)
2124 {
2125  VideoState *is = arg;
2126  AVFrame *frame = av_frame_alloc();
2127  double pts;
2128  double duration;
2129  int ret;
2130  AVRational tb = is->video_st->time_base;
2131  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2132 
2133 #if CONFIG_AVFILTER
2134  AVFilterGraph *graph = NULL;
2135  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2136  int last_w = 0;
2137  int last_h = 0;
2138  enum AVPixelFormat last_format = -2;
2139  int last_serial = -1;
2140  int last_vfilter_idx = 0;
2141 #endif
2142 
2143  if (!frame)
2144  return AVERROR(ENOMEM);
2145 
2146  for (;;) {
2147  ret = get_video_frame(is, frame);
2148  if (ret < 0)
2149  goto the_end;
2150  if (!ret)
2151  continue;
2152 
2153 #if CONFIG_AVFILTER
2154  if ( last_w != frame->width
2155  || last_h != frame->height
2156  || last_format != frame->format
2157  || last_serial != is->viddec.pkt_serial
2158  || last_vfilter_idx != is->vfilter_idx) {
2160  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2161  last_w, last_h,
2162  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2163  frame->width, frame->height,
2164  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2165  avfilter_graph_free(&graph);
2166  graph = avfilter_graph_alloc();
2167  if (!graph) {
2168  ret = AVERROR(ENOMEM);
2169  goto the_end;
2170  }
2171  graph->nb_threads = filter_nbthreads;
2172  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2173  SDL_Event event;
2174  event.type = FF_QUIT_EVENT;
2175  event.user.data1 = is;
2176  SDL_PushEvent(&event);
2177  goto the_end;
2178  }
2179  filt_in = is->in_video_filter;
2180  filt_out = is->out_video_filter;
2181  last_w = frame->width;
2182  last_h = frame->height;
2183  last_format = frame->format;
2184  last_serial = is->viddec.pkt_serial;
2185  last_vfilter_idx = is->vfilter_idx;
2186  frame_rate = av_buffersink_get_frame_rate(filt_out);
2187  }
2188 
2189  ret = av_buffersrc_add_frame(filt_in, frame);
2190  if (ret < 0)
2191  goto the_end;
2192 
2193  while (ret >= 0) {
2194  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2195 
2196  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2197  if (ret < 0) {
2198  if (ret == AVERROR_EOF)
2199  is->viddec.finished = is->viddec.pkt_serial;
2200  ret = 0;
2201  break;
2202  }
2203 
2204  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2205  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2206  is->frame_last_filter_delay = 0;
2207  tb = av_buffersink_get_time_base(filt_out);
2208 #endif
2209  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2210  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2211  ret = queue_picture(is, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial);
2212  av_frame_unref(frame);
2213 #if CONFIG_AVFILTER
2214  if (is->videoq.serial != is->viddec.pkt_serial)
2215  break;
2216  }
2217 #endif
2218 
2219  if (ret < 0)
2220  goto the_end;
2221  }
2222  the_end:
2223 #if CONFIG_AVFILTER
2224  avfilter_graph_free(&graph);
2225 #endif
2226  av_frame_free(&frame);
2227  return 0;
2228 }
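/* Worked example (hypothetical numbers): for a 30000/1001 fps stream the
 * duration computed above is av_q2d((AVRational){1001, 30000}) ~= 0.0334 s per
 * frame; if the frame rate is unknown (num or den is 0) the duration falls
 * back to 0 and the refresh code has to rely on consecutive pts values. */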
2229 
2230 static int subtitle_thread(void *arg)
2231 {
2232  VideoState *is = arg;
2233  Frame *sp;
2234  int got_subtitle;
2235  double pts;
2236 
2237  for (;;) {
2238  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2239  return 0;
2240 
2241  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2242  break;
2243 
2244  pts = 0;
2245 
2246  if (got_subtitle && sp->sub.format == 0) {
2247  if (sp->sub.pts != AV_NOPTS_VALUE)
2248  pts = sp->sub.pts / (double)AV_TIME_BASE;
2249  sp->pts = pts;
2250  sp->serial = is->subdec.pkt_serial;
2251  sp->width = is->subdec.avctx->width;
2252  sp->height = is->subdec.avctx->height;
2253  sp->uploaded = 0;
2254 
2255  /* now we can update the picture count */
2256  frame_queue_push(&is->subpq);
2257  } else if (got_subtitle) {
2258  avsubtitle_free(&sp->sub);
2259  }
2260  }
2261  return 0;
2262 }
2263 
2264 /* copy samples for viewing in editor window */
2265 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2266 {
2267  int size, len;
2268 
2269  size = samples_size / sizeof(short);
2270  while (size > 0) {
2271  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2272  if (len > size)
2273  len = size;
2274  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2275  samples += len;
2276  is->sample_array_index += len;
2277  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2278  is->sample_array_index = 0;
2279  size -= len;
2280  }
2281 }
2282 
2283 /* return the wanted number of samples to get better sync if sync_type is video
2284  * or external master clock */
2285 static int synchronize_audio(VideoState *is, int nb_samples)
2286 {
2287  int wanted_nb_samples = nb_samples;
2288 
2289  /* if not master, then we try to remove or add samples to correct the clock */
2290  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2291  double diff, avg_diff;
2292  int min_nb_samples, max_nb_samples;
2293 
2294  diff = get_clock(&is->audclk) - get_master_clock(is);
2295 
2296  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2297  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2298  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2299  /* not enough measures to have a correct estimate */
2300  is->audio_diff_avg_count++;
2301  } else {
2302  /* estimate the A-V difference */
2303  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2304 
2305  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2306  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2307  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2308  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2309  wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2310  }
2311  av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2312  diff, avg_diff, wanted_nb_samples - nb_samples,
2313  is->audio_clock, is->audio_diff_threshold);
2314  }
2315  } else {
2316  /* too big difference : may be initial PTS errors, so
2317  reset A-V filter */
2318  is->audio_diff_avg_count = 0;
2319  is->audio_diff_cum = 0;
2320  }
2321  }
2322 
2323  return wanted_nb_samples;
2324 }
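/* Worked example (hypothetical numbers): with nb_samples = 1024, a 48000 Hz
 * audio source and a measured drift of +0.015 s, the request above is
 * 1024 + (int)(0.015 * 48000) = 1744 samples, which av_clip() limits to the
 * +/-SAMPLE_CORRECTION_PERCENT_MAX window [921, 1126], so 1126 samples are
 * requested and the resampler stretches the audio by roughly 10%.
 * Minimal sketch of that clamping, kept out of the build on purpose: */
#if 0
static int example_clip_wanted_samples(int nb_samples, double diff, int freq)
{
    int wanted = nb_samples + (int)(diff * freq);                         /* raw correction */
    int min_nb = nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100;
    int max_nb = nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100;
    return av_clip(wanted, min_nb, max_nb);                               /* 1024 -> 1126 here */
}
#endif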
2325 
2326 /**
2327  * Decode one audio frame and return its uncompressed size.
2328  *
2329  * The processed audio frame is decoded, converted if required, and
2330  * stored in is->audio_buf, with size in bytes given by the return
2331  * value.
2332  */
2333 static int audio_decode_frame(VideoState *is)
2334 {
2335  int data_size, resampled_data_size;
2336  int64_t dec_channel_layout;
2337  av_unused double audio_clock0;
2338  int wanted_nb_samples;
2339  Frame *af;
2340 
2341  if (is->paused)
2342  return -1;
2343 
2344  do {
2345 #if defined(_WIN32)
2346  while (frame_queue_nb_remaining(&is->sampq) == 0) {
2347  if ((av_gettime_relative() - audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
2348  return -1;
2349  av_usleep (1000);
2350  }
2351 #endif
2352  if (!(af = frame_queue_peek_readable(&is->sampq)))
2353  return -1;
2354  frame_queue_next(&is->sampq);
2355  } while (af->serial != is->audioq.serial);
2356 
2357  data_size = av_samples_get_buffer_size(NULL, af->frame->channels,
2358  af->frame->nb_samples,
2359  af->frame->format, 1);
2360 
2361  dec_channel_layout =
2362  (af->frame->channel_layout && af->frame->channels == av_get_channel_layout_nb_channels(af->frame->channel_layout)) ?
2363  af->frame->channel_layout : av_get_default_channel_layout(af->frame->channels);
2364  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2365 
2366  if (af->frame->format != is->audio_src.fmt ||
2367  dec_channel_layout != is->audio_src.channel_layout ||
2368  af->frame->sample_rate != is->audio_src.freq ||
2369  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2370  swr_free(&is->swr_ctx);
2371  is->swr_ctx = swr_alloc_set_opts(NULL,
2372  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2373  dec_channel_layout, af->frame->format, af->frame->sample_rate,
2374  0, NULL);
2375  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2377  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2380  swr_free(&is->swr_ctx);
2381  return -1;
2382  }
2383  is->audio_src.channel_layout = dec_channel_layout;
2384  is->audio_src.channels = af->frame->channels;
2385  is->audio_src.freq = af->frame->sample_rate;
2386  is->audio_src.fmt = af->frame->format;
2387  }
2388 
2389  if (is->swr_ctx) {
2390  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2391  uint8_t **out = &is->audio_buf1;
2392  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2393  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2394  int len2;
2395  if (out_size < 0) {
2396  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2397  return -1;
2398  }
2399  if (wanted_nb_samples != af->frame->nb_samples) {
2400  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2401  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2402  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2403  return -1;
2404  }
2405  }
2406  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2407  if (!is->audio_buf1)
2408  return AVERROR(ENOMEM);
2409  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2410  if (len2 < 0) {
2411  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2412  return -1;
2413  }
2414  if (len2 == out_count) {
2415  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2416  if (swr_init(is->swr_ctx) < 0)
2417  swr_free(&is->swr_ctx);
2418  }
2419  is->audio_buf = is->audio_buf1;
2420  resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2421  } else {
2422  is->audio_buf = af->frame->data[0];
2423  resampled_data_size = data_size;
2424  }
2425 
2426  audio_clock0 = is->audio_clock;
2427  /* update the audio clock with the pts */
2428  if (!isnan(af->pts))
2429  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2430  else
2431  is->audio_clock = NAN;
2432  is->audio_clock_serial = af->serial;
2433 #ifdef DEBUG
2434  {
2435  static double last_clock;
2436  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2437  is->audio_clock - last_clock,
2438  is->audio_clock, audio_clock0);
2439  last_clock = is->audio_clock;
2440  }
2441 #endif
2442  return resampled_data_size;
2443 }
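/* Worked example (hypothetical numbers): resampling 1126 wanted samples from a
 * 44100 Hz source to a 48000 Hz target sizes the output buffer above for
 * 1126 * 48000 / 44100 + 256 = 1481 samples of headroom; if swr_convert() ever
 * fills the whole buffer, the "audio buffer is probably too small" warning
 * fires and the resampler is reinitialized. */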
2444 
2445 /* prepare a new audio buffer */
2446 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2447 {
2448  VideoState *is = opaque;
2449  int audio_size, len1;
2450 
2451  audio_callback_time = av_gettime_relative();
2452 
2453  while (len > 0) {
2454  if (is->audio_buf_index >= is->audio_buf_size) {
2455  audio_size = audio_decode_frame(is);
2456  if (audio_size < 0) {
2457  /* if error, just output silence */
2458  is->audio_buf = NULL;
2459  is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2460  } else {
2461  if (is->show_mode != SHOW_MODE_VIDEO)
2462  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2463  is->audio_buf_size = audio_size;
2464  }
2465  is->audio_buf_index = 0;
2466  }
2467  len1 = is->audio_buf_size - is->audio_buf_index;
2468  if (len1 > len)
2469  len1 = len;
2470  if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
2471  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2472  else {
2473  memset(stream, 0, len1);
2474  if (!is->muted && is->audio_buf)
2475  SDL_MixAudioFormat(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, is->audio_volume);
2476  }
2477  len -= len1;
2478  stream += len1;
2479  is->audio_buf_index += len1;
2480  }
2481  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2482  /* Let's assume the audio driver that is used by SDL has two periods. */
2483  if (!isnan(is->audio_clock)) {
2484  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2485  sync_clock_to_slave(&is->extclk, &is->audclk);
2486  }
2487 }
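/* Worked example (hypothetical numbers): with a 48000 Hz stereo S16 target
 * (bytes_per_sec = 192000), audio_hw_buf_size = 8192 and
 * audio_write_buf_size = 4096, the clock set above lags is->audio_clock by
 * (2 * 8192 + 4096) / 192000 ~= 0.107 s, accounting for the two assumed SDL
 * hardware periods plus the data still waiting in our own buffer. */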
2488 
2489 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2490 {
2491  SDL_AudioSpec wanted_spec, spec;
2492  const char *env;
2493  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2494  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2495  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2496 
2497  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2498  if (env) {
2499  wanted_nb_channels = atoi(env);
2500  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2501  }
2502  if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2503  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2504  wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2505  }
2506  wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2507  wanted_spec.channels = wanted_nb_channels;
2508  wanted_spec.freq = wanted_sample_rate;
2509  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2510  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2511  return -1;
2512  }
2513  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2514  next_sample_rate_idx--;
2515  wanted_spec.format = AUDIO_S16SYS;
2516  wanted_spec.silence = 0;
2517  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
2518  wanted_spec.callback = sdl_audio_callback;
2519  wanted_spec.userdata = opaque;
2520  while (!(audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE))) {
2521  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2522  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2523  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2524  if (!wanted_spec.channels) {
2525  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2526  wanted_spec.channels = wanted_nb_channels;
2527  if (!wanted_spec.freq) {
2529  "No more combinations to try, audio open failed\n");
2530  return -1;
2531  }
2532  }
2533  wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2534  }
2535  if (spec.format != AUDIO_S16SYS) {
2537  "SDL advised audio format %d is not supported!\n", spec.format);
2538  return -1;
2539  }
2540  if (spec.channels != wanted_spec.channels) {
2541  wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2542  if (!wanted_channel_layout) {
2544  "SDL advised channel count %d is not supported!\n", spec.channels);
2545  return -1;
2546  }
2547  }
2548 
2549  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2550  audio_hw_params->freq = spec.freq;
2551  audio_hw_params->channel_layout = wanted_channel_layout;
2552  audio_hw_params->channels = spec.channels;
2553  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
2554  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2555  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2556  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2557  return -1;
2558  }
2559  return spec.size;
2560 }
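/* Worked example (hypothetical numbers): for wanted_spec.freq = 48000 and
 * SDL_AUDIO_MAX_CALLBACKS_PER_SEC = 30, 48000 / 30 = 1600, av_log2(1600) = 10
 * and 2 << 10 = 2048, so FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2048) asks SDL for a
 * 2048-sample buffer, i.e. roughly 23 callbacks per second. */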
2561 
2562 /* open a given stream. Return 0 if OK */
2563 static int stream_component_open(VideoState *is, int stream_index)
2564 {
2565  AVFormatContext *ic = is->ic;
2566  AVCodecContext *avctx;
2567  AVCodec *codec;
2568  const char *forced_codec_name = NULL;
2569  AVDictionary *opts = NULL;
2570  AVDictionaryEntry *t = NULL;
2571  int sample_rate, nb_channels;
2572  int64_t channel_layout;
2573  int ret = 0;
2574  int stream_lowres = lowres;
2575 
2576  if (stream_index < 0 || stream_index >= ic->nb_streams)
2577  return -1;
2578 
2579  avctx = avcodec_alloc_context3(NULL);
2580  if (!avctx)
2581  return AVERROR(ENOMEM);
2582 
2583  ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
2584  if (ret < 0)
2585  goto fail;
2586  avctx->pkt_timebase = ic->streams[stream_index]->time_base;
2587 
2588  codec = avcodec_find_decoder(avctx->codec_id);
2589 
2590  switch(avctx->codec_type){
2591  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2592  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2593  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2594  }
2595  if (forced_codec_name)
2596  codec = avcodec_find_decoder_by_name(forced_codec_name);
2597  if (!codec) {
2598  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2599  "No codec could be found with name '%s'\n", forced_codec_name);
2600  else av_log(NULL, AV_LOG_WARNING,
2601  "No decoder could be found for codec %s\n", avcodec_get_name(avctx->codec_id));
2602  ret = AVERROR(EINVAL);
2603  goto fail;
2604  }
2605 
2606  avctx->codec_id = codec->id;
2607  if (stream_lowres > codec->max_lowres) {
2608  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2609  codec->max_lowres);
2610  stream_lowres = codec->max_lowres;
2611  }
2612  avctx->lowres = stream_lowres;
2613 
2614  if (fast)
2615  avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2616 
2617  opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2618  if (!av_dict_get(opts, "threads", NULL, 0))
2619  av_dict_set(&opts, "threads", "auto", 0);
2620  if (stream_lowres)
2621  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2622  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2623  av_dict_set(&opts, "refcounted_frames", "1", 0);
2624  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2625  goto fail;
2626  }
2627  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2628  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2629  ret = AVERROR_OPTION_NOT_FOUND;
2630  goto fail;
2631  }
2632 
2633  is->eof = 0;
2634  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2635  switch (avctx->codec_type) {
2636  case AVMEDIA_TYPE_AUDIO:
2637 #if CONFIG_AVFILTER
2638  {
2639  AVFilterContext *sink;
2640 
2641  is->audio_filter_src.freq = avctx->sample_rate;
2642  is->audio_filter_src.channels = avctx->channels;
2643  is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2644  is->audio_filter_src.fmt = avctx->sample_fmt;
2645  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2646  goto fail;
2647  sink = is->out_audio_filter;
2648  sample_rate = av_buffersink_get_sample_rate(sink);
2649  nb_channels = av_buffersink_get_channels(sink);
2650  channel_layout = av_buffersink_get_channel_layout(sink);
2651  }
2652 #else
2653  sample_rate = avctx->sample_rate;
2654  nb_channels = avctx->channels;
2655  channel_layout = avctx->channel_layout;
2656 #endif
2657 
2658  /* prepare audio output */
2659  if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2660  goto fail;
2661  is->audio_hw_buf_size = ret;
2662  is->audio_src = is->audio_tgt;
2663  is->audio_buf_size = 0;
2664  is->audio_buf_index = 0;
2665 
2666  /* init averaging filter */
2667  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2668  is->audio_diff_avg_count = 0;
2669  /* since we do not have a precise enough audio FIFO fullness,
2670  we correct audio sync only if larger than this threshold */
2671  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2672 
2673  is->audio_stream = stream_index;
2674  is->audio_st = ic->streams[stream_index];
2675 
2676  decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
2677  if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
2678  is->auddec.start_pts = is->audio_st->start_time;
2679  is->auddec.start_pts_tb = is->audio_st->time_base;
2680  }
2681  if ((ret = decoder_start(&is->auddec, audio_thread, "audio_decoder", is)) < 0)
2682  goto out;
2683  SDL_PauseAudioDevice(audio_dev, 0);
2684  break;
2685  case AVMEDIA_TYPE_VIDEO:
2686  is->video_stream = stream_index;
2687  is->video_st = ic->streams[stream_index];
2688 
2689  decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
2690  if ((ret = decoder_start(&is->viddec, video_thread, "video_decoder", is)) < 0)
2691  goto out;
2692  is->queue_attachments_req = 1;
2693  break;
2694  case AVMEDIA_TYPE_SUBTITLE:
2695  is->subtitle_stream = stream_index;
2696  is->subtitle_st = ic->streams[stream_index];
2697 
2698  decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread);
2699  if ((ret = decoder_start(&is->subdec, subtitle_thread, "subtitle_decoder", is)) < 0)
2700  goto out;
2701  break;
2702  default:
2703  break;
2704  }
2705  goto out;
2706 
2707 fail:
2708  avcodec_free_context(&avctx);
2709 out:
2710  av_dict_free(&opts);
2711 
2712  return ret;
2713 }
2714 
2715 static int decode_interrupt_cb(void *ctx)
2716 {
2717  VideoState *is = ctx;
2718  return is->abort_request;
2719 }
2720 
2721 static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue) {
2722  return stream_id < 0 ||
2723  queue->abort_request ||
2724  (st->disposition & AV_DISPOSITION_ATTACHED_PIC) ||
2725  queue->nb_packets > MIN_FRAMES && (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0);
2726 }
2727 
2728 static int is_realtime(AVFormatContext *s)
2729 {
2730  if( !strcmp(s->iformat->name, "rtp")
2731  || !strcmp(s->iformat->name, "rtsp")
2732  || !strcmp(s->iformat->name, "sdp")
2733  )
2734  return 1;
2735 
2736  if(s->pb && ( !strncmp(s->url, "rtp:", 4)
2737  || !strncmp(s->url, "udp:", 4)
2738  )
2739  )
2740  return 1;
2741  return 0;
2742 }
2743 
2744 /* this thread gets the stream from the disk or the network */
2745 static int read_thread(void *arg)
2746 {
2747  VideoState *is = arg;
2748  AVFormatContext *ic = NULL;
2749  int err, i, ret;
2750  int st_index[AVMEDIA_TYPE_NB];
2751  AVPacket pkt1, *pkt = &pkt1;
2752  int64_t stream_start_time;
2753  int pkt_in_play_range = 0;
2754  AVDictionaryEntry *t;
2755  SDL_mutex *wait_mutex = SDL_CreateMutex();
2756  int scan_all_pmts_set = 0;
2757  int64_t pkt_ts;
2758 
2759  if (!wait_mutex) {
2760  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
2761  ret = AVERROR(ENOMEM);
2762  goto fail;
2763  }
2764 
2765  memset(st_index, -1, sizeof(st_index));
2766  is->last_video_stream = is->video_stream = -1;
2767  is->last_audio_stream = is->audio_stream = -1;
2768  is->last_subtitle_stream = is->subtitle_stream = -1;
2769  is->eof = 0;
2770 
2771  ic = avformat_alloc_context();
2772  if (!ic) {
2773  av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2774  ret = AVERROR(ENOMEM);
2775  goto fail;
2776  }
2777  ic->interrupt_callback.callback = decode_interrupt_cb;
2778  ic->interrupt_callback.opaque = is;
2779  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2780  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2781  scan_all_pmts_set = 1;
2782  }
2783  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2784  if (err < 0) {
2785  print_error(is->filename, err);
2786  ret = -1;
2787  goto fail;
2788  }
2789  if (scan_all_pmts_set)
2790  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2791 
2792  if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2793  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2794  ret = AVERROR_OPTION_NOT_FOUND;
2795  goto fail;
2796  }
2797  is->ic = ic;
2798 
2799  if (genpts)
2800  ic->flags |= AVFMT_FLAG_GENPTS;
2801 
2802  av_format_inject_global_side_data(ic);
2803 
2804  if (find_stream_info) {
2805  AVDictionary **opts = setup_find_stream_info_opts(ic, codec_opts);
2806  int orig_nb_streams = ic->nb_streams;
2807 
2808  err = avformat_find_stream_info(ic, opts);
2809 
2810  for (i = 0; i < orig_nb_streams; i++)
2811  av_dict_free(&opts[i]);
2812  av_freep(&opts);
2813 
2814  if (err < 0) {
2816  "%s: could not find codec parameters\n", is->filename);
2817  ret = -1;
2818  goto fail;
2819  }
2820  }
2821 
2822  if (ic->pb)
2823  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2824 
2825  if (seek_by_bytes < 0)
2826  seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
2827 
2828  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2829 
2830  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2831  window_title = av_asprintf("%s - %s", t->value, input_filename);
2832 
2833  /* if seeking requested, we execute it */
2834  if (start_time != AV_NOPTS_VALUE) {
2835  int64_t timestamp;
2836 
2837  timestamp = start_time;
2838  /* add the stream start time */
2839  if (ic->start_time != AV_NOPTS_VALUE)
2840  timestamp += ic->start_time;
2841  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2842  if (ret < 0) {
2843  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2844  is->filename, (double)timestamp / AV_TIME_BASE);
2845  }
2846  }
2847 
2848  is->realtime = is_realtime(ic);
2849 
2850  if (show_status)
2851  av_dump_format(ic, 0, is->filename, 0);
2852 
2853  for (i = 0; i < ic->nb_streams; i++) {
2854  AVStream *st = ic->streams[i];
2855  enum AVMediaType type = st->codecpar->codec_type;
2856  st->discard = AVDISCARD_ALL;
2857  if (type >= 0 && wanted_stream_spec[type] && st_index[type] == -1)
2858  if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
2859  st_index[type] = i;
2860  }
2861  for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
2862  if (wanted_stream_spec[i] && st_index[i] == -1) {
2863  av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
2864  st_index[i] = INT_MAX;
2865  }
2866  }
2867 
2868  if (!video_disable)
2869  st_index[AVMEDIA_TYPE_VIDEO] =
2870  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2871  st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2872  if (!audio_disable)
2873  st_index[AVMEDIA_TYPE_AUDIO] =
2874  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2875  st_index[AVMEDIA_TYPE_AUDIO],
2876  st_index[AVMEDIA_TYPE_VIDEO],
2877  NULL, 0);
2878  if (!video_disable && !subtitle_disable)
2879  st_index[AVMEDIA_TYPE_SUBTITLE] =
2880  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2881  st_index[AVMEDIA_TYPE_SUBTITLE],
2882  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2883  st_index[AVMEDIA_TYPE_AUDIO] :
2884  st_index[AVMEDIA_TYPE_VIDEO]),
2885  NULL, 0);
2886 
2887  is->show_mode = show_mode;
2888  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2889  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2890  AVCodecParameters *codecpar = st->codecpar;
2891  AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
2892  if (codecpar->width)
2893  set_default_window_size(codecpar->width, codecpar->height, sar);
2894  }
2895 
2896  /* open the streams */
2897  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2898  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2899  }
2900 
2901  ret = -1;
2902  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2903  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2904  }
2905  if (is->show_mode == SHOW_MODE_NONE)
2906  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2907 
2908  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2909  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2910  }
2911 
2912  if (is->video_stream < 0 && is->audio_stream < 0) {
2913  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
2914  is->filename);
2915  ret = -1;
2916  goto fail;
2917  }
2918 
2919  if (infinite_buffer < 0 && is->realtime)
2920  infinite_buffer = 1;
2921 
2922  for (;;) {
2923  if (is->abort_request)
2924  break;
2925  if (is->paused != is->last_paused) {
2926  is->last_paused = is->paused;
2927  if (is->paused)
2928  is->read_pause_return = av_read_pause(ic);
2929  else
2930  av_read_play(ic);
2931  }
2932 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2933  if (is->paused &&
2934  (!strcmp(ic->iformat->name, "rtsp") ||
2935  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2936  /* wait 10 ms to avoid trying to get another packet */
2937  /* XXX: horrible */
2938  SDL_Delay(10);
2939  continue;
2940  }
2941 #endif
2942  if (is->seek_req) {
2943  int64_t seek_target = is->seek_pos;
2944  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2945  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2946 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2947 // of the seek_pos/seek_rel variables
2948 
2949  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2950  if (ret < 0) {
2952  "%s: error while seeking\n", is->ic->url);
2953  } else {
2954  if (is->audio_stream >= 0) {
2955  packet_queue_flush(&is->audioq);
2956  packet_queue_put(&is->audioq, &flush_pkt);
2957  }
2958  if (is->subtitle_stream >= 0) {
2959  packet_queue_flush(&is->subtitleq);
2960  packet_queue_put(&is->subtitleq, &flush_pkt);
2961  }
2962  if (is->video_stream >= 0) {
2963  packet_queue_flush(&is->videoq);
2964  packet_queue_put(&is->videoq, &flush_pkt);
2965  }
2966  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
2967  set_clock(&is->extclk, NAN, 0);
2968  } else {
2969  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
2970  }
2971  }
2972  is->seek_req = 0;
2973  is->queue_attachments_req = 1;
2974  is->eof = 0;
2975  if (is->paused)
2976  step_to_next_frame(is);
2977  }
2978  if (is->queue_attachments_req) {
2979  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
2980  AVPacket copy;
2981  if ((ret = av_packet_ref(&copy, &is->video_st->attached_pic)) < 0)
2982  goto fail;
2983  packet_queue_put(&is->videoq, &copy);
2984  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
2985  }
2986  is->queue_attachments_req = 0;
2987  }
2988 
2989  /* if the queues are full, no need to read more */
2990  if (infinite_buffer<1 &&
2991  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2992  || (stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq) &&
2993  stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq) &&
2994  stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq)))) {
2995  /* wait 10 ms */
2996  SDL_LockMutex(wait_mutex);
2997  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2998  SDL_UnlockMutex(wait_mutex);
2999  continue;
3000  }
3001  if (!is->paused &&
3002  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
3003  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
3004  if (loop != 1 && (!loop || --loop)) {
3005  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
3006  } else if (autoexit) {
3007  ret = AVERROR_EOF;
3008  goto fail;
3009  }
3010  }
3011  ret = av_read_frame(ic, pkt);
3012  if (ret < 0) {
3013  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
3014  if (is->video_stream >= 0)
3015  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
3016  if (is->audio_stream >= 0)
3017  packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
3018  if (is->subtitle_stream >= 0)
3019  packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
3020  is->eof = 1;
3021  }
3022  if (ic->pb && ic->pb->error)
3023  break;
3024  SDL_LockMutex(wait_mutex);
3025  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3026  SDL_UnlockMutex(wait_mutex);
3027  continue;
3028  } else {
3029  is->eof = 0;
3030  }
3031  /* check if packet is in play range specified by user, then queue, otherwise discard */
3032  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3033  pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
3034  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3035  (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3036  av_q2d(ic->streams[pkt->stream_index]->time_base) -
3037  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3038  <= ((double)duration / 1000000);
3039  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3040  packet_queue_put(&is->audioq, pkt);
3041  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3042  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3043  packet_queue_put(&is->videoq, pkt);
3044  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3045  packet_queue_put(&is->subtitleq, pkt);
3046  } else {
3047  av_packet_unref(pkt);
3048  }
3049  }
3050 
3051  ret = 0;
3052  fail:
3053  if (ic && !is->ic)
3054  avformat_close_input(&ic);
3055 
3056  if (ret != 0) {
3057  SDL_Event event;
3058 
3059  event.type = FF_QUIT_EVENT;
3060  event.user.data1 = is;
3061  SDL_PushEvent(&event);
3062  }
3063  SDL_DestroyMutex(wait_mutex);
3064  return 0;
3065 }
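/* Worked example for the pkt_in_play_range test in read_thread above
 * (hypothetical numbers): with -ss 60 -t 10, a stream time_base of 1/90000 and
 * stream_start_time = 0, a packet with pts 6345000 plays at 70.5 s;
 * 70.5 - 60 = 10.5 > 10, so the packet is outside the requested range and is
 * discarded. */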
3066 
3067 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
3068 {
3069  VideoState *is;
3070 
3071  is = av_mallocz(sizeof(VideoState));
3072  if (!is)
3073  return NULL;
3074  is->filename = av_strdup(filename);
3075  if (!is->filename)
3076  goto fail;
3077  is->iformat = iformat;
3078  is->ytop = 0;
3079  is->xleft = 0;
3080 
3081  /* start video display */
3082  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3083  goto fail;
3084  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3085  goto fail;
3086  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3087  goto fail;
3088 
3089  if (packet_queue_init(&is->videoq) < 0 ||
3090  packet_queue_init(&is->audioq) < 0 ||
3091  packet_queue_init(&is->subtitleq) < 0)
3092  goto fail;
3093 
3094  if (!(is->continue_read_thread = SDL_CreateCond())) {
3095  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
3096  goto fail;
3097  }
3098 
3099  init_clock(&is->vidclk, &is->videoq.serial);
3100  init_clock(&is->audclk, &is->audioq.serial);
3101  init_clock(&is->extclk, &is->extclk.serial);
3102  is->audio_clock_serial = -1;
3103  if (startup_volume < 0)
3104  av_log(NULL, AV_LOG_WARNING, "-volume=%d < 0, setting to 0\n", startup_volume);
3105  if (startup_volume > 100)
3106  av_log(NULL, AV_LOG_WARNING, "-volume=%d > 100, setting to 100\n", startup_volume);
3107  startup_volume = av_clip(startup_volume, 0, 100);
3108  startup_volume = av_clip(SDL_MIX_MAXVOLUME * startup_volume / 100, 0, SDL_MIX_MAXVOLUME);
3109  is->audio_volume = startup_volume;
3110  is->muted = 0;
3111  is->av_sync_type = av_sync_type;
3112  is->read_tid = SDL_CreateThread(read_thread, "read_thread", is);
3113  if (!is->read_tid) {
3114  av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
3115 fail:
3116  stream_close(is);
3117  return NULL;
3118  }
3119  return is;
3120 }
3121 
3122 static void stream_cycle_channel(VideoState *is, int codec_type)
3123 {
3124  AVFormatContext *ic = is->ic;
3125  int start_index, stream_index;
3126  int old_index;
3127  AVStream *st;
3128  AVProgram *p = NULL;
3129  int nb_streams = is->ic->nb_streams;
3130 
3131  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3132  start_index = is->last_video_stream;
3133  old_index = is->video_stream;
3134  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3135  start_index = is->last_audio_stream;
3136  old_index = is->audio_stream;
3137  } else {
3138  start_index = is->last_subtitle_stream;
3139  old_index = is->subtitle_stream;
3140  }
3141  stream_index = start_index;
3142 
3143  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3144  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3145  if (p) {
3146  nb_streams = p->nb_stream_indexes;
3147  for (start_index = 0; start_index < nb_streams; start_index++)
3148  if (p->stream_index[start_index] == stream_index)
3149  break;
3150  if (start_index == nb_streams)
3151  start_index = -1;
3152  stream_index = start_index;
3153  }
3154  }
3155 
3156  for (;;) {
3157  if (++stream_index >= nb_streams)
3158  {
3159  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3160  {
3161  stream_index = -1;
3162  is->last_subtitle_stream = -1;
3163  goto the_end;
3164  }
3165  if (start_index == -1)
3166  return;
3167  stream_index = 0;
3168  }
3169  if (stream_index == start_index)
3170  return;
3171  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3172  if (st->codecpar->codec_type == codec_type) {
3173  /* check that parameters are OK */
3174  switch (codec_type) {
3175  case AVMEDIA_TYPE_AUDIO:
3176  if (st->codecpar->sample_rate != 0 &&
3177  st->codecpar->channels != 0)
3178  goto the_end;
3179  break;
3180  case AVMEDIA_TYPE_VIDEO:
3181  case AVMEDIA_TYPE_SUBTITLE:
3182  goto the_end;
3183  default:
3184  break;
3185  }
3186  }
3187  }
3188  the_end:
3189  if (p && stream_index != -1)
3190  stream_index = p->stream_index[stream_index];
3191  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3192  av_get_media_type_string(codec_type),
3193  old_index,
3194  stream_index);
3195 
3196  stream_component_close(is, old_index);
3197  stream_component_open(is, stream_index);
3198 }
3199 
3200 
3201 static void toggle_full_screen(VideoState *is)
3202 {
3203  is_full_screen = !is_full_screen;
3204  SDL_SetWindowFullscreen(window, is_full_screen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0);
3205 }
3206 
3207 static void toggle_audio_display(VideoState *is)
3208 {
3209  int next = is->show_mode;
3210  do {
3211  next = (next + 1) % SHOW_MODE_NB;
3212  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3213  if (is->show_mode != next) {
3214  is->force_refresh = 1;
3215  is->show_mode = next;
3216  }
3217 }
3218 
3219 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3220  double remaining_time = 0.0;
3221  SDL_PumpEvents();
3222  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
3223  if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3224  SDL_ShowCursor(0);
3225  cursor_hidden = 1;
3226  }
3227  if (remaining_time > 0.0)
3228  av_usleep((int64_t)(remaining_time * 1000000.0));
3229  remaining_time = REFRESH_RATE;
3230  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3231  video_refresh(is, &remaining_time);
3232  SDL_PumpEvents();
3233  }
3234 }
3235 
3236 static void seek_chapter(VideoState *is, int incr)
3237 {
3238  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3239  int i;
3240 
3241  if (!is->ic->nb_chapters)
3242  return;
3243 
3244  /* find the current chapter */
3245  for (i = 0; i < is->ic->nb_chapters; i++) {
3246  AVChapter *ch = is->ic->chapters[i];
3247  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3248  i--;
3249  break;
3250  }
3251  }
3252 
3253  i += incr;
3254  i = FFMAX(i, 0);
3255  if (i >= is->ic->nb_chapters)
3256  return;
3257 
3258  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3259  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3260  AV_TIME_BASE_Q), 0, 0);
3261 }
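/* Illustrative example (hypothetical numbers): a chapter starting at 90000 in
 * a {1, 1000} time_base lies 90 s into the file, so av_rescale_q() above turns
 * it into 90000000 in AV_TIME_BASE_Q (microseconds) before stream_seek(). */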
3262 
3263 /* handle an event sent by the GUI */
3264 static void event_loop(VideoState *cur_stream)
3265 {
3266  SDL_Event event;
3267  double incr, pos, frac;
3268 
3269  for (;;) {
3270  double x;
3271  refresh_loop_wait_event(cur_stream, &event);
3272  switch (event.type) {
3273  case SDL_KEYDOWN:
3274  if (exit_on_keydown || event.key.keysym.sym == SDLK_ESCAPE || event.key.keysym.sym == SDLK_q) {
3275  do_exit(cur_stream);
3276  break;
3277  }
3278  // If we don't yet have a window, skip all key events, because read_thread might still be initializing...
3279  if (!cur_stream->width)
3280  continue;
3281  switch (event.key.keysym.sym) {
3282  case SDLK_f:
3283  toggle_full_screen(cur_stream);
3284  cur_stream->force_refresh = 1;
3285  break;
3286  case SDLK_p:
3287  case SDLK_SPACE:
3288  toggle_pause(cur_stream);
3289  break;
3290  case SDLK_m:
3291  toggle_mute(cur_stream);
3292  break;
3293  case SDLK_KP_MULTIPLY:
3294  case SDLK_0:
3295  update_volume(cur_stream, 1, SDL_VOLUME_STEP);
3296  break;
3297  case SDLK_KP_DIVIDE:
3298  case SDLK_9:
3299  update_volume(cur_stream, -1, SDL_VOLUME_STEP);
3300  break;
3301  case SDLK_s: // S: Step to next frame
3302  step_to_next_frame(cur_stream);
3303  break;
3304  case SDLK_a:
3305  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3306  break;
3307  case SDLK_v:
3308  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3309  break;
3310  case SDLK_c:
3311  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3312  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3313  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3314  break;
3315  case SDLK_t:
3316  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3317  break;
3318  case SDLK_w:
3319 #if CONFIG_AVFILTER
3320  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3321  if (++cur_stream->vfilter_idx >= nb_vfilters)
3322  cur_stream->vfilter_idx = 0;
3323  } else {
3324  cur_stream->vfilter_idx = 0;
3325  toggle_audio_display(cur_stream);
3326  }
3327 #else
3328  toggle_audio_display(cur_stream);
3329 #endif
3330  break;
3331  case SDLK_PAGEUP:
3332  if (cur_stream->ic->nb_chapters <= 1) {
3333  incr = 600.0;
3334  goto do_seek;
3335  }
3336  seek_chapter(cur_stream, 1);
3337  break;
3338  case SDLK_PAGEDOWN:
3339  if (cur_stream->ic->nb_chapters <= 1) {
3340  incr = -600.0;
3341  goto do_seek;
3342  }
3343  seek_chapter(cur_stream, -1);
3344  break;
3345  case SDLK_LEFT:
3346  incr = seek_interval ? -seek_interval : -10.0;
3347  goto do_seek;
3348  case SDLK_RIGHT:
3349  incr = seek_interval ? seek_interval : 10.0;
3350  goto do_seek;
3351  case SDLK_UP:
3352  incr = 60.0;
3353  goto do_seek;
3354  case SDLK_DOWN:
3355  incr = -60.0;
3356  do_seek:
3357  if (seek_by_bytes) {
3358  pos = -1;
3359  if (pos < 0 && cur_stream->video_stream >= 0)
3360  pos = frame_queue_last_pos(&cur_stream->pictq);
3361  if (pos < 0 && cur_stream->audio_stream >= 0)
3362  pos = frame_queue_last_pos(&cur_stream->sampq);
3363  if (pos < 0)
3364  pos = avio_tell(cur_stream->ic->pb);
3365  if (cur_stream->ic->bit_rate)
3366  incr *= cur_stream->ic->bit_rate / 8.0;
3367  else
3368  incr *= 180000.0;
3369  pos += incr;
3370  stream_seek(cur_stream, pos, incr, 1);
3371  } else {
3372  pos = get_master_clock(cur_stream);
3373  if (isnan(pos))
3374  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3375  pos += incr;
3376  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3377  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3378  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3379  }
3380  break;
3381  default:
3382  break;
3383  }
3384  break;
3385  case SDL_MOUSEBUTTONDOWN:
3386  if (exit_on_mousedown) {
3387  do_exit(cur_stream);
3388  break;
3389  }
3390  if (event.button.button == SDL_BUTTON_LEFT) {
3391  static int64_t last_mouse_left_click = 0;
3392  if (av_gettime_relative() - last_mouse_left_click <= 500000) {
3393  toggle_full_screen(cur_stream);
3394  cur_stream->force_refresh = 1;
3395  last_mouse_left_click = 0;
3396  } else {
3397  last_mouse_left_click = av_gettime_relative();
3398  }
3399  }
3400  case SDL_MOUSEMOTION:
3401  if (cursor_hidden) {
3402  SDL_ShowCursor(1);
3403  cursor_hidden = 0;
3404  }
3405  cursor_last_shown = av_gettime_relative();
3406  if (event.type == SDL_MOUSEBUTTONDOWN) {
3407  if (event.button.button != SDL_BUTTON_RIGHT)
3408  break;
3409  x = event.button.x;
3410  } else {
3411  if (!(event.motion.state & SDL_BUTTON_RMASK))
3412  break;
3413  x = event.motion.x;
3414  }
3415  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3416  uint64_t size = avio_size(cur_stream->ic->pb);
3417  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3418  } else {
3419  int64_t ts;
3420  int ns, hh, mm, ss;
3421  int tns, thh, tmm, tss;
3422  tns = cur_stream->ic->duration / 1000000LL;
3423  thh = tns / 3600;
3424  tmm = (tns % 3600) / 60;
3425  tss = (tns % 60);
3426  frac = x / cur_stream->width;
3427  ns = frac * tns;
3428  hh = ns / 3600;
3429  mm = (ns % 3600) / 60;
3430  ss = (ns % 60);
3432  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3433  hh, mm, ss, thh, tmm, tss);
3434  ts = frac * cur_stream->ic->duration;
3435  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3436  ts += cur_stream->ic->start_time;
3437  stream_seek(cur_stream, ts, 0, 0);
3438  }
3439  break;
3440  case SDL_WINDOWEVENT:
3441  switch (event.window.event) {
3442  case SDL_WINDOWEVENT_SIZE_CHANGED:
3443  screen_width = cur_stream->width = event.window.data1;
3444  screen_height = cur_stream->height = event.window.data2;
3445  if (cur_stream->vis_texture) {
3446  SDL_DestroyTexture(cur_stream->vis_texture);
3447  cur_stream->vis_texture = NULL;
3448  }
3449  case SDL_WINDOWEVENT_EXPOSED:
3450  cur_stream->force_refresh = 1;
3451  }
3452  break;
3453  case SDL_QUIT:
3454  case FF_QUIT_EVENT:
3455  do_exit(cur_stream);
3456  break;
3457  default:
3458  break;
3459  }
3460  }
3461 }
3462 
3463 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3464 {
3465  av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3466  return opt_default(NULL, "video_size", arg);
3467 }
3468 
3469 static int opt_width(void *optctx, const char *opt, const char *arg)
3470 {
3471  screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3472  return 0;
3473 }
3474 
3475 static int opt_height(void *optctx, const char *opt, const char *arg)
3476 {
3477  screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3478  return 0;
3479 }
3480 
3481 static int opt_format(void *optctx, const char *opt, const char *arg)
3482 {
3483  file_iformat = av_find_input_format(arg);
3484  if (!file_iformat) {
3485  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3486  return AVERROR(EINVAL);
3487  }
3488  return 0;
3489 }
3490 
3491 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3492 {
3493  av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3494  return opt_default(NULL, "pixel_format", arg);
3495 }
3496 
3497 static int opt_sync(void *optctx, const char *opt, const char *arg)
3498 {
3499  if (!strcmp(arg, "audio"))
3500  av_sync_type = AV_SYNC_AUDIO_MASTER;
3501  else if (!strcmp(arg, "video"))
3502  av_sync_type = AV_SYNC_VIDEO_MASTER;
3503  else if (!strcmp(arg, "ext"))
3504  av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3505  else {
3506  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3507  exit(1);
3508  }
3509  return 0;
3510 }
3511 
3512 static int opt_seek(void *optctx, const char *opt, const char *arg)
3513 {
3514  start_time = parse_time_or_die(opt, arg, 1);
3515  return 0;
3516 }
3517 
3518 static int opt_duration(void *optctx, const char *opt, const char *arg)
3519 {
3520  duration = parse_time_or_die(opt, arg, 1);
3521  return 0;
3522 }
3523 
3524 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3525 {
3526  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3527  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3528  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3529  parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3530  return 0;
3531 }
3532 
3533 static void opt_input_file(void *optctx, const char *filename)
3534 {
3535  if (input_filename) {
3537  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3538  filename, input_filename);
3539  exit(1);
3540  }
3541  if (!strcmp(filename, "-"))
3542  filename = "pipe:";
3543  input_filename = filename;
3544 }
3545 
3546 static int opt_codec(void *optctx, const char *opt, const char *arg)
3547 {
3548  const char *spec = strchr(opt, ':');
3549  if (!spec) {
3551  "No media specifier was specified in '%s' in option '%s'\n",
3552  arg, opt);
3553  return AVERROR(EINVAL);
3554  }
3555  spec++;
3556  switch (spec[0]) {
3557  case 'a' : audio_codec_name = arg; break;
3558  case 's' : subtitle_codec_name = arg; break;
3559  case 'v' : video_codec_name = arg; break;
3560  default:
3562  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3563  return AVERROR(EINVAL);
3564  }
3565  return 0;
3566 }
3567 
3568 static int dummy;
3569 
3570 static const OptionDef options[] = {
3572  { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3573  { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3574  { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3575  { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3576  { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3577  { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3578  { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3579  { "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3580  { "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3581  { "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3582  { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3583  { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3584  { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3585  { "seek_interval", OPT_FLOAT | HAS_ARG, { &seek_interval }, "set seek interval for left/right keys, in seconds", "seconds" },
3586  { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3587  { "noborder", OPT_BOOL, { &borderless }, "borderless window" },
3588  { "alwaysontop", OPT_BOOL, { &alwaysontop }, "window always on top" },
3589  { "volume", OPT_INT | HAS_ARG, { &startup_volume}, "set startup volume 0=min 100=max", "volume" },
3590  { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3591  { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3592  { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3593  { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3594  { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3595  { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3596  { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3597  { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3598  { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3599  { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3600  { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3601  { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3602  { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3603  { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3604  { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3605  { "left", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_left }, "set the x position for the left of the window", "x pos" },
3606  { "top", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_top }, "set the y position for the top of the window", "y pos" },
3607 #if CONFIG_AVFILTER
3608  { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3609  { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3610 #endif
3611  { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3612  { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3613  { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3614  { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3615  { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3616  { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3617  { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3618  { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3619  { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
3620  { "find_stream_info", OPT_BOOL | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
3621  "read and decode the streams to fill missing information with heuristics" },
3622  { "filter_threads", HAS_ARG | OPT_INT | OPT_EXPERT, { &filter_nbthreads }, "number of filter threads per graph" },
3623  { NULL, },
3624 };
3625 
3626 static void show_usage(void)
3627 {
3628  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3629  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3630  av_log(NULL, AV_LOG_INFO, "\n");
3631 }
3632 
3633 void show_help_default(const char *opt, const char *arg)
3634 {
3635  av_log_set_callback(log_callback_help);
3636  show_usage();
3637  show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3638  show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3639  printf("\n");
3640  show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3641  show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3642 #if !CONFIG_AVFILTER
3643  show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3644 #else
3645  show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3646 #endif
3647  printf("\nWhile playing:\n"
3648  "q, ESC quit\n"
3649  "f toggle full screen\n"
3650  "p, SPC pause\n"
3651  "m toggle mute\n"
3652  "9, 0 decrease and increase volume respectively\n"
3653  "/, * decrease and increase volume respectively\n"
3654  "a cycle audio channel in the current program\n"
3655  "v cycle video channel\n"
3656  "t cycle subtitle channel in the current program\n"
3657  "c cycle program\n"
3658  "w cycle video filters or show modes\n"
3659  "s activate frame-step mode\n"
3660  "left/right seek backward/forward 10 seconds or to custom interval if -seek_interval is set\n"
3661  "down/up seek backward/forward 1 minute\n"
3662  "page down/page up seek backward/forward 10 minutes\n"
3663  "right mouse click seek to percentage in file corresponding to fraction of width\n"
3664  "left double-click toggle full screen\n"
3665  );
3666 }
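The key bindings listed above are handled by event_loop() through SDL keyboard events. The sketch below shows how a few of them (q/ESC, p/SPC, f) would typically be dispatched with SDL2; it is a standalone illustration assuming SDL2 is installed, not ffplay's event loop with its player state.

/* Minimal SDL2 key-dispatch sketch; not the ffplay event_loop(). */
#include <SDL.h>
#include <stdio.h>

int main(void)
{
    if (SDL_Init(SDL_INIT_VIDEO) != 0) {
        fprintf(stderr, "SDL_Init failed: %s\n", SDL_GetError());
        return 1;
    }
    SDL_Window *win = SDL_CreateWindow("keys", SDL_WINDOWPOS_UNDEFINED,
                                       SDL_WINDOWPOS_UNDEFINED, 320, 240, 0);
    int paused = 0, fullscreen = 0, running = 1;
    while (running) {
        SDL_Event ev;
        while (SDL_PollEvent(&ev)) {
            if (ev.type == SDL_QUIT) {
                running = 0;
            } else if (ev.type == SDL_KEYDOWN) {
                switch (ev.key.keysym.sym) {
                case SDLK_q:
                case SDLK_ESCAPE:              /* q, ESC: quit */
                    running = 0;
                    break;
                case SDLK_p:
                case SDLK_SPACE:               /* p, SPC: toggle pause */
                    paused = !paused;
                    break;
                case SDLK_f:                   /* f: toggle full screen */
                    fullscreen = !fullscreen;
                    SDL_SetWindowFullscreen(win, fullscreen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0);
                    break;
                }
            }
        }
        SDL_Delay(10);                         /* avoid a busy loop */
    }
    SDL_DestroyWindow(win);
    SDL_Quit();
    return 0;
}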
3667 
3668 /* Called from the main */
3669 int main(int argc, char **argv)
3670 {
3671  int flags;
3672  VideoState *is;
3673 
3674  init_dynload();
3675 
3676  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3677  parse_loglevel(argc, argv, options);
3678 
3679  /* register all codecs, demux and protocols */
3680 #if CONFIG_AVDEVICE
3681  avdevice_register_all();
3682 #endif
3683  avformat_network_init();
3684 
3685  init_opts();
3686 
3687  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3688  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3689 
3690  show_banner(argc, argv, options);
3691 
3692  parse_options(NULL, argc, argv, options, opt_input_file);
3693 
3694  if (!input_filename) {
3695  show_usage();
3696  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3697  av_log(NULL, AV_LOG_FATAL,
3698  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3699  exit(1);
3700  }
3701 
3702  if (display_disable) {
3703  video_disable = 1;
3704  }
3705  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3706  if (audio_disable)
3707  flags &= ~SDL_INIT_AUDIO;
3708  else {
3709  /* Try to work around an occasional ALSA buffer underflow issue when the
3710  * period size is NPOT due to ALSA resampling by forcing the buffer size. */
3711  if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
3712  SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1);
3713  }
3714  if (display_disable)
3715  flags &= ~SDL_INIT_VIDEO;
3716  if (SDL_Init (flags)) {
3717  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3718  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3719  exit(1);
3720  }
3721 
3722  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3723  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3724 
3725  av_init_packet(&flush_pkt);
3726  flush_pkt.data = (uint8_t *)&flush_pkt;
3727 
3728  if (!display_disable) {
3729  int flags = SDL_WINDOW_HIDDEN;
3730  if (alwaysontop)
3731 #if SDL_VERSION_ATLEAST(2,0,5)
3732  flags |= SDL_WINDOW_ALWAYS_ON_TOP;
3733 #else
3734  av_log(NULL, AV_LOG_WARNING, "Your SDL version doesn't support SDL_WINDOW_ALWAYS_ON_TOP. Feature will be inactive.\n");
3735 #endif
3736  if (borderless)
3737  flags |= SDL_WINDOW_BORDERLESS;
3738  else
3739  flags |= SDL_WINDOW_RESIZABLE;
3740  window = SDL_CreateWindow(program_name, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, default_width, default_height, flags);
3741  SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
3742  if (window) {
3743  renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
3744  if (!renderer) {
3745  av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
3746  renderer = SDL_CreateRenderer(window, -1, 0);
3747  }
3748  if (renderer) {
3749  if (!SDL_GetRendererInfo(renderer, &renderer_info))
3750  av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", renderer_info.name);
3751  }
3752  }
3753  if (!window || !renderer || !renderer_info.num_texture_formats) {
3754  av_log(NULL, AV_LOG_FATAL, "Failed to create window or renderer: %s", SDL_GetError());
3755  do_exit(NULL);
3756  }
3757  }
3758 
3759  is = stream_open(input_filename, file_iformat);
3760  if (!is) {
3761  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3762  do_exit(NULL);
3763  }
3764 
3765  event_loop(is);
3766 
3767  /* never returns */
3768 
3769  return 0;
3770 }
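main() creates the window hidden and first asks SDL for an accelerated, vsync-enabled renderer, dropping back to whatever default renderer SDL can provide before giving up. The following is a stripped-down sketch of just that fallback path, assuming SDL2 and leaving out ffplay's state and texture handling.

/* Renderer-creation fallback sketch; mirrors the accelerated -> default
 * fallback seen in main() above, but is not the ffplay code itself. */
#include <SDL.h>
#include <stdio.h>

int main(void)
{
    if (SDL_Init(SDL_INIT_VIDEO) != 0) {
        fprintf(stderr, "SDL_Init failed: %s\n", SDL_GetError());
        return 1;
    }
    SDL_Window *window = SDL_CreateWindow("demo", SDL_WINDOWPOS_UNDEFINED,
                                          SDL_WINDOWPOS_UNDEFINED, 640, 480,
                                          SDL_WINDOW_HIDDEN | SDL_WINDOW_RESIZABLE);
    SDL_Renderer *renderer = NULL;
    if (window) {
        /* Prefer a hardware-accelerated, vsynced renderer... */
        renderer = SDL_CreateRenderer(window, -1,
                                      SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
        /* ...but fall back to any renderer SDL offers (possibly software). */
        if (!renderer) {
            fprintf(stderr, "accelerated renderer unavailable: %s\n", SDL_GetError());
            renderer = SDL_CreateRenderer(window, -1, 0);
        }
    }
    if (!window || !renderer) {
        fprintf(stderr, "could not create window/renderer: %s\n", SDL_GetError());
        SDL_Quit();
        return 1;
    }
    SDL_RendererInfo info;
    if (!SDL_GetRendererInfo(renderer, &info))      /* returns 0 on success */
        printf("using %s renderer\n", info.name);
    SDL_DestroyRenderer(renderer);
    SDL_DestroyWindow(window);
    SDL_Quit();
    return 0;
}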