ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include <stdint.h>
32 
33 #include "libavutil/avstring.h"
34 #include "libavutil/eval.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/dict.h"
39 #include "libavutil/fifo.h"
40 #include "libavutil/parseutils.h"
41 #include "libavutil/samplefmt.h"
42 #include "libavutil/avassert.h"
43 #include "libavutil/time.h"
44 #include "libavutil/bprint.h"
45 #include "libavformat/avformat.h"
46 #include "libavdevice/avdevice.h"
47 #include "libswscale/swscale.h"
48 #include "libavutil/opt.h"
49 #include "libavcodec/avfft.h"
50 #include "libswresample/swresample.h"
51 
52 #if CONFIG_AVFILTER
53 # include "libavfilter/avfilter.h"
54 # include "libavfilter/buffersink.h"
55 # include "libavfilter/buffersrc.h"
56 #endif
57 
58 #include <SDL.h>
59 #include <SDL_thread.h>
60 
61 #include "cmdutils.h"
62 
63 #include <assert.h>
64 
65 const char program_name[] = "ffplay";
66 const int program_birth_year = 2003;
67 
68 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
69 #define MIN_FRAMES 25
70 #define EXTERNAL_CLOCK_MIN_FRAMES 2
71 #define EXTERNAL_CLOCK_MAX_FRAMES 10
72 
73 /* Minimum SDL audio buffer size, in samples. */
74 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
75 /* Calculate actual buffer size, keeping in mind not to cause too frequent audio callbacks */
76 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
77 
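/*
 * Rough sizing example (a sketch of the intent, not the exact computation done
 * when the audio device is opened later): for a 48 kHz device, 48000 / 30 =
 * 1600 samples per callback; rounding up to the next power of two gives a
 * 2048-sample buffer, comfortably above SDL_AUDIO_MIN_BUFFER_SIZE (512).
 */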
78 /* Step size for volume control in dB */
79 #define SDL_VOLUME_STEP (0.75)
80 
81 /* no AV sync correction is done if below the minimum AV sync threshold */
82 #define AV_SYNC_THRESHOLD_MIN 0.04
83 /* AV sync correction is done if above the maximum AV sync threshold */
84 #define AV_SYNC_THRESHOLD_MAX 0.1
85 /* If a frame duration is longer than this, it will not be duplicated to compensate for AV sync */
86 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
87 /* no AV correction is done if too big error */
88 #define AV_NOSYNC_THRESHOLD 10.0
89 
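/*
 * Worked example of the thresholds above: with a 40 ms frame (25 fps) the sync
 * threshold is clamped to the frame duration, i.e. 0.04 s, so an A-V
 * difference of +/-20 ms is ignored while a 60 ms difference triggers a
 * correction.  Differences beyond AV_NOSYNC_THRESHOLD (10 s) are treated as a
 * timestamp discontinuity: the clock is reset instead of gradually corrected.
 */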
90 /* maximum audio speed change to get correct sync */
91 #define SAMPLE_CORRECTION_PERCENT_MAX 10
92 
93 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
94 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
95 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
96 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
97 
98 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
99 #define AUDIO_DIFF_AVG_NB 20
100 
101 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
102 #define REFRESH_RATE 0.01
103 
104 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
105 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
106 #define SAMPLE_ARRAY_SIZE (8 * 65536)
107 
108 #define CURSOR_HIDE_DELAY 1000000
109 
110 #define USE_ONEPASS_SUBTITLE_RENDER 1
111 
112 static unsigned sws_flags = SWS_BICUBIC;
113 
114 typedef struct MyAVPacketList {
115  AVPacket *pkt;
116  int serial;
117 } MyAVPacketList;
118 
119 typedef struct PacketQueue {
120  AVFifoBuffer *pkt_list;
121  int nb_packets;
122  int size;
123  int64_t duration;
124  int abort_request;
125  int serial;
126  SDL_mutex *mutex;
127  SDL_cond *cond;
128 } PacketQueue;
129 
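/*
 * The queue "serial" is the key to seek handling: every flush bumps q->serial,
 * and each queued packet remembers the serial it was added under.  Downstream
 * code (decoders, clocks, frame queues) compares its own serial against the
 * queue's current one and silently drops anything stale, so a seek never has
 * to wait for old data to drain.
 */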
130 #define VIDEO_PICTURE_QUEUE_SIZE 3
131 #define SUBPICTURE_QUEUE_SIZE 16
132 #define SAMPLE_QUEUE_SIZE 9
133 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
134 
135 typedef struct AudioParams {
136  int freq;
137  int channels;
138  int64_t channel_layout;
139  enum AVSampleFormat fmt;
140  int frame_size;
141  int bytes_per_sec;
142 } AudioParams;
143 
144 typedef struct Clock {
145  double pts; /* clock base */
146  double pts_drift; /* clock base minus time at which we updated the clock */
147  double last_updated;
148  double speed;
149  int serial; /* clock is based on a packet with this serial */
150  int paused;
151  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
152 } Clock;
153 
154 /* Common struct for handling all types of decoded data and allocated render buffers. */
155 typedef struct Frame {
156  AVFrame *frame;
157  AVSubtitle sub;
158  int serial;
159  double pts; /* presentation timestamp for the frame */
160  double duration; /* estimated duration of the frame */
161  int64_t pos; /* byte position of the frame in the input file */
162  int width;
163  int height;
164  int format;
165  AVRational sar;
166  int uploaded;
167  int flip_v;
168 } Frame;
169 
170 typedef struct FrameQueue {
171  Frame queue[FRAME_QUEUE_SIZE];
172  int rindex;
173  int windex;
174  int size;
175  int max_size;
176  int keep_last;
177  int rindex_shown;
178  SDL_mutex *mutex;
179  SDL_cond *cond;
180  PacketQueue *pktq;
181 } FrameQueue;
182 
183 enum {
184  AV_SYNC_AUDIO_MASTER, /* default choice */
185  AV_SYNC_VIDEO_MASTER, /* synchronize to the video clock */
186  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
187 };
188 
189 typedef struct Decoder {
190  AVPacket *pkt;
191  PacketQueue *queue;
192  AVCodecContext *avctx;
193  int pkt_serial;
194  int finished;
195  int packet_pending;
196  SDL_cond *empty_queue_cond;
197  int64_t start_pts;
198  AVRational start_pts_tb;
199  int64_t next_pts;
200  AVRational next_pts_tb;
201  SDL_Thread *decoder_tid;
202 } Decoder;
203 
204 typedef struct VideoState {
205  SDL_Thread *read_tid;
209  int paused;
212  int seek_req;
214  int64_t seek_pos;
215  int64_t seek_rel;
218  int realtime;
219 
223 
227 
231 
233 
235 
236  double audio_clock;
238  double audio_diff_cum; /* used for AV difference average computation */
247  unsigned int audio_buf_size; /* in bytes */
248  unsigned int audio_buf1_size;
249  int audio_buf_index; /* in bytes */
252  int muted;
254 #if CONFIG_AVFILTER
255  struct AudioParams audio_filter_src;
256 #endif
261 
262  enum ShowMode {
263  SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
264  } show_mode;
271  int xpos;
273  SDL_Texture *vis_texture;
274  SDL_Texture *sub_texture;
275  SDL_Texture *vid_texture;
276 
280 
281  double frame_timer;
287  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
290  int eof;
291 
292  char *filename;
294  int step;
295 
296 #if CONFIG_AVFILTER
297  int vfilter_idx;
298  AVFilterContext *in_video_filter; // the first filter in the video chain
299  AVFilterContext *out_video_filter; // the last filter in the video chain
300  AVFilterContext *in_audio_filter; // the first filter in the audio chain
301  AVFilterContext *out_audio_filter; // the last filter in the audio chain
302  AVFilterGraph *agraph; // audio filter graph
303 #endif
304 
306 
308 } VideoState;
309 
310 /* options specified by the user */
312 static const char *input_filename;
313 static const char *window_title;
314 static int default_width = 640;
315 static int default_height = 480;
316 static int screen_width = 0;
317 static int screen_height = 0;
318 static int screen_left = SDL_WINDOWPOS_CENTERED;
319 static int screen_top = SDL_WINDOWPOS_CENTERED;
320 static int audio_disable;
321 static int video_disable;
322 static int subtitle_disable;
323 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
324 static int seek_by_bytes = -1;
325 static float seek_interval = 10;
326 static int display_disable;
327 static int borderless;
328 static int alwaysontop;
329 static int startup_volume = 100;
330 static int show_status = -1;
332 static int64_t start_time = AV_NOPTS_VALUE;
333 static int64_t duration = AV_NOPTS_VALUE;
334 static int fast = 0;
335 static int genpts = 0;
336 static int lowres = 0;
337 static int decoder_reorder_pts = -1;
338 static int autoexit;
339 static int exit_on_keydown;
340 static int exit_on_mousedown;
341 static int loop = 1;
342 static int framedrop = -1;
343 static int infinite_buffer = -1;
344 static enum ShowMode show_mode = SHOW_MODE_NONE;
345 static const char *audio_codec_name;
346 static const char *subtitle_codec_name;
347 static const char *video_codec_name;
348 double rdftspeed = 0.02;
349 static int64_t cursor_last_shown;
350 static int cursor_hidden = 0;
351 #if CONFIG_AVFILTER
352 static const char **vfilters_list = NULL;
353 static int nb_vfilters = 0;
354 static char *afilters = NULL;
355 #endif
356 static int autorotate = 1;
357 static int find_stream_info = 1;
358 static int filter_nbthreads = 0;
359 
360 /* current context */
361 static int is_full_screen;
362 static int64_t audio_callback_time;
363 
364 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
365 
366 static SDL_Window *window;
367 static SDL_Renderer *renderer;
368 static SDL_RendererInfo renderer_info = {0};
369 static SDL_AudioDeviceID audio_dev;
370 
371 static const struct TextureFormatEntry {
372  enum AVPixelFormat format;
373  int texture_fmt;
374 } sdl_texture_format_map[] = {
375  { AV_PIX_FMT_RGB8, SDL_PIXELFORMAT_RGB332 },
376  { AV_PIX_FMT_RGB444, SDL_PIXELFORMAT_RGB444 },
377  { AV_PIX_FMT_RGB555, SDL_PIXELFORMAT_RGB555 },
378  { AV_PIX_FMT_BGR555, SDL_PIXELFORMAT_BGR555 },
379  { AV_PIX_FMT_RGB565, SDL_PIXELFORMAT_RGB565 },
380  { AV_PIX_FMT_BGR565, SDL_PIXELFORMAT_BGR565 },
381  { AV_PIX_FMT_RGB24, SDL_PIXELFORMAT_RGB24 },
382  { AV_PIX_FMT_BGR24, SDL_PIXELFORMAT_BGR24 },
383  { AV_PIX_FMT_0RGB32, SDL_PIXELFORMAT_RGB888 },
384  { AV_PIX_FMT_0BGR32, SDL_PIXELFORMAT_BGR888 },
385  { AV_PIX_FMT_NE(RGB0, 0BGR), SDL_PIXELFORMAT_RGBX8888 },
386  { AV_PIX_FMT_NE(BGR0, 0RGB), SDL_PIXELFORMAT_BGRX8888 },
387  { AV_PIX_FMT_RGB32, SDL_PIXELFORMAT_ARGB8888 },
388  { AV_PIX_FMT_RGB32_1, SDL_PIXELFORMAT_RGBA8888 },
389  { AV_PIX_FMT_BGR32, SDL_PIXELFORMAT_ABGR8888 },
390  { AV_PIX_FMT_BGR32_1, SDL_PIXELFORMAT_BGRA8888 },
391  { AV_PIX_FMT_YUV420P, SDL_PIXELFORMAT_IYUV },
392  { AV_PIX_FMT_YUYV422, SDL_PIXELFORMAT_YUY2 },
393  { AV_PIX_FMT_UYVY422, SDL_PIXELFORMAT_UYVY },
394  { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_UNKNOWN },
395 };
396 
397 #if CONFIG_AVFILTER
398 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
399 {
400  GROW_ARRAY(vfilters_list, nb_vfilters);
401  vfilters_list[nb_vfilters - 1] = arg;
402  return 0;
403 }
404 #endif
405 
406 static inline
407 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
408  enum AVSampleFormat fmt2, int64_t channel_count2)
409 {
410  /* If channel count == 1, planar and non-planar formats are the same */
411  if (channel_count1 == 1 && channel_count2 == 1)
412  return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
413  else
414  return channel_count1 != channel_count2 || fmt1 != fmt2;
415 }
416 
417 static inline
418 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
419 {
420  if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
421  return channel_layout;
422  else
423  return 0;
424 }
425 
426 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
427 {
428  MyAVPacketList pkt1;
429 
430  if (q->abort_request)
431  return -1;
432 
433  if (av_fifo_space(q->pkt_list) < sizeof(pkt1)) {
434  if (av_fifo_grow(q->pkt_list, sizeof(pkt1)) < 0)
435  return -1;
436  }
437 
438  pkt1.pkt = pkt;
439  pkt1.serial = q->serial;
440 
441  av_fifo_generic_write(q->pkt_list, &pkt1, sizeof(pkt1), NULL);
442  q->nb_packets++;
443  q->size += pkt1.pkt->size + sizeof(pkt1);
444  q->duration += pkt1.pkt->duration;
445  /* XXX: should duplicate packet data in DV case */
446  SDL_CondSignal(q->cond);
447  return 0;
448 }
449 
450 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
451 {
452  AVPacket *pkt1;
453  int ret;
454 
455  pkt1 = av_packet_alloc();
456  if (!pkt1) {
457  av_packet_unref(pkt);
458  return -1;
459  }
460  av_packet_move_ref(pkt1, pkt);
461 
462  SDL_LockMutex(q->mutex);
463  ret = packet_queue_put_private(q, pkt1);
464  SDL_UnlockMutex(q->mutex);
465 
466  if (ret < 0)
467  av_packet_free(&pkt1);
468 
469  return ret;
470 }
471 
472 static int packet_queue_put_nullpacket(PacketQueue *q, AVPacket *pkt, int stream_index)
473 {
474  pkt->stream_index = stream_index;
475  return packet_queue_put(q, pkt);
476 }
477 
478 /* packet queue handling */
479 static int packet_queue_init(PacketQueue *q)
480 {
481  memset(q, 0, sizeof(PacketQueue));
482  q->pkt_list = av_fifo_alloc(sizeof(MyAVPacketList));
483  if (!q->pkt_list)
484  return AVERROR(ENOMEM);
485  q->mutex = SDL_CreateMutex();
486  if (!q->mutex) {
487  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
488  return AVERROR(ENOMEM);
489  }
490  q->cond = SDL_CreateCond();
491  if (!q->cond) {
492  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
493  return AVERROR(ENOMEM);
494  }
495  q->abort_request = 1;
496  return 0;
497 }
498 
499 static void packet_queue_flush(PacketQueue *q)
500 {
501  MyAVPacketList pkt1;
502 
503  SDL_LockMutex(q->mutex);
504  while (av_fifo_size(q->pkt_list) >= sizeof(pkt1)) {
505  av_fifo_generic_read(q->pkt_list, &pkt1, sizeof(pkt1), NULL);
506  av_packet_free(&pkt1.pkt);
507  }
508  q->nb_packets = 0;
509  q->size = 0;
510  q->duration = 0;
511  q->serial++;
512  SDL_UnlockMutex(q->mutex);
513 }
514 
515 static void packet_queue_destroy(PacketQueue *q)
516 {
517  packet_queue_flush(q);
518  av_fifo_freep(&q->pkt_list);
519  SDL_DestroyMutex(q->mutex);
520  SDL_DestroyCond(q->cond);
521 }
522 
523 static void packet_queue_abort(PacketQueue *q)
524 {
525  SDL_LockMutex(q->mutex);
526 
527  q->abort_request = 1;
528 
529  SDL_CondSignal(q->cond);
530 
531  SDL_UnlockMutex(q->mutex);
532 }
533 
534 static void packet_queue_start(PacketQueue *q)
535 {
536  SDL_LockMutex(q->mutex);
537  q->abort_request = 0;
538  q->serial++;
539  SDL_UnlockMutex(q->mutex);
540 }
541 
542 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
543 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
544 {
545  MyAVPacketList pkt1;
546  int ret;
547 
548  SDL_LockMutex(q->mutex);
549 
550  for (;;) {
551  if (q->abort_request) {
552  ret = -1;
553  break;
554  }
555 
556  if (av_fifo_size(q->pkt_list) >= sizeof(pkt1)) {
557  av_fifo_generic_read(q->pkt_list, &pkt1, sizeof(pkt1), NULL);
558  q->nb_packets--;
559  q->size -= pkt1.pkt->size + sizeof(pkt1);
560  q->duration -= pkt1.pkt->duration;
561  av_packet_move_ref(pkt, pkt1.pkt);
562  if (serial)
563  *serial = pkt1.serial;
564  av_packet_free(&pkt1.pkt);
565  ret = 1;
566  break;
567  } else if (!block) {
568  ret = 0;
569  break;
570  } else {
571  SDL_CondWait(q->cond, q->mutex);
572  }
573  }
574  SDL_UnlockMutex(q->mutex);
575  return ret;
576 }
577 
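/*
 * A minimal sketch of how consumers use this queue (the real decoder loop is
 * below): block until a packet or an abort arrives, remember the serial the
 * packet was queued under, and unref the packet when done.
 *
 *     AVPacket pkt;
 *     int serial;
 *     if (packet_queue_get(q, &pkt, 1, &serial) < 0)
 *         return;                     // queue was aborted
 *     // ... decode, comparing serial against the queue's current serial ...
 *     av_packet_unref(&pkt);
 */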
578 static int decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
579  memset(d, 0, sizeof(Decoder));
580  d->pkt = av_packet_alloc();
581  if (!d->pkt)
582  return AVERROR(ENOMEM);
583  d->avctx = avctx;
584  d->queue = queue;
585  d->empty_queue_cond = empty_queue_cond;
586  d->start_pts = AV_NOPTS_VALUE;
587  d->pkt_serial = -1;
588  return 0;
589 }
590 
591 static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
592  int ret = AVERROR(EAGAIN);
593 
594  for (;;) {
595  if (d->queue->serial == d->pkt_serial) {
596  do {
597  if (d->queue->abort_request)
598  return -1;
599 
600  switch (d->avctx->codec_type) {
601  case AVMEDIA_TYPE_VIDEO:
602  ret = avcodec_receive_frame(d->avctx, frame);
603  if (ret >= 0) {
604  if (decoder_reorder_pts == -1) {
605  frame->pts = frame->best_effort_timestamp;
606  } else if (!decoder_reorder_pts) {
607  frame->pts = frame->pkt_dts;
608  }
609  }
610  break;
611  case AVMEDIA_TYPE_AUDIO:
612  ret = avcodec_receive_frame(d->avctx, frame);
613  if (ret >= 0) {
614  AVRational tb = (AVRational){1, frame->sample_rate};
615  if (frame->pts != AV_NOPTS_VALUE)
616  frame->pts = av_rescale_q(frame->pts, d->avctx->pkt_timebase, tb);
617  else if (d->next_pts != AV_NOPTS_VALUE)
618  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
619  if (frame->pts != AV_NOPTS_VALUE) {
620  d->next_pts = frame->pts + frame->nb_samples;
621  d->next_pts_tb = tb;
622  }
623  }
624  break;
625  }
626  if (ret == AVERROR_EOF) {
627  d->finished = d->pkt_serial;
628  avcodec_flush_buffers(d->avctx);
629  return 0;
630  }
631  if (ret >= 0)
632  return 1;
633  } while (ret != AVERROR(EAGAIN));
634  }
635 
636  do {
637  if (d->queue->nb_packets == 0)
638  SDL_CondSignal(d->empty_queue_cond);
639  if (d->packet_pending) {
640  d->packet_pending = 0;
641  } else {
642  int old_serial = d->pkt_serial;
643  if (packet_queue_get(d->queue, d->pkt, 1, &d->pkt_serial) < 0)
644  return -1;
645  if (old_serial != d->pkt_serial) {
646  avcodec_flush_buffers(d->avctx);
647  d->finished = 0;
648  d->next_pts = d->start_pts;
649  d->next_pts_tb = d->start_pts_tb;
650  }
651  }
652  if (d->queue->serial == d->pkt_serial)
653  break;
654  av_packet_unref(d->pkt);
655  } while (1);
656 
657  if (d->avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
658  int got_frame = 0;
659  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, d->pkt);
660  if (ret < 0) {
661  ret = AVERROR(EAGAIN);
662  } else {
663  if (got_frame && !d->pkt->data) {
664  d->packet_pending = 1;
665  }
666  ret = got_frame ? 0 : (d->pkt->data ? AVERROR(EAGAIN) : AVERROR_EOF);
667  }
668  av_packet_unref(d->pkt);
669  } else {
670  if (avcodec_send_packet(d->avctx, d->pkt) == AVERROR(EAGAIN)) {
671  av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
672  d->packet_pending = 1;
673  } else {
674  av_packet_unref(d->pkt);
675  }
676  }
677  }
678 }
679 
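/*
 * decoder_decode_frame() above wraps the send_packet/receive_frame API in a
 * loop: it first drains any frames already buffered in the codec, then pulls
 * the next packet from the queue, flushing the codec when the packet serial
 * changes after a seek.  Return values: 1 = a frame was produced, 0 = end of
 * stream for the current serial, -1 = the queue was aborted.
 */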
680 static void decoder_destroy(Decoder *d) {
681  av_packet_free(&d->pkt);
682  avcodec_free_context(&d->avctx);
683 }
684 
685 static void frame_queue_unref_item(Frame *vp)
686 {
687  av_frame_unref(vp->frame);
688  avsubtitle_free(&vp->sub);
689 }
690 
691 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
692 {
693  int i;
694  memset(f, 0, sizeof(FrameQueue));
695  if (!(f->mutex = SDL_CreateMutex())) {
696  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
697  return AVERROR(ENOMEM);
698  }
699  if (!(f->cond = SDL_CreateCond())) {
700  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
701  return AVERROR(ENOMEM);
702  }
703  f->pktq = pktq;
704  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
705  f->keep_last = !!keep_last;
706  for (i = 0; i < f->max_size; i++)
707  if (!(f->queue[i].frame = av_frame_alloc()))
708  return AVERROR(ENOMEM);
709  return 0;
710 }
711 
712 static void frame_queue_destory(FrameQueue *f)
713 {
714  int i;
715  for (i = 0; i < f->max_size; i++) {
716  Frame *vp = &f->queue[i];
717  frame_queue_unref_item(vp);
718  av_frame_free(&vp->frame);
719  }
720  SDL_DestroyMutex(f->mutex);
721  SDL_DestroyCond(f->cond);
722 }
723 
724 static void frame_queue_signal(FrameQueue *f)
725 {
726  SDL_LockMutex(f->mutex);
727  SDL_CondSignal(f->cond);
728  SDL_UnlockMutex(f->mutex);
729 }
730 
731 static Frame *frame_queue_peek(FrameQueue *f)
732 {
733  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
734 }
735 
736 static Frame *frame_queue_peek_next(FrameQueue *f)
737 {
738  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
739 }
740 
741 static Frame *frame_queue_peek_last(FrameQueue *f)
742 {
743  return &f->queue[f->rindex];
744 }
745 
746 static Frame *frame_queue_peek_writable(FrameQueue *f)
747 {
748  /* wait until we have space to put a new frame */
749  SDL_LockMutex(f->mutex);
750  while (f->size >= f->max_size &&
751  !f->pktq->abort_request) {
752  SDL_CondWait(f->cond, f->mutex);
753  }
754  SDL_UnlockMutex(f->mutex);
755 
756  if (f->pktq->abort_request)
757  return NULL;
758 
759  return &f->queue[f->windex];
760 }
761 
762 static Frame *frame_queue_peek_readable(FrameQueue *f)
763 {
764  /* wait until we have a readable new frame */
765  SDL_LockMutex(f->mutex);
766  while (f->size - f->rindex_shown <= 0 &&
767  !f->pktq->abort_request) {
768  SDL_CondWait(f->cond, f->mutex);
769  }
770  SDL_UnlockMutex(f->mutex);
771 
772  if (f->pktq->abort_request)
773  return NULL;
774 
775  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
776 }
777 
778 static void frame_queue_push(FrameQueue *f)
779 {
780  if (++f->windex == f->max_size)
781  f->windex = 0;
782  SDL_LockMutex(f->mutex);
783  f->size++;
784  SDL_CondSignal(f->cond);
785  SDL_UnlockMutex(f->mutex);
786 }
787 
788 static void frame_queue_next(FrameQueue *f)
789 {
790  if (f->keep_last && !f->rindex_shown) {
791  f->rindex_shown = 1;
792  return;
793  }
794  frame_queue_unref_item(&f->queue[f->rindex]);
795  if (++f->rindex == f->max_size)
796  f->rindex = 0;
797  SDL_LockMutex(f->mutex);
798  f->size--;
799  SDL_CondSignal(f->cond);
800  SDL_UnlockMutex(f->mutex);
801 }
802 
803 /* return the number of undisplayed frames in the queue */
804 static int frame_queue_nb_remaining(FrameQueue *f)
805 {
806  return f->size - f->rindex_shown;
807 }
808 
809 /* return last shown position */
810 static int64_t frame_queue_last_pos(FrameQueue *f)
811 {
812  Frame *fp = &f->queue[f->rindex];
813  if (f->rindex_shown && fp->serial == f->pktq->serial)
814  return fp->pos;
815  else
816  return -1;
817 }
818 
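/*
 * The FrameQueue is a fixed-size ring buffer protected by a mutex/cond pair.
 * With keep_last set, the most recently displayed frame stays readable at
 * rindex (rindex_shown = 1) so it can be re-rendered on window events; that is
 * why the peek helpers above add rindex_shown to the read index and
 * frame_queue_nb_remaining() subtracts it from size.
 */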
819 static void decoder_abort(Decoder *d, FrameQueue *fq)
820 {
821  packet_queue_abort(d->queue);
822  frame_queue_signal(fq);
823  SDL_WaitThread(d->decoder_tid, NULL);
824  d->decoder_tid = NULL;
825  packet_queue_flush(d->queue);
826 }
827 
828 static inline void fill_rectangle(int x, int y, int w, int h)
829 {
830  SDL_Rect rect;
831  rect.x = x;
832  rect.y = y;
833  rect.w = w;
834  rect.h = h;
835  if (w && h)
836  SDL_RenderFillRect(renderer, &rect);
837 }
838 
839 static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
840 {
841  Uint32 format;
842  int access, w, h;
843  if (!*texture || SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
844  void *pixels;
845  int pitch;
846  if (*texture)
847  SDL_DestroyTexture(*texture);
848  if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
849  return -1;
850  if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
851  return -1;
852  if (init_texture) {
853  if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
854  return -1;
855  memset(pixels, 0, pitch * new_height);
856  SDL_UnlockTexture(*texture);
857  }
858  av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n", new_width, new_height, SDL_GetPixelFormatName(new_format));
859  }
860  return 0;
861 }
862 
863 static void calculate_display_rect(SDL_Rect *rect,
864  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
865  int pic_width, int pic_height, AVRational pic_sar)
866 {
867  AVRational aspect_ratio = pic_sar;
868  int64_t width, height, x, y;
869 
870  if (av_cmp_q(aspect_ratio, av_make_q(0, 1)) <= 0)
871  aspect_ratio = av_make_q(1, 1);
872 
873  aspect_ratio = av_mul_q(aspect_ratio, av_make_q(pic_width, pic_height));
874 
875  /* XXX: we suppose the screen has a 1.0 pixel ratio */
876  height = scr_height;
877  width = av_rescale(height, aspect_ratio.num, aspect_ratio.den) & ~1;
878  if (width > scr_width) {
879  width = scr_width;
880  height = av_rescale(width, aspect_ratio.den, aspect_ratio.num) & ~1;
881  }
882  x = (scr_width - width) / 2;
883  y = (scr_height - height) / 2;
884  rect->x = scr_xleft + x;
885  rect->y = scr_ytop + y;
886  rect->w = FFMAX((int)width, 1);
887  rect->h = FFMAX((int)height, 1);
888 }
889 
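/*
 * Example of the letterboxing math above: a 1920x1080 picture with square
 * pixels (SAR 1:1) shown in a 1280x1024 window has a 16:9 display aspect, so
 * width = 1024 * 16/9 = 1820, which is too wide; the rect is clamped to 1280
 * wide and 1280 * 9/16 = 720 high (rounded down to even), centered with
 * y = (1024 - 720) / 2 = 152.
 */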
890 static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
891 {
892  int i;
893  *sdl_blendmode = SDL_BLENDMODE_NONE;
894  *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN;
895  if (format == AV_PIX_FMT_RGB32 ||
896  format == AV_PIX_FMT_RGB32_1 ||
897  format == AV_PIX_FMT_BGR32 ||
898  format == AV_PIX_FMT_BGR32_1)
899  *sdl_blendmode = SDL_BLENDMODE_BLEND;
900  for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; i++) {
901  if (format == sdl_texture_format_map[i].format) {
902  *sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt;
903  return;
904  }
905  }
906 }
907 
908 static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext **img_convert_ctx) {
909  int ret = 0;
910  Uint32 sdl_pix_fmt;
911  SDL_BlendMode sdl_blendmode;
912  get_sdl_pix_fmt_and_blendmode(frame->format, &sdl_pix_fmt, &sdl_blendmode);
913  if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
914  return -1;
915  switch (sdl_pix_fmt) {
916  case SDL_PIXELFORMAT_UNKNOWN:
917  /* This should only happen if we are not using avfilter... */
918  *img_convert_ctx = sws_getCachedContext(*img_convert_ctx,
919  frame->width, frame->height, frame->format, frame->width, frame->height,
920  AV_PIX_FMT_BGRA, sws_flags, NULL, NULL, NULL);
921  if (*img_convert_ctx != NULL) {
922  uint8_t *pixels[4];
923  int pitch[4];
924  if (!SDL_LockTexture(*tex, NULL, (void **)pixels, pitch)) {
925  sws_scale(*img_convert_ctx, (const uint8_t * const *)frame->data, frame->linesize,
926  0, frame->height, pixels, pitch);
927  SDL_UnlockTexture(*tex);
928  }
929  } else {
930  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
931  ret = -1;
932  }
933  break;
934  case SDL_PIXELFORMAT_IYUV:
935  if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
936  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0],
937  frame->data[1], frame->linesize[1],
938  frame->data[2], frame->linesize[2]);
939  } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
940  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0],
941  frame->data[1] + frame->linesize[1] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[1],
942  frame->data[2] + frame->linesize[2] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[2]);
943  } else {
944  av_log(NULL, AV_LOG_ERROR, "Mixed negative and positive linesizes are not supported.\n");
945  return -1;
946  }
947  break;
948  default:
949  if (frame->linesize[0] < 0) {
950  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
951  } else {
952  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0], frame->linesize[0]);
953  }
954  break;
955  }
956  return ret;
957 }
958 
959 static void set_sdl_yuv_conversion_mode(AVFrame *frame)
960 {
961 #if SDL_VERSION_ATLEAST(2,0,8)
962  SDL_YUV_CONVERSION_MODE mode = SDL_YUV_CONVERSION_AUTOMATIC;
963  if (frame && (frame->format == AV_PIX_FMT_YUV420P || frame->format == AV_PIX_FMT_YUYV422 || frame->format == AV_PIX_FMT_UYVY422)) {
964  if (frame->color_range == AVCOL_RANGE_JPEG)
965  mode = SDL_YUV_CONVERSION_JPEG;
966  else if (frame->colorspace == AVCOL_SPC_BT709)
967  mode = SDL_YUV_CONVERSION_BT709;
968  else if (frame->colorspace == AVCOL_SPC_BT470BG || frame->colorspace == AVCOL_SPC_SMPTE170M || frame->colorspace == AVCOL_SPC_SMPTE240M)
969  mode = SDL_YUV_CONVERSION_BT601;
970  }
971  SDL_SetYUVConversionMode(mode);
972 #endif
973 }
974 
975 static void video_image_display(VideoState *is)
976 {
977  Frame *vp;
978  Frame *sp = NULL;
979  SDL_Rect rect;
980 
981  vp = frame_queue_peek_last(&is->pictq);
982  if (is->subtitle_st) {
983  if (frame_queue_nb_remaining(&is->subpq) > 0) {
984  sp = frame_queue_peek(&is->subpq);
985 
986  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
987  if (!sp->uploaded) {
988  uint8_t* pixels[4];
989  int pitch[4];
990  int i;
991  if (!sp->width || !sp->height) {
992  sp->width = vp->width;
993  sp->height = vp->height;
994  }
995  if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
996  return;
997 
998  for (i = 0; i < sp->sub.num_rects; i++) {
999  AVSubtitleRect *sub_rect = sp->sub.rects[i];
1000 
1001  sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
1002  sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
1003  sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x);
1004  sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
1005 
1006  is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
1007  sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8,
1008  sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA,
1009  0, NULL, NULL, NULL);
1010  if (!is->sub_convert_ctx) {
1011  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1012  return;
1013  }
1014  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) {
1015  sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
1016  0, sub_rect->h, pixels, pitch);
1017  SDL_UnlockTexture(is->sub_texture);
1018  }
1019  }
1020  sp->uploaded = 1;
1021  }
1022  } else
1023  sp = NULL;
1024  }
1025  }
1026 
1027  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
1028 
1029  if (!vp->uploaded) {
1030  if (upload_texture(&is->vid_texture, vp->frame, &is->img_convert_ctx) < 0)
1031  return;
1032  vp->uploaded = 1;
1033  vp->flip_v = vp->frame->linesize[0] < 0;
1034  }
1035 
1036  set_sdl_yuv_conversion_mode(vp->frame);
1037  SDL_RenderCopyEx(renderer, is->vid_texture, NULL, &rect, 0, NULL, vp->flip_v ? SDL_FLIP_VERTICAL : 0);
1038  set_sdl_yuv_conversion_mode(NULL);
1039  if (sp) {
1040 #if USE_ONEPASS_SUBTITLE_RENDER
1041  SDL_RenderCopy(renderer, is->sub_texture, NULL, &rect);
1042 #else
1043  int i;
1044  double xratio = (double)rect.w / (double)sp->width;
1045  double yratio = (double)rect.h / (double)sp->height;
1046  for (i = 0; i < sp->sub.num_rects; i++) {
1047  SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
1048  SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
1049  .y = rect.y + sub_rect->y * yratio,
1050  .w = sub_rect->w * xratio,
1051  .h = sub_rect->h * yratio};
1052  SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
1053  }
1054 #endif
1055  }
1056 }
1057 
1058 static inline int compute_mod(int a, int b)
1059 {
1060  return a < 0 ? a%b + b : a%b;
1061 }
1062 
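/*
 * compute_mod() is a wrap-around index helper: unlike C's %, it maps negative
 * values back into [0, b), e.g. compute_mod(-3, 16) == 13.  The audio
 * visualizer below uses it to walk the circular sample_array backwards from
 * the current write position by the estimated playback delay.
 */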
1063 static void video_audio_display(VideoState *s)
1064 {
1065  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1066  int ch, channels, h, h2;
1067  int64_t time_diff;
1068  int rdft_bits, nb_freq;
1069 
1070  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1071  ;
1072  nb_freq = 1 << (rdft_bits - 1);
1073 
1074  /* compute display index : center on currently output samples */
1075  channels = s->audio_tgt.channels;
1076  nb_display_channels = channels;
1077  if (!s->paused) {
1078  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1079  n = 2 * channels;
1080  delay = s->audio_write_buf_size;
1081  delay /= n;
1082 
1083  /* to be more precise, we take into account the time spent since
1084  the last buffer computation */
1085  if (audio_callback_time) {
1086  time_diff = av_gettime_relative() - audio_callback_time;
1087  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1088  }
1089 
1090  delay += 2 * data_used;
1091  if (delay < data_used)
1092  delay = data_used;
1093 
1094  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1095  if (s->show_mode == SHOW_MODE_WAVES) {
1096  h = INT_MIN;
1097  for (i = 0; i < 1000; i += channels) {
1098  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1099  int a = s->sample_array[idx];
1100  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1101  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1102  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1103  int score = a - d;
1104  if (h < score && (b ^ c) < 0) {
1105  h = score;
1106  i_start = idx;
1107  }
1108  }
1109  }
1110 
1111  s->last_i_start = i_start;
1112  } else {
1113  i_start = s->last_i_start;
1114  }
1115 
1116  if (s->show_mode == SHOW_MODE_WAVES) {
1117  SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
1118 
1119  /* total height for one channel */
1120  h = s->height / nb_display_channels;
1121  /* graph height / 2 */
1122  h2 = (h * 9) / 20;
1123  for (ch = 0; ch < nb_display_channels; ch++) {
1124  i = i_start + ch;
1125  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1126  for (x = 0; x < s->width; x++) {
1127  y = (s->sample_array[i] * h2) >> 15;
1128  if (y < 0) {
1129  y = -y;
1130  ys = y1 - y;
1131  } else {
1132  ys = y1;
1133  }
1134  fill_rectangle(s->xleft + x, ys, 1, y);
1135  i += channels;
1136  if (i >= SAMPLE_ARRAY_SIZE)
1137  i -= SAMPLE_ARRAY_SIZE;
1138  }
1139  }
1140 
1141  SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
1142 
1143  for (ch = 1; ch < nb_display_channels; ch++) {
1144  y = s->ytop + ch * h;
1145  fill_rectangle(s->xleft, y, s->width, 1);
1146  }
1147  } else {
1148  if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
1149  return;
1150 
1151  if (s->xpos >= s->width)
1152  s->xpos = 0;
1153  nb_display_channels= FFMIN(nb_display_channels, 2);
1154  if (rdft_bits != s->rdft_bits) {
1155  av_rdft_end(s->rdft);
1156  av_free(s->rdft_data);
1157  s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
1158  s->rdft_bits = rdft_bits;
1159  s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data));
1160  }
1161  if (!s->rdft || !s->rdft_data){
1162  av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1163  s->show_mode = SHOW_MODE_WAVES;
1164  } else {
1165  FFTSample *data[2];
1166  SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
1167  uint32_t *pixels;
1168  int pitch;
1169  for (ch = 0; ch < nb_display_channels; ch++) {
1170  data[ch] = s->rdft_data + 2 * nb_freq * ch;
1171  i = i_start + ch;
1172  for (x = 0; x < 2 * nb_freq; x++) {
1173  double w = (x-nb_freq) * (1.0 / nb_freq);
1174  data[ch][x] = s->sample_array[i] * (1.0 - w * w);
1175  i += channels;
1176  if (i >= SAMPLE_ARRAY_SIZE)
1177  i -= SAMPLE_ARRAY_SIZE;
1178  }
1179  av_rdft_calc(s->rdft, data[ch]);
1180  }
1181  /* Least efficient way to do this, we should of course
1182  * directly access it but it is more than fast enough. */
1183  if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
1184  pitch >>= 2;
1185  pixels += pitch * s->height;
1186  for (y = 0; y < s->height; y++) {
1187  double w = 1 / sqrt(nb_freq);
1188  int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
1189  int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][2 * y + 0], data[1][2 * y + 1]))
1190  : a;
1191  a = FFMIN(a, 255);
1192  b = FFMIN(b, 255);
1193  pixels -= pitch;
1194  *pixels = (a << 16) + (b << 8) + ((a+b) >> 1);
1195  }
1196  SDL_UnlockTexture(s->vis_texture);
1197  }
1198  SDL_RenderCopy(renderer, s->vis_texture, NULL, NULL);
1199  }
1200  if (!s->paused)
1201  s->xpos++;
1202  }
1203 }
1204 
1205 static void stream_component_close(VideoState *is, int stream_index)
1206 {
1207  AVFormatContext *ic = is->ic;
1208  AVCodecParameters *codecpar;
1209 
1210  if (stream_index < 0 || stream_index >= ic->nb_streams)
1211  return;
1212  codecpar = ic->streams[stream_index]->codecpar;
1213 
1214  switch (codecpar->codec_type) {
1215  case AVMEDIA_TYPE_AUDIO:
1216  decoder_abort(&is->auddec, &is->sampq);
1217  SDL_CloseAudioDevice(audio_dev);
1218  decoder_destroy(&is->auddec);
1219  swr_free(&is->swr_ctx);
1220  av_freep(&is->audio_buf1);
1221  is->audio_buf1_size = 0;
1222  is->audio_buf = NULL;
1223 
1224  if (is->rdft) {
1225  av_rdft_end(is->rdft);
1226  av_freep(&is->rdft_data);
1227  is->rdft = NULL;
1228  is->rdft_bits = 0;
1229  }
1230  break;
1231  case AVMEDIA_TYPE_VIDEO:
1232  decoder_abort(&is->viddec, &is->pictq);
1233  decoder_destroy(&is->viddec);
1234  break;
1235  case AVMEDIA_TYPE_SUBTITLE:
1236  decoder_abort(&is->subdec, &is->subpq);
1237  decoder_destroy(&is->subdec);
1238  break;
1239  default:
1240  break;
1241  }
1242 
1243  ic->streams[stream_index]->discard = AVDISCARD_ALL;
1244  switch (codecpar->codec_type) {
1245  case AVMEDIA_TYPE_AUDIO:
1246  is->audio_st = NULL;
1247  is->audio_stream = -1;
1248  break;
1249  case AVMEDIA_TYPE_VIDEO:
1250  is->video_st = NULL;
1251  is->video_stream = -1;
1252  break;
1253  case AVMEDIA_TYPE_SUBTITLE:
1254  is->subtitle_st = NULL;
1255  is->subtitle_stream = -1;
1256  break;
1257  default:
1258  break;
1259  }
1260 }
1261 
1262 static void stream_close(VideoState *is)
1263 {
1264  /* XXX: use a special url_shutdown call to abort parse cleanly */
1265  is->abort_request = 1;
1266  SDL_WaitThread(is->read_tid, NULL);
1267 
1268  /* close each stream */
1269  if (is->audio_stream >= 0)
1270  stream_component_close(is, is->audio_stream);
1271  if (is->video_stream >= 0)
1272  stream_component_close(is, is->video_stream);
1273  if (is->subtitle_stream >= 0)
1274  stream_component_close(is, is->subtitle_stream);
1275 
1276  avformat_close_input(&is->ic);
1277 
1278  packet_queue_destroy(&is->videoq);
1279  packet_queue_destroy(&is->audioq);
1280  packet_queue_destroy(&is->subtitleq);
1281 
1282  /* free all pictures */
1283  frame_queue_destory(&is->pictq);
1284  frame_queue_destory(&is->sampq);
1285  frame_queue_destory(&is->subpq);
1286  SDL_DestroyCond(is->continue_read_thread);
1287  sws_freeContext(is->img_convert_ctx);
1288  sws_freeContext(is->sub_convert_ctx);
1289  av_free(is->filename);
1290  if (is->vis_texture)
1291  SDL_DestroyTexture(is->vis_texture);
1292  if (is->vid_texture)
1293  SDL_DestroyTexture(is->vid_texture);
1294  if (is->sub_texture)
1295  SDL_DestroyTexture(is->sub_texture);
1296  av_free(is);
1297 }
1298 
1299 static void do_exit(VideoState *is)
1300 {
1301  if (is) {
1302  stream_close(is);
1303  }
1304  if (renderer)
1305  SDL_DestroyRenderer(renderer);
1306  if (window)
1307  SDL_DestroyWindow(window);
1308  uninit_opts();
1309 #if CONFIG_AVFILTER
1310  av_freep(&vfilters_list);
1311 #endif
1312  avformat_network_deinit();
1313  if (show_status)
1314  printf("\n");
1315  SDL_Quit();
1316  av_log(NULL, AV_LOG_QUIET, "%s", "");
1317  exit(0);
1318 }
1319 
1320 static void sigterm_handler(int sig)
1321 {
1322  exit(123);
1323 }
1324 
1325 static void set_default_window_size(int width, int height, AVRational sar)
1326 {
1327  SDL_Rect rect;
1328  int max_width = screen_width ? screen_width : INT_MAX;
1329  int max_height = screen_height ? screen_height : INT_MAX;
1330  if (max_width == INT_MAX && max_height == INT_MAX)
1331  max_height = height;
1332  calculate_display_rect(&rect, 0, 0, max_width, max_height, width, height, sar);
1333  default_width = rect.w;
1334  default_height = rect.h;
1335 }
1336 
1337 static int video_open(VideoState *is)
1338 {
1339  int w,h;
1340 
1341  w = screen_width ? screen_width : default_width;
1342  h = screen_height ? screen_height : default_height;
1343 
1344  if (!window_title)
1345  window_title = input_filename;
1346  SDL_SetWindowTitle(window, window_title);
1347 
1348  SDL_SetWindowSize(window, w, h);
1349  SDL_SetWindowPosition(window, screen_left, screen_top);
1350  if (is_full_screen)
1351  SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP);
1352  SDL_ShowWindow(window);
1353 
1354  is->width = w;
1355  is->height = h;
1356 
1357  return 0;
1358 }
1359 
1360 /* display the current picture, if any */
1361 static void video_display(VideoState *is)
1362 {
1363  if (!is->width)
1364  video_open(is);
1365 
1366  SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1367  SDL_RenderClear(renderer);
1368  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1369  video_audio_display(is);
1370  else if (is->video_st)
1371  video_image_display(is);
1372  SDL_RenderPresent(renderer);
1373 }
1374 
1375 static double get_clock(Clock *c)
1376 {
1377  if (*c->queue_serial != c->serial)
1378  return NAN;
1379  if (c->paused) {
1380  return c->pts;
1381  } else {
1382  double time = av_gettime_relative() / 1000000.0;
1383  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1384  }
1385 }
1386 
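/*
 * get_clock() above extrapolates from the last set_clock_at() call:
 *   pts_drift = pts - last_updated, so the returned value equals
 *   pts + (now - last_updated) * speed.
 * For example, a clock set to 10.0 s half a second ago reads 10.5 s at
 * speed 1.0 and 10.45 s at speed 0.9.
 */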
1387 static void set_clock_at(Clock *c, double pts, int serial, double time)
1388 {
1389  c->pts = pts;
1390  c->last_updated = time;
1391  c->pts_drift = c->pts - time;
1392  c->serial = serial;
1393 }
1394 
1395 static void set_clock(Clock *c, double pts, int serial)
1396 {
1397  double time = av_gettime_relative() / 1000000.0;
1398  set_clock_at(c, pts, serial, time);
1399 }
1400 
1401 static void set_clock_speed(Clock *c, double speed)
1402 {
1403  set_clock(c, get_clock(c), c->serial);
1404  c->speed = speed;
1405 }
1406 
1407 static void init_clock(Clock *c, int *queue_serial)
1408 {
1409  c->speed = 1.0;
1410  c->paused = 0;
1411  c->queue_serial = queue_serial;
1412  set_clock(c, NAN, -1);
1413 }
1414 
1415 static void sync_clock_to_slave(Clock *c, Clock *slave)
1416 {
1417  double clock = get_clock(c);
1418  double slave_clock = get_clock(slave);
1419  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1420  set_clock(c, slave_clock, slave->serial);
1421 }
1422 
1423 static int get_master_sync_type(VideoState *is) {
1424  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1425  if (is->video_st)
1426  return AV_SYNC_VIDEO_MASTER;
1427  else
1428  return AV_SYNC_AUDIO_MASTER;
1429  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1430  if (is->audio_st)
1431  return AV_SYNC_AUDIO_MASTER;
1432  else
1433  return AV_SYNC_EXTERNAL_CLOCK;
1434  } else {
1435  return AV_SYNC_EXTERNAL_CLOCK;
1436  }
1437 }
1438 
1439 /* get the current master clock value */
1440 static double get_master_clock(VideoState *is)
1441 {
1442  double val;
1443 
1444  switch (get_master_sync_type(is)) {
1445  case AV_SYNC_VIDEO_MASTER:
1446  val = get_clock(&is->vidclk);
1447  break;
1448  case AV_SYNC_AUDIO_MASTER:
1449  val = get_clock(&is->audclk);
1450  break;
1451  default:
1452  val = get_clock(&is->extclk);
1453  break;
1454  }
1455  return val;
1456 }
1457 
1458 static void check_external_clock_speed(VideoState *is) {
1459  if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES ||
1460  is->audio_stream >= 0 && is->audioq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES) {
1461  set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
1462  } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) &&
1463  (is->audio_stream < 0 || is->audioq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES)) {
1464  set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
1465  } else {
1466  double speed = is->extclk.speed;
1467  if (speed != 1.0)
1468  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1469  }
1470 }
1471 
1472 /* seek in the stream */
1473 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1474 {
1475  if (!is->seek_req) {
1476  is->seek_pos = pos;
1477  is->seek_rel = rel;
1478  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1479  if (seek_by_bytes)
1480  is->seek_flags |= AVSEEK_FLAG_BYTE;
1481  is->seek_req = 1;
1482  SDL_CondSignal(is->continue_read_thread);
1483  }
1484 }
1485 
1486 /* pause or resume the video */
1487 static void stream_toggle_pause(VideoState *is)
1488 {
1489  if (is->paused) {
1490  is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1491  if (is->read_pause_return != AVERROR(ENOSYS)) {
1492  is->vidclk.paused = 0;
1493  }
1494  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1495  }
1496  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1497  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1498 }
1499 
1500 static void toggle_pause(VideoState *is)
1501 {
1502  stream_toggle_pause(is);
1503  is->step = 0;
1504 }
1505 
1506 static void toggle_mute(VideoState *is)
1507 {
1508  is->muted = !is->muted;
1509 }
1510 
1511 static void update_volume(VideoState *is, int sign, double step)
1512 {
1513  double volume_level = is->audio_volume ? (20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)) : -1000.0;
1514  int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (volume_level + sign * step) / 20.0));
1515  is->audio_volume = av_clip(is->audio_volume == new_volume ? (is->audio_volume + sign) : new_volume, 0, SDL_MIX_MAXVOLUME);
1516 }
1517 
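/*
 * update_volume() above steps the level by SDL_VOLUME_STEP (0.75 dB) in the
 * log domain: the current SDL volume is converted to dB relative to
 * SDL_MIX_MAXVOLUME, stepped, and converted back.  One step down from full
 * volume is 128 * 10^(-0.75/20) ~= 117; if rounding would leave the value
 * unchanged, the code nudges it by +/-1 so repeated presses always move.
 */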
1518 static void step_to_next_frame(VideoState *is)
1519 {
1520  /* if the stream is paused unpause it, then step */
1521  if (is->paused)
1522  stream_toggle_pause(is);
1523  is->step = 1;
1524 }
1525 
1526 static double compute_target_delay(double delay, VideoState *is)
1527 {
1528  double sync_threshold, diff = 0;
1529 
1530  /* update delay to follow master synchronisation source */
1531  if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1532  /* if video is slave, we try to correct big delays by
1533  duplicating or deleting a frame */
1534  diff = get_clock(&is->vidclk) - get_master_clock(is);
1535 
1536  /* skip or repeat frame. We take into account the
1537  delay to compute the threshold. I still don't know
1538  if it is the best guess */
1539  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1540  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1541  if (diff <= -sync_threshold)
1542  delay = FFMAX(0, delay + diff);
1543  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1544  delay = delay + diff;
1545  else if (diff >= sync_threshold)
1546  delay = 2 * delay;
1547  }
1548  }
1549 
1550  av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
1551  delay, -diff);
1552 
1553  return delay;
1554 }
1555 
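/*
 * Numeric example for compute_target_delay(): with a nominal frame delay of
 * 0.040 s the sync threshold is 0.040 s.  If the video clock is 0.060 s ahead
 * of the master (diff = +0.060), the delay is doubled to 0.080 s so the frame
 * lingers; if it is 0.060 s behind (diff = -0.060), the delay collapses to
 * max(0, 0.040 - 0.060) = 0 and the next frame is shown immediately.
 */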
1556 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1557  if (vp->serial == nextvp->serial) {
1558  double duration = nextvp->pts - vp->pts;
1559  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1560  return vp->duration;
1561  else
1562  return duration;
1563  } else {
1564  return 0.0;
1565  }
1566 }
1567 
1568 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1569  /* update current video pts */
1570  set_clock(&is->vidclk, pts, serial);
1571  sync_clock_to_slave(&is->extclk, &is->vidclk);
1572 }
1573 
1574 /* called to display each frame */
1575 static void video_refresh(void *opaque, double *remaining_time)
1576 {
1577  VideoState *is = opaque;
1578  double time;
1579 
1580  Frame *sp, *sp2;
1581 
1582  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1583  check_external_clock_speed(is);
1584 
1585  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1586  time = av_gettime_relative() / 1000000.0;
1587  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1588  video_display(is);
1589  is->last_vis_time = time;
1590  }
1591  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1592  }
1593 
1594  if (is->video_st) {
1595 retry:
1596  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1597  // nothing to do, no picture to display in the queue
1598  } else {
1599  double last_duration, duration, delay;
1600  Frame *vp, *lastvp;
1601 
1602  /* dequeue the picture */
1603  lastvp = frame_queue_peek_last(&is->pictq);
1604  vp = frame_queue_peek(&is->pictq);
1605 
1606  if (vp->serial != is->videoq.serial) {
1607  frame_queue_next(&is->pictq);
1608  goto retry;
1609  }
1610 
1611  if (lastvp->serial != vp->serial)
1612  is->frame_timer = av_gettime_relative() / 1000000.0;
1613 
1614  if (is->paused)
1615  goto display;
1616 
1617  /* compute nominal last_duration */
1618  last_duration = vp_duration(is, lastvp, vp);
1619  delay = compute_target_delay(last_duration, is);
1620 
1621  time= av_gettime_relative()/1000000.0;
1622  if (time < is->frame_timer + delay) {
1623  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1624  goto display;
1625  }
1626 
1627  is->frame_timer += delay;
1628  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1629  is->frame_timer = time;
1630 
1631  SDL_LockMutex(is->pictq.mutex);
1632  if (!isnan(vp->pts))
1633  update_video_pts(is, vp->pts, vp->pos, vp->serial);
1634  SDL_UnlockMutex(is->pictq.mutex);
1635 
1636  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1637  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1638  duration = vp_duration(is, vp, nextvp);
1639  if(!is->step && (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1640  is->frame_drops_late++;
1641  frame_queue_next(&is->pictq);
1642  goto retry;
1643  }
1644  }
1645 
1646  if (is->subtitle_st) {
1647  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1648  sp = frame_queue_peek(&is->subpq);
1649 
1650  if (frame_queue_nb_remaining(&is->subpq) > 1)
1651  sp2 = frame_queue_peek_next(&is->subpq);
1652  else
1653  sp2 = NULL;
1654 
1655  if (sp->serial != is->subtitleq.serial
1656  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1657  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1658  {
1659  if (sp->uploaded) {
1660  int i;
1661  for (i = 0; i < sp->sub.num_rects; i++) {
1662  AVSubtitleRect *sub_rect = sp->sub.rects[i];
1663  uint8_t *pixels;
1664  int pitch, j;
1665 
1666  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
1667  for (j = 0; j < sub_rect->h; j++, pixels += pitch)
1668  memset(pixels, 0, sub_rect->w << 2);
1669  SDL_UnlockTexture(is->sub_texture);
1670  }
1671  }
1672  }
1673  frame_queue_next(&is->subpq);
1674  } else {
1675  break;
1676  }
1677  }
1678  }
1679 
1680  frame_queue_next(&is->pictq);
1681  is->force_refresh = 1;
1682 
1683  if (is->step && !is->paused)
1684  stream_toggle_pause(is);
1685  }
1686 display:
1687  /* display picture */
1688  if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
1689  video_display(is);
1690  }
1691  is->force_refresh = 0;
1692  if (show_status) {
1693  AVBPrint buf;
1694  static int64_t last_time;
1695  int64_t cur_time;
1696  int aqsize, vqsize, sqsize;
1697  double av_diff;
1698 
1699  cur_time = av_gettime_relative();
1700  if (!last_time || (cur_time - last_time) >= 30000) {
1701  aqsize = 0;
1702  vqsize = 0;
1703  sqsize = 0;
1704  if (is->audio_st)
1705  aqsize = is->audioq.size;
1706  if (is->video_st)
1707  vqsize = is->videoq.size;
1708  if (is->subtitle_st)
1709  sqsize = is->subtitleq.size;
1710  av_diff = 0;
1711  if (is->audio_st && is->video_st)
1712  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1713  else if (is->video_st)
1714  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1715  else if (is->audio_st)
1716  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1717 
1718  av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
1719  av_bprintf(&buf,
1720  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1721  get_master_clock(is),
1722  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1723  av_diff,
1724  is->frame_drops_early + is->frame_drops_late,
1725  aqsize / 1024,
1726  vqsize / 1024,
1727  sqsize,
1728  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_dts : 0,
1729  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_pts : 0);
1730 
1731  if (show_status == 1 && AV_LOG_INFO > av_log_get_level())
1732  fprintf(stderr, "%s", buf.str);
1733  else
1734  av_log(NULL, AV_LOG_INFO, "%s", buf.str);
1735 
1736  fflush(stderr);
1737  av_bprint_finalize(&buf, NULL);
1738 
1739  last_time = cur_time;
1740  }
1741  }
1742 }
1743 
1744 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1745 {
1746  Frame *vp;
1747 
1748 #if defined(DEBUG_SYNC)
1749  printf("frame_type=%c pts=%0.3f\n",
1750  av_get_picture_type_char(src_frame->pict_type), pts);
1751 #endif
1752 
1753  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1754  return -1;
1755 
1756  vp->sar = src_frame->sample_aspect_ratio;
1757  vp->uploaded = 0;
1758 
1759  vp->width = src_frame->width;
1760  vp->height = src_frame->height;
1761  vp->format = src_frame->format;
1762 
1763  vp->pts = pts;
1764  vp->duration = duration;
1765  vp->pos = pos;
1766  vp->serial = serial;
1767 
1768  set_default_window_size(vp->width, vp->height, vp->sar);
1769 
1770  av_frame_move_ref(vp->frame, src_frame);
1771  frame_queue_push(&is->pictq);
1772  return 0;
1773 }
1774 
1775 static int get_video_frame(VideoState *is, AVFrame *frame)
1776 {
1777  int got_picture;
1778 
1779  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1780  return -1;
1781 
1782  if (got_picture) {
1783  double dpts = NAN;
1784 
1785  if (frame->pts != AV_NOPTS_VALUE)
1786  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1787 
1788  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1789 
1790  if (framedrop > 0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1791  if (frame->pts != AV_NOPTS_VALUE) {
1792  double diff = dpts - get_master_clock(is);
1793  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1794  diff - is->frame_last_filter_delay < 0 &&
1795  is->viddec.pkt_serial == is->vidclk.serial &&
1796  is->videoq.nb_packets) {
1797  is->frame_drops_early++;
1798  av_frame_unref(frame);
1799  got_picture = 0;
1800  }
1801  }
1802  }
1803  }
1804 
1805  return got_picture;
1806 }
1807 
1808 #if CONFIG_AVFILTER
1809 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1810  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1811 {
1812  int ret, i;
1813  int nb_filters = graph->nb_filters;
1814  AVFilterInOut *outputs = NULL, *inputs = NULL;
1815 
1816  if (filtergraph) {
1817  outputs = avfilter_inout_alloc();
1818  inputs = avfilter_inout_alloc();
1819  if (!outputs || !inputs) {
1820  ret = AVERROR(ENOMEM);
1821  goto fail;
1822  }
1823 
1824  outputs->name = av_strdup("in");
1825  outputs->filter_ctx = source_ctx;
1826  outputs->pad_idx = 0;
1827  outputs->next = NULL;
1828 
1829  inputs->name = av_strdup("out");
1830  inputs->filter_ctx = sink_ctx;
1831  inputs->pad_idx = 0;
1832  inputs->next = NULL;
1833 
1834  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1835  goto fail;
1836  } else {
1837  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1838  goto fail;
1839  }
1840 
1841  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1842  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1843  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1844 
1845  ret = avfilter_graph_config(graph, NULL);
1846 fail:
1847  avfilter_inout_free(&outputs);
1848  avfilter_inout_free(&inputs);
1849  return ret;
1850 }
1851 
1852 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1853 {
1854  enum AVPixelFormat pix_fmts[FF_ARRAY_ELEMS(sdl_texture_format_map)];
1855  char sws_flags_str[512] = "";
1856  char buffersrc_args[256];
1857  int ret;
1858  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1859  AVCodecParameters *codecpar = is->video_st->codecpar;
1860  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1861  AVDictionaryEntry *e = NULL;
1862  int nb_pix_fmts = 0;
1863  int i, j;
1864 
1865  for (i = 0; i < renderer_info.num_texture_formats; i++) {
1866  for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; j++) {
1867  if (renderer_info.texture_formats[i] == sdl_texture_format_map[j].texture_fmt) {
1868  pix_fmts[nb_pix_fmts++] = sdl_texture_format_map[j].format;
1869  break;
1870  }
1871  }
1872  }
1873  pix_fmts[nb_pix_fmts] = AV_PIX_FMT_NONE;
1874 
1875  while ((e = av_dict_get(sws_dict, "", e, AV_DICT_IGNORE_SUFFIX))) {
1876  if (!strcmp(e->key, "sws_flags")) {
1877  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1878  } else
1879  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value);
1880  }
1881  if (strlen(sws_flags_str))
1882  sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1883 
1884  graph->scale_sws_opts = av_strdup(sws_flags_str);
1885 
1886  snprintf(buffersrc_args, sizeof(buffersrc_args),
1887  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1888  frame->width, frame->height, frame->format,
1889  is->video_st->time_base.num, is->video_st->time_base.den,
1890  codecpar->sample_aspect_ratio.num, FFMAX(codecpar->sample_aspect_ratio.den, 1));
1891  if (fr.num && fr.den)
1892  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1893 
1894  if ((ret = avfilter_graph_create_filter(&filt_src,
1895  avfilter_get_by_name("buffer"),
1896  "ffplay_buffer", buffersrc_args, NULL,
1897  graph)) < 0)
1898  goto fail;
1899 
1900  ret = avfilter_graph_create_filter(&filt_out,
1901  avfilter_get_by_name("buffersink"),
1902  "ffplay_buffersink", NULL, NULL, graph);
1903  if (ret < 0)
1904  goto fail;
1905 
1906  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1907  goto fail;
1908 
1909  last_filter = filt_out;
1910 
1911 /* Note: this macro adds a filter before the lastly added filter, so the
1912  * processing order of the filters is in reverse */
1913 #define INSERT_FILT(name, arg) do { \
1914  AVFilterContext *filt_ctx; \
1915  \
1916  ret = avfilter_graph_create_filter(&filt_ctx, \
1917  avfilter_get_by_name(name), \
1918  "ffplay_" name, arg, NULL, graph); \
1919  if (ret < 0) \
1920  goto fail; \
1921  \
1922  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1923  if (ret < 0) \
1924  goto fail; \
1925  \
1926  last_filter = filt_ctx; \
1927 } while (0)
1928 
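/*
 * INSERT_FILT() prepends to the chain: each new filter is linked in front of
 * last_filter, so the source-to-sink processing order is the reverse of the
 * insertion order.  In the autorotate case below, INSERT_FILT("hflip")
 * followed by INSERT_FILT("vflip") yields
 * buffer -> ... -> vflip -> hflip -> buffersink.
 */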
1929  if (autorotate) {
1930  double theta = get_rotation(is->video_st);
1931 
1932  if (fabs(theta - 90) < 1.0) {
1933  INSERT_FILT("transpose", "clock");
1934  } else if (fabs(theta - 180) < 1.0) {
1935  INSERT_FILT("hflip", NULL);
1936  INSERT_FILT("vflip", NULL);
1937  } else if (fabs(theta - 270) < 1.0) {
1938  INSERT_FILT("transpose", "cclock");
1939  } else if (fabs(theta) > 1.0) {
1940  char rotate_buf[64];
1941  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1942  INSERT_FILT("rotate", rotate_buf);
1943  }
1944  }
1945 
1946  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
1947  goto fail;
1948 
1949  is->in_video_filter = filt_src;
1950  is->out_video_filter = filt_out;
1951 
1952 fail:
1953  return ret;
1954 }
1955 
1956 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1957 {
1958  static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
1959  int sample_rates[2] = { 0, -1 };
1960  int64_t channel_layouts[2] = { 0, -1 };
1961  int channels[2] = { 0, -1 };
1962  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1963  char aresample_swr_opts[512] = "";
1964  AVDictionaryEntry *e = NULL;
1965  char asrc_args[256];
1966  int ret;
1967 
1968  avfilter_graph_free(&is->agraph);
1969  if (!(is->agraph = avfilter_graph_alloc()))
1970  return AVERROR(ENOMEM);
1971  is->agraph->nb_threads = filter_nbthreads;
1972 
1973  while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
1974  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
1975  if (strlen(aresample_swr_opts))
1976  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
1977  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
1978 
1979  ret = snprintf(asrc_args, sizeof(asrc_args),
1980  "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
1981  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
1982  is->audio_filter_src.channels,
1983  1, is->audio_filter_src.freq);
1984  if (is->audio_filter_src.channel_layout)
1985  snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
1986  ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
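 /* Example (assumed values, not taken from a real stream): a 44100 Hz stereo s16
  * source with the default stereo layout yields the abuffer args string
  * "sample_rate=44100:sample_fmt=s16:channels=2:time_base=1/44100:channel_layout=0x3". */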
1987 
1988  ret = avfilter_graph_create_filter(&filt_asrc,
1989  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1990  asrc_args, NULL, is->agraph);
1991  if (ret < 0)
1992  goto end;
1993 
1994 
1995  ret = avfilter_graph_create_filter(&filt_asink,
1996  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
1997  NULL, NULL, is->agraph);
1998  if (ret < 0)
1999  goto end;
2000 
2001  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
2002  goto end;
2003  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
2004  goto end;
2005 
2006  if (force_output_format) {
2007  channel_layouts[0] = is->audio_tgt.channel_layout;
2008  channels [0] = is->audio_tgt.channel_layout ? -1 : is->audio_tgt.channels;
2009  sample_rates [0] = is->audio_tgt.freq;
2010  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
2011  goto end;
2012  if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2013  goto end;
2014  if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2015  goto end;
2016  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2017  goto end;
2018  }
2019 
2020 
2021  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
2022  goto end;
2023 
2024  is->in_audio_filter = filt_asrc;
2025  is->out_audio_filter = filt_asink;
2026 
2027 end:
2028  if (ret < 0)
2029  avfilter_graph_free(&is->agraph);
2030  return ret;
2031 }
2032 #endif /* CONFIG_AVFILTER */
2033 
2034 static int audio_thread(void *arg)
2035 {
2036  VideoState *is = arg;
2037  AVFrame *frame = av_frame_alloc();
2038  Frame *af;
2039 #if CONFIG_AVFILTER
2040  int last_serial = -1;
2041  int64_t dec_channel_layout;
2042  int reconfigure;
2043 #endif
2044  int got_frame = 0;
2045  AVRational tb;
2046  int ret = 0;
2047 
2048  if (!frame)
2049  return AVERROR(ENOMEM);
2050 
2051  do {
2052  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2053  goto the_end;
2054 
2055  if (got_frame) {
2056  tb = (AVRational){1, frame->sample_rate};
2057 
2058 #if CONFIG_AVFILTER
2059  dec_channel_layout = get_valid_channel_layout(frame->channel_layout, frame->channels);
2060 
2061  reconfigure =
2062  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2063  frame->format, frame->channels) ||
2064  is->audio_filter_src.channel_layout != dec_channel_layout ||
2065  is->audio_filter_src.freq != frame->sample_rate ||
2066  is->auddec.pkt_serial != last_serial;
2067 
2068  if (reconfigure) {
2069  char buf1[1024], buf2[1024];
2070  av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2071  av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2072  av_log(NULL, AV_LOG_DEBUG,
2073  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2074  is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2075  frame->sample_rate, frame->channels, av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2076 
2077  is->audio_filter_src.fmt = frame->format;
2078  is->audio_filter_src.channels = frame->channels;
2079  is->audio_filter_src.channel_layout = dec_channel_layout;
2080  is->audio_filter_src.freq = frame->sample_rate;
2081  last_serial = is->auddec.pkt_serial;
2082 
2083  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2084  goto the_end;
2085  }
2086 
2087  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2088  goto the_end;
2089 
2090  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2091  tb = av_buffersink_get_time_base(is->out_audio_filter);
2092 #endif
2093  if (!(af = frame_queue_peek_writable(&is->sampq)))
2094  goto the_end;
2095 
2096  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2097  af->pos = frame->pkt_pos;
2098  af->serial = is->auddec.pkt_serial;
2099  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
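 /* The frame duration follows directly from the sample count, e.g. a 1024-sample
  * frame at 48000 Hz lasts 1024/48000 ~= 21.3 ms (example values). */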
2100 
2101  av_frame_move_ref(af->frame, frame);
2102  frame_queue_push(&is->sampq);
2103 
2104 #if CONFIG_AVFILTER
2105  if (is->audioq.serial != is->auddec.pkt_serial)
2106  break;
2107  }
2108  if (ret == AVERROR_EOF)
2109  is->auddec.finished = is->auddec.pkt_serial;
2110 #endif
2111  }
2112  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2113  the_end:
2114 #if CONFIG_AVFILTER
2115  avfilter_graph_free(&is->agraph);
2116 #endif
2117  av_frame_free(&frame);
2118  return ret;
2119 }
2120 
2121 static int decoder_start(Decoder *d, int (*fn)(void *), const char *thread_name, void* arg)
2122 {
2123  packet_queue_start(d->queue);
2124  d->decoder_tid = SDL_CreateThread(fn, thread_name, arg);
2125  if (!d->decoder_tid) {
2126  av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
2127  return AVERROR(ENOMEM);
2128  }
2129  return 0;
2130 }
2131 
2132 static int video_thread(void *arg)
2133 {
2134  VideoState *is = arg;
2135  AVFrame *frame = av_frame_alloc();
2136  double pts;
2137  double duration;
2138  int ret;
2139  AVRational tb = is->video_st->time_base;
2140  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2141 
2142 #if CONFIG_AVFILTER
2143  AVFilterGraph *graph = NULL;
2144  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2145  int last_w = 0;
2146  int last_h = 0;
2147  enum AVPixelFormat last_format = -2;
2148  int last_serial = -1;
2149  int last_vfilter_idx = 0;
2150 #endif
2151 
2152  if (!frame)
2153  return AVERROR(ENOMEM);
2154 
2155  for (;;) {
2156  ret = get_video_frame(is, frame);
2157  if (ret < 0)
2158  goto the_end;
2159  if (!ret)
2160  continue;
2161 
2162 #if CONFIG_AVFILTER
2163  if ( last_w != frame->width
2164  || last_h != frame->height
2165  || last_format != frame->format
2166  || last_serial != is->viddec.pkt_serial
2167  || last_vfilter_idx != is->vfilter_idx) {
2168  av_log(NULL, AV_LOG_DEBUG,
2169  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2170  last_w, last_h,
2171  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2172  frame->width, frame->height,
2173  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2174  avfilter_graph_free(&graph);
2175  graph = avfilter_graph_alloc();
2176  if (!graph) {
2177  ret = AVERROR(ENOMEM);
2178  goto the_end;
2179  }
2180  graph->nb_threads = filter_nbthreads;
2181  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2182  SDL_Event event;
2183  event.type = FF_QUIT_EVENT;
2184  event.user.data1 = is;
2185  SDL_PushEvent(&event);
2186  goto the_end;
2187  }
2188  filt_in = is->in_video_filter;
2189  filt_out = is->out_video_filter;
2190  last_w = frame->width;
2191  last_h = frame->height;
2192  last_format = frame->format;
2193  last_serial = is->viddec.pkt_serial;
2194  last_vfilter_idx = is->vfilter_idx;
2195  frame_rate = av_buffersink_get_frame_rate(filt_out);
2196  }
2197 
2198  ret = av_buffersrc_add_frame(filt_in, frame);
2199  if (ret < 0)
2200  goto the_end;
2201 
2202  while (ret >= 0) {
2203  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2204 
2205  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2206  if (ret < 0) {
2207  if (ret == AVERROR_EOF)
2208  is->viddec.finished = is->viddec.pkt_serial;
2209  ret = 0;
2210  break;
2211  }
2212 
2213  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2214  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2215  is->frame_last_filter_delay = 0;
2216  tb = av_buffersink_get_time_base(filt_out);
2217 #endif
2218  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
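 /* With a known frame rate the nominal duration of one frame is simply its
  * inverse, e.g. 1/25 s = 40 ms at 25 fps; 0 means "unknown" downstream. */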
2219  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2220  ret = queue_picture(is, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial);
2221  av_frame_unref(frame);
2222 #if CONFIG_AVFILTER
2223  if (is->videoq.serial != is->viddec.pkt_serial)
2224  break;
2225  }
2226 #endif
2227 
2228  if (ret < 0)
2229  goto the_end;
2230  }
2231  the_end:
2232 #if CONFIG_AVFILTER
2233  avfilter_graph_free(&graph);
2234 #endif
2235  av_frame_free(&frame);
2236  return 0;
2237 }
2238 
2239 static int subtitle_thread(void *arg)
2240 {
2241  VideoState *is = arg;
2242  Frame *sp;
2243  int got_subtitle;
2244  double pts;
2245 
2246  for (;;) {
2247  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2248  return 0;
2249 
2250  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2251  break;
2252 
2253  pts = 0;
2254 
2255  if (got_subtitle && sp->sub.format == 0) {
2256  if (sp->sub.pts != AV_NOPTS_VALUE)
2257  pts = sp->sub.pts / (double)AV_TIME_BASE;
2258  sp->pts = pts;
2259  sp->serial = is->subdec.pkt_serial;
2260  sp->width = is->subdec.avctx->width;
2261  sp->height = is->subdec.avctx->height;
2262  sp->uploaded = 0;
2263 
2264  /* now we can update the picture count */
2265  frame_queue_push(&is->subpq);
2266  } else if (got_subtitle) {
2267  avsubtitle_free(&sp->sub);
2268  }
2269  }
2270  return 0;
2271 }
2272 
2273 /* copy samples for viewing in editor window */
2274 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2275 {
2276  int size, len;
2277 
2278  size = samples_size / sizeof(short);
2279  while (size > 0) {
2280  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2281  if (len > size)
2282  len = size;
2283  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2284  samples += len;
2285  is->sample_array_index += len;
2286  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2287  is->sample_array_index = 0;
2288  size -= len;
2289  }
2290 }
2291 
2292 /* return the wanted number of samples to get better sync if sync_type is video
2293  * or external master clock */
2294 static int synchronize_audio(VideoState *is, int nb_samples)
2295 {
2296  int wanted_nb_samples = nb_samples;
2297 
2298  /* if not master, then we try to remove or add samples to correct the clock */
2299  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2300  double diff, avg_diff;
2301  int min_nb_samples, max_nb_samples;
2302 
2303  diff = get_clock(&is->audclk) - get_master_clock(is);
2304 
2305  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2306  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2307  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2308  /* not enough measures to have a correct estimate */
2309  is->audio_diff_avg_count++;
2310  } else {
2311  /* estimate the A-V difference */
2312  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2313 
2314  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2315  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2316  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2317  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2318  wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
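 /* audio_diff_cum is an exponentially weighted sum, so avg_diff = cum * (1 - coef)
  * is an exponential moving average of the A-V difference; with AUDIO_DIFF_AVG_NB = 20
  * the coefficient is 0.01^(1/20) ~= 0.794, i.e. the last ~20 measurements carry
  * about 99% of the weight. The resulting correction is clamped to
  * +/- SAMPLE_CORRECTION_PERCENT_MAX percent of the frame's sample count. */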
2319  }
2320  av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2321  diff, avg_diff, wanted_nb_samples - nb_samples,
2322  is->audio_clock, is->audio_diff_threshold);
2323  }
2324  } else {
2325  /* the difference is too large: probably initial PTS errors, so
2326  reset the A-V averaging filter */
2327  is->audio_diff_avg_count = 0;
2328  is->audio_diff_cum = 0;
2329  }
2330  }
2331 
2332  return wanted_nb_samples;
2333 }
2334 
2335 /**
2336  * Decode one audio frame and return its uncompressed size.
2337  *
2338  * The processed audio frame is decoded, converted if required, and
2339  * stored in is->audio_buf, with size in bytes given by the return
2340  * value.
2341  */
2342 static int audio_decode_frame(VideoState *is)
2343 {
2344  int data_size, resampled_data_size;
2345  int64_t dec_channel_layout;
2346  av_unused double audio_clock0;
2347  int wanted_nb_samples;
2348  Frame *af;
2349 
2350  if (is->paused)
2351  return -1;
2352 
2353  do {
2354 #if defined(_WIN32)
2355  while (frame_queue_nb_remaining(&is->sampq) == 0) {
2356  if ((av_gettime_relative() - audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
2357  return -1;
2358  av_usleep (1000);
2359  }
2360 #endif
2361  if (!(af = frame_queue_peek_readable(&is->sampq)))
2362  return -1;
2363  frame_queue_next(&is->sampq);
2364  } while (af->serial != is->audioq.serial);
2365 
2366  data_size = av_samples_get_buffer_size(NULL, af->frame->channels,
2367  af->frame->nb_samples,
2368  af->frame->format, 1);
2369 
2370  dec_channel_layout =
2371  (af->frame->channel_layout && af->frame->channels == av_get_channel_layout_nb_channels(af->frame->channel_layout)) ?
2372  af->frame->channel_layout : av_get_default_channel_layout(af->frame->channels);
2373  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2374 
2375  if (af->frame->format != is->audio_src.fmt ||
2376  dec_channel_layout != is->audio_src.channel_layout ||
2377  af->frame->sample_rate != is->audio_src.freq ||
2378  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2379  swr_free(&is->swr_ctx);
2380  is->swr_ctx = swr_alloc_set_opts(NULL,
2381  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2382  dec_channel_layout, af->frame->format, af->frame->sample_rate,
2383  0, NULL);
2384  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2385  av_log(NULL, AV_LOG_ERROR,
2386  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2387  af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), af->frame->channels,
2388  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2389  swr_free(&is->swr_ctx);
2390  return -1;
2391  }
2392  is->audio_src.channel_layout = dec_channel_layout;
2393  is->audio_src.channels = af->frame->channels;
2394  is->audio_src.freq = af->frame->sample_rate;
2395  is->audio_src.fmt = af->frame->format;
2396  }
2397 
2398  if (is->swr_ctx) {
2399  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2400  uint8_t **out = &is->audio_buf1;
2401  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2402  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
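 /* out_count rescales the wanted sample count to the output rate and adds 256
  * samples of headroom, so swr_convert() has room for samples buffered inside
  * the resampler and for sync compensation stretching the output slightly. */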
2403  int len2;
2404  if (out_size < 0) {
2405  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2406  return -1;
2407  }
2408  if (wanted_nb_samples != af->frame->nb_samples) {
2409  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2410  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2411  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2412  return -1;
2413  }
2414  }
2415  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2416  if (!is->audio_buf1)
2417  return AVERROR(ENOMEM);
2418  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2419  if (len2 < 0) {
2420  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2421  return -1;
2422  }
2423  if (len2 == out_count) {
2424  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2425  if (swr_init(is->swr_ctx) < 0)
2426  swr_free(&is->swr_ctx);
2427  }
2428  is->audio_buf = is->audio_buf1;
2429  resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2430  } else {
2431  is->audio_buf = af->frame->data[0];
2432  resampled_data_size = data_size;
2433  }
2434 
2435  audio_clock0 = is->audio_clock;
2436  /* update the audio clock with the pts */
2437  if (!isnan(af->pts))
2438  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2439  else
2440  is->audio_clock = NAN;
2441  is->audio_clock_serial = af->serial;
2442 #ifdef DEBUG
2443  {
2444  static double last_clock;
2445  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2446  is->audio_clock - last_clock,
2447  is->audio_clock, audio_clock0);
2448  last_clock = is->audio_clock;
2449  }
2450 #endif
2451  return resampled_data_size;
2452 }
2453 
2454 /* prepare a new audio buffer */
2455 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2456 {
2457  VideoState *is = opaque;
2458  int audio_size, len1;
2459 
2460  audio_callback_time = av_gettime_relative();
2461 
2462  while (len > 0) {
2463  if (is->audio_buf_index >= is->audio_buf_size) {
2464  audio_size = audio_decode_frame(is);
2465  if (audio_size < 0) {
2466  /* if error, just output silence */
2467  is->audio_buf = NULL;
2468  is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2469  } else {
2470  if (is->show_mode != SHOW_MODE_VIDEO)
2471  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2472  is->audio_buf_size = audio_size;
2473  }
2474  is->audio_buf_index = 0;
2475  }
2476  len1 = is->audio_buf_size - is->audio_buf_index;
2477  if (len1 > len)
2478  len1 = len;
2479  if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
2480  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2481  else {
2482  memset(stream, 0, len1);
2483  if (!is->muted && is->audio_buf)
2484  SDL_MixAudioFormat(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, is->audio_volume);
2485  }
2486  len -= len1;
2487  stream += len1;
2488  is->audio_buf_index += len1;
2489  }
2490  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2491  /* Let's assume the audio driver that is used by SDL has two periods. */
2492  if (!isnan(is->audio_clock)) {
2493  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2494  sync_clock_to_slave(&is->extclk, &is->audclk);
2495  }
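 /* The pts passed to set_clock_at() above is the clock of the last decoded sample
  * minus the audio still queued for playback: two full hardware buffers (the
  * assumption stated above) plus the unwritten remainder of the current buffer,
  * converted to seconds via bytes_per_sec. */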
2496 }
2497 
2498 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2499 {
2500  SDL_AudioSpec wanted_spec, spec;
2501  const char *env;
2502  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2503  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2504  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2505 
2506  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2507  if (env) {
2508  wanted_nb_channels = atoi(env);
2509  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2510  }
2511  if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2512  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2513  wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2514  }
2515  wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2516  wanted_spec.channels = wanted_nb_channels;
2517  wanted_spec.freq = wanted_sample_rate;
2518  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2519  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2520  return -1;
2521  }
2522  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2523  next_sample_rate_idx--;
2524  wanted_spec.format = AUDIO_S16SYS;
2525  wanted_spec.silence = 0;
2526  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
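 /* 2 << av_log2(x) is the smallest power of two strictly greater than x, so the
  * buffer holds at least freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC samples and the
  * callback fires at most ~30 times per second; e.g. 44100 Hz gives
  * 2 << av_log2(1470) = 2048 samples per callback (roughly 46 ms). */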
2527  wanted_spec.callback = sdl_audio_callback;
2528  wanted_spec.userdata = opaque;
2529  while (!(audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE))) {
2530  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2531  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2532  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2533  if (!wanted_spec.channels) {
2534  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2535  wanted_spec.channels = wanted_nb_channels;
2536  if (!wanted_spec.freq) {
2537  av_log(NULL, AV_LOG_ERROR,
2538  "No more combinations to try, audio open failed\n");
2539  return -1;
2540  }
2541  }
2542  wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2543  }
2544  if (spec.format != AUDIO_S16SYS) {
2545  av_log(NULL, AV_LOG_ERROR,
2546  "SDL advised audio format %d is not supported!\n", spec.format);
2547  return -1;
2548  }
2549  if (spec.channels != wanted_spec.channels) {
2550  wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2551  if (!wanted_channel_layout) {
2552  av_log(NULL, AV_LOG_ERROR,
2553  "SDL advised channel count %d is not supported!\n", spec.channels);
2554  return -1;
2555  }
2556  }
2557 
2558  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2559  audio_hw_params->freq = spec.freq;
2560  audio_hw_params->channel_layout = wanted_channel_layout;
2561  audio_hw_params->channels = spec.channels;
2562  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
2563  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2564  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2565  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2566  return -1;
2567  }
2568  return spec.size;
2569 }
2570 
2571 /* open a given stream. Return 0 if OK */
2572 static int stream_component_open(VideoState *is, int stream_index)
2573 {
2574  AVFormatContext *ic = is->ic;
2575  AVCodecContext *avctx;
2576  const AVCodec *codec;
2577  const char *forced_codec_name = NULL;
2578  AVDictionary *opts = NULL;
2579  AVDictionaryEntry *t = NULL;
2580  int sample_rate, nb_channels;
2581  int64_t channel_layout;
2582  int ret = 0;
2583  int stream_lowres = lowres;
2584 
2585  if (stream_index < 0 || stream_index >= ic->nb_streams)
2586  return -1;
2587 
2588  avctx = avcodec_alloc_context3(NULL);
2589  if (!avctx)
2590  return AVERROR(ENOMEM);
2591 
2592  ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
2593  if (ret < 0)
2594  goto fail;
2595  avctx->pkt_timebase = ic->streams[stream_index]->time_base;
2596 
2597  codec = avcodec_find_decoder(avctx->codec_id);
2598 
2599  switch(avctx->codec_type){
2600  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2601  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2602  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2603  }
2604  if (forced_codec_name)
2605  codec = avcodec_find_decoder_by_name(forced_codec_name);
2606  if (!codec) {
2607  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2608  "No codec could be found with name '%s'\n", forced_codec_name);
2609  else av_log(NULL, AV_LOG_WARNING,
2610  "No decoder could be found for codec %s\n", avcodec_get_name(avctx->codec_id));
2611  ret = AVERROR(EINVAL);
2612  goto fail;
2613  }
2614 
2615  avctx->codec_id = codec->id;
2616  if (stream_lowres > codec->max_lowres) {
2617  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2618  codec->max_lowres);
2619  stream_lowres = codec->max_lowres;
2620  }
2621  avctx->lowres = stream_lowres;
2622 
2623  if (fast)
2624  avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2625 
2626  opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2627  if (!av_dict_get(opts, "threads", NULL, 0))
2628  av_dict_set(&opts, "threads", "auto", 0);
2629  if (stream_lowres)
2630  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2631  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2632  goto fail;
2633  }
2634  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2635  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2636  ret = AVERROR_OPTION_NOT_FOUND;
2637  goto fail;
2638  }
2639 
2640  is->eof = 0;
2641  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2642  switch (avctx->codec_type) {
2643  case AVMEDIA_TYPE_AUDIO:
2644 #if CONFIG_AVFILTER
2645  {
2646  AVFilterContext *sink;
2647 
2648  is->audio_filter_src.freq = avctx->sample_rate;
2649  is->audio_filter_src.channels = avctx->channels;
2650  is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2651  is->audio_filter_src.fmt = avctx->sample_fmt;
2652  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2653  goto fail;
2654  sink = is->out_audio_filter;
2655  sample_rate = av_buffersink_get_sample_rate(sink);
2656  nb_channels = av_buffersink_get_channels(sink);
2657  channel_layout = av_buffersink_get_channel_layout(sink);
2658  }
2659 #else
2660  sample_rate = avctx->sample_rate;
2661  nb_channels = avctx->channels;
2662  channel_layout = avctx->channel_layout;
2663 #endif
2664 
2665  /* prepare audio output */
2666  if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2667  goto fail;
2668  is->audio_hw_buf_size = ret;
2669  is->audio_src = is->audio_tgt;
2670  is->audio_buf_size = 0;
2671  is->audio_buf_index = 0;
2672 
2673  /* init averaging filter */
2674  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2675  is->audio_diff_avg_count = 0;
2676  /* since we do not have a precise enough audio FIFO fullness measure,
2677  we correct audio sync only if the error is larger than this threshold */
2678  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
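 /* In other words, sync corrections smaller than the playback time of one
  * hardware audio buffer are treated as measurement noise and ignored. */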
2679 
2680  is->audio_stream = stream_index;
2681  is->audio_st = ic->streams[stream_index];
2682 
2683  if ((ret = decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread)) < 0)
2684  goto fail;
2685  if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
2686  is->auddec.start_pts = is->audio_st->start_time;
2687  is->auddec.start_pts_tb = is->audio_st->time_base;
2688  }
2689  if ((ret = decoder_start(&is->auddec, audio_thread, "audio_decoder", is)) < 0)
2690  goto out;
2691  SDL_PauseAudioDevice(audio_dev, 0);
2692  break;
2693  case AVMEDIA_TYPE_VIDEO:
2694  is->video_stream = stream_index;
2695  is->video_st = ic->streams[stream_index];
2696 
2697  if ((ret = decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread)) < 0)
2698  goto fail;
2699  if ((ret = decoder_start(&is->viddec, video_thread, "video_decoder", is)) < 0)
2700  goto out;
2701  is->queue_attachments_req = 1;
2702  break;
2703  case AVMEDIA_TYPE_SUBTITLE:
2704  is->subtitle_stream = stream_index;
2705  is->subtitle_st = ic->streams[stream_index];
2706 
2707  if ((ret = decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread)) < 0)
2708  goto fail;
2709  if ((ret = decoder_start(&is->subdec, subtitle_thread, "subtitle_decoder", is)) < 0)
2710  goto out;
2711  break;
2712  default:
2713  break;
2714  }
2715  goto out;
2716 
2717 fail:
2718  avcodec_free_context(&avctx);
2719 out:
2720  av_dict_free(&opts);
2721 
2722  return ret;
2723 }
2724 
2725 static int decode_interrupt_cb(void *ctx)
2726 {
2727  VideoState *is = ctx;
2728  return is->abort_request;
2729 }
2730 
2731 static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue) {
2732  return stream_id < 0 ||
2733  queue->abort_request ||
2734  (st->disposition & AV_DISPOSITION_ATTACHED_PIC) ||
2735  queue->nb_packets > MIN_FRAMES && (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0);
2736 }
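 /* Heuristic used above: a stream counts as sufficiently buffered once its queue
  * holds more than MIN_FRAMES packets and, when packet durations are known, more
  * than about one second of data; attached pictures and aborted queues always
  * count as "enough" so they never stall the read loop. */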
2737 
2738 static int is_realtime(AVFormatContext *s)
2739 {
2740  if( !strcmp(s->iformat->name, "rtp")
2741  || !strcmp(s->iformat->name, "rtsp")
2742  || !strcmp(s->iformat->name, "sdp")
2743  )
2744  return 1;
2745 
2746  if(s->pb && ( !strncmp(s->url, "rtp:", 4)
2747  || !strncmp(s->url, "udp:", 4)
2748  )
2749  )
2750  return 1;
2751  return 0;
2752 }
2753 
2754 /* this thread gets the stream from the disk or the network */
2755 static int read_thread(void *arg)
2756 {
2757  VideoState *is = arg;
2758  AVFormatContext *ic = NULL;
2759  int err, i, ret;
2760  int st_index[AVMEDIA_TYPE_NB];
2761  AVPacket *pkt = NULL;
2762  int64_t stream_start_time;
2763  int pkt_in_play_range = 0;
2764  AVDictionaryEntry *t;
2765  SDL_mutex *wait_mutex = SDL_CreateMutex();
2766  int scan_all_pmts_set = 0;
2767  int64_t pkt_ts;
2768 
2769  if (!wait_mutex) {
2770  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
2771  ret = AVERROR(ENOMEM);
2772  goto fail;
2773  }
2774 
2775  memset(st_index, -1, sizeof(st_index));
2776  is->eof = 0;
2777 
2778  pkt = av_packet_alloc();
2779  if (!pkt) {
2780  av_log(NULL, AV_LOG_FATAL, "Could not allocate packet.\n");
2781  ret = AVERROR(ENOMEM);
2782  goto fail;
2783  }
2784  ic = avformat_alloc_context();
2785  if (!ic) {
2786  av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2787  ret = AVERROR(ENOMEM);
2788  goto fail;
2789  }
2790  ic->interrupt_callback.callback = decode_interrupt_cb;
2791  ic->interrupt_callback.opaque = is;
2792  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2793  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2794  scan_all_pmts_set = 1;
2795  }
2796  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2797  if (err < 0) {
2798  print_error(is->filename, err);
2799  ret = -1;
2800  goto fail;
2801  }
2802  if (scan_all_pmts_set)
2803  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2804 
2805  if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2806  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2807  ret = AVERROR_OPTION_NOT_FOUND;
2808  goto fail;
2809  }
2810  is->ic = ic;
2811 
2812  if (genpts)
2813  ic->flags |= AVFMT_FLAG_GENPTS;
2814 
2815  av_format_inject_global_side_data(ic);
2816 
2817  if (find_stream_info) {
2818  AVDictionary **opts = setup_find_stream_info_opts(ic, codec_opts);
2819  int orig_nb_streams = ic->nb_streams;
2820 
2821  err = avformat_find_stream_info(ic, opts);
2822 
2823  for (i = 0; i < orig_nb_streams; i++)
2824  av_dict_free(&opts[i]);
2825  av_freep(&opts);
2826 
2827  if (err < 0) {
2828  av_log(NULL, AV_LOG_WARNING,
2829  "%s: could not find codec parameters\n", is->filename);
2830  ret = -1;
2831  goto fail;
2832  }
2833  }
2834 
2835  if (ic->pb)
2836  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2837 
2838  if (seek_by_bytes < 0)
2839  seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
2840 
2841  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2842 
2843  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2844  window_title = av_asprintf("%s - %s", t->value, input_filename);
2845 
2846  /* if seeking requested, we execute it */
2847  if (start_time != AV_NOPTS_VALUE) {
2848  int64_t timestamp;
2849 
2850  timestamp = start_time;
2851  /* add the stream start time */
2852  if (ic->start_time != AV_NOPTS_VALUE)
2853  timestamp += ic->start_time;
2854  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2855  if (ret < 0) {
2856  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2857  is->filename, (double)timestamp / AV_TIME_BASE);
2858  }
2859  }
2860 
2861  is->realtime = is_realtime(ic);
2862 
2863  if (show_status)
2864  av_dump_format(ic, 0, is->filename, 0);
2865 
2866  for (i = 0; i < ic->nb_streams; i++) {
2867  AVStream *st = ic->streams[i];
2868  enum AVMediaType type = st->codecpar->codec_type;
2869  st->discard = AVDISCARD_ALL;
2870  if (type >= 0 && wanted_stream_spec[type] && st_index[type] == -1)
2871  if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
2872  st_index[type] = i;
2873  }
2874  for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
2875  if (wanted_stream_spec[i] && st_index[i] == -1) {
2876  av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
2877  st_index[i] = INT_MAX;
2878  }
2879  }
2880 
2881  if (!video_disable)
2882  st_index[AVMEDIA_TYPE_VIDEO] =
2883  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2884  st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2885  if (!audio_disable)
2886  st_index[AVMEDIA_TYPE_AUDIO] =
2887  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2888  st_index[AVMEDIA_TYPE_AUDIO],
2889  st_index[AVMEDIA_TYPE_VIDEO],
2890  NULL, 0);
2891  if (!video_disable && !subtitle_disable)
2892  st_index[AVMEDIA_TYPE_SUBTITLE] =
2893  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2894  st_index[AVMEDIA_TYPE_SUBTITLE],
2895  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2896  st_index[AVMEDIA_TYPE_AUDIO] :
2897  st_index[AVMEDIA_TYPE_VIDEO]),
2898  NULL, 0);
2899 
2900  is->show_mode = show_mode;
2901  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2902  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2903  AVCodecParameters *codecpar = st->codecpar;
2904  AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
2905  if (codecpar->width)
2906  set_default_window_size(codecpar->width, codecpar->height, sar);
2907  }
2908 
2909  /* open the streams */
2910  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2911  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2912  }
2913 
2914  ret = -1;
2915  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2916  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2917  }
2918  if (is->show_mode == SHOW_MODE_NONE)
2919  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2920 
2921  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2922  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2923  }
2924 
2925  if (is->video_stream < 0 && is->audio_stream < 0) {
2926  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
2927  is->filename);
2928  ret = -1;
2929  goto fail;
2930  }
2931 
2932  if (infinite_buffer < 0 && is->realtime)
2933  infinite_buffer = 1;
2934 
2935  for (;;) {
2936  if (is->abort_request)
2937  break;
2938  if (is->paused != is->last_paused) {
2939  is->last_paused = is->paused;
2940  if (is->paused)
2941  is->read_pause_return = av_read_pause(ic);
2942  else
2943  av_read_play(ic);
2944  }
2945 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2946  if (is->paused &&
2947  (!strcmp(ic->iformat->name, "rtsp") ||
2948  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2949  /* wait 10 ms to avoid trying to get another packet */
2950  /* XXX: horrible */
2951  SDL_Delay(10);
2952  continue;
2953  }
2954 #endif
2955  if (is->seek_req) {
2956  int64_t seek_target = is->seek_pos;
2957  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2958  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2959 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2960 // of the seek_pos/seek_rel variables
2961 
2962  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2963  if (ret < 0) {
2964  av_log(NULL, AV_LOG_ERROR,
2965  "%s: error while seeking\n", is->ic->url);
2966  } else {
2967  if (is->audio_stream >= 0)
2968  packet_queue_flush(&is->audioq);
2969  if (is->subtitle_stream >= 0)
2970  packet_queue_flush(&is->subtitleq);
2971  if (is->video_stream >= 0)
2972  packet_queue_flush(&is->videoq);
2973  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
2974  set_clock(&is->extclk, NAN, 0);
2975  } else {
2976  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
2977  }
2978  }
2979  is->seek_req = 0;
2980  is->queue_attachments_req = 1;
2981  is->eof = 0;
2982  if (is->paused)
2983  step_to_next_frame(is);
2984  }
2985  if (is->queue_attachments_req) {
2986  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
2987  if ((ret = av_packet_ref(pkt, &is->video_st->attached_pic)) < 0)
2988  goto fail;
2989  packet_queue_put(&is->videoq, pkt);
2990  packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
2991  }
2992  is->queue_attachments_req = 0;
2993  }
2994 
2995  /* if the queues are full, no need to read more */
2996  if (infinite_buffer<1 &&
2997  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2998  || (stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq) &&
2999  stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq) &&
3000  stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq)))) {
3001  /* wait 10 ms */
3002  SDL_LockMutex(wait_mutex);
3003  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3004  SDL_UnlockMutex(wait_mutex);
3005  continue;
3006  }
3007  if (!is->paused &&
3008  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
3009  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
3010  if (loop != 1 && (!loop || --loop)) {
3011  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
3012  } else if (autoexit) {
3013  ret = AVERROR_EOF;
3014  goto fail;
3015  }
3016  }
3017  ret = av_read_frame(ic, pkt);
3018  if (ret < 0) {
3019  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
3020  if (is->video_stream >= 0)
3021  packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
3022  if (is->audio_stream >= 0)
3023  packet_queue_put_nullpacket(&is->audioq, pkt, is->audio_stream);
3024  if (is->subtitle_stream >= 0)
3025  packet_queue_put_nullpacket(&is->subtitleq, pkt, is->subtitle_stream);
3026  is->eof = 1;
3027  }
3028  if (ic->pb && ic->pb->error) {
3029  if (autoexit)
3030  goto fail;
3031  else
3032  break;
3033  }
3034  SDL_LockMutex(wait_mutex);
3035  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3036  SDL_UnlockMutex(wait_mutex);
3037  continue;
3038  } else {
3039  is->eof = 0;
3040  }
3041  /* check if packet is in play range specified by user, then queue, otherwise discard */
3042  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3043  pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
3044  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3045  (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3046  av_q2d(ic->streams[pkt->stream_index]->time_base) -
3047  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3048  <= ((double)duration / 1000000);
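 /* The test above converts the packet timestamp to seconds with the stream
  * time_base, subtracts the stream start time and the -ss offset, and keeps the
  * packet if that falls within the -t duration (or if no duration was given). */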
3049  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3050  packet_queue_put(&is->audioq, pkt);
3051  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3052  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3053  packet_queue_put(&is->videoq, pkt);
3054  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3055  packet_queue_put(&is->subtitleq, pkt);
3056  } else {
3057  av_packet_unref(pkt);
3058  }
3059  }
3060 
3061  ret = 0;
3062  fail:
3063  if (ic && !is->ic)
3064  avformat_close_input(&ic);
3065 
3066  av_packet_free(&pkt);
3067  if (ret != 0) {
3068  SDL_Event event;
3069 
3070  event.type = FF_QUIT_EVENT;
3071  event.user.data1 = is;
3072  SDL_PushEvent(&event);
3073  }
3074  SDL_DestroyMutex(wait_mutex);
3075  return 0;
3076 }
3077 
3078 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
3079 {
3080  VideoState *is;
3081 
3082  is = av_mallocz(sizeof(VideoState));
3083  if (!is)
3084  return NULL;
3085  is->last_video_stream = is->video_stream = -1;
3086  is->last_audio_stream = is->audio_stream = -1;
3087  is->last_subtitle_stream = is->subtitle_stream = -1;
3088  is->filename = av_strdup(filename);
3089  if (!is->filename)
3090  goto fail;
3091  is->iformat = iformat;
3092  is->ytop = 0;
3093  is->xleft = 0;
3094 
3095  /* start video display */
3096  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3097  goto fail;
3098  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3099  goto fail;
3100  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3101  goto fail;
3102 
3103  if (packet_queue_init(&is->videoq) < 0 ||
3104  packet_queue_init(&is->audioq) < 0 ||
3105  packet_queue_init(&is->subtitleq) < 0)
3106  goto fail;
3107 
3108  if (!(is->continue_read_thread = SDL_CreateCond())) {
3109  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
3110  goto fail;
3111  }
3112 
3113  init_clock(&is->vidclk, &is->videoq.serial);
3114  init_clock(&is->audclk, &is->audioq.serial);
3115  init_clock(&is->extclk, &is->extclk.serial);
3116  is->audio_clock_serial = -1;
3117  if (startup_volume < 0)
3118  av_log(NULL, AV_LOG_WARNING, "-volume=%d < 0, setting to 0\n", startup_volume);
3119  if (startup_volume > 100)
3120  av_log(NULL, AV_LOG_WARNING, "-volume=%d > 100, setting to 100\n", startup_volume);
3122  startup_volume = av_clip(SDL_MIX_MAXVOLUME * startup_volume / 100, 0, SDL_MIX_MAXVOLUME);
3123  is->audio_volume = startup_volume;
3124  is->muted = 0;
3125  is->av_sync_type = av_sync_type;
3126  is->read_tid = SDL_CreateThread(read_thread, "read_thread", is);
3127  if (!is->read_tid) {
3128  av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
3129 fail:
3130  stream_close(is);
3131  return NULL;
3132  }
3133  return is;
3134 }
3135 
3136 static void stream_cycle_channel(VideoState *is, int codec_type)
3137 {
3138  AVFormatContext *ic = is->ic;
3139  int start_index, stream_index;
3140  int old_index;
3141  AVStream *st;
3142  AVProgram *p = NULL;
3143  int nb_streams = is->ic->nb_streams;
3144 
3145  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3146  start_index = is->last_video_stream;
3147  old_index = is->video_stream;
3148  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3149  start_index = is->last_audio_stream;
3150  old_index = is->audio_stream;
3151  } else {
3152  start_index = is->last_subtitle_stream;
3153  old_index = is->subtitle_stream;
3154  }
3155  stream_index = start_index;
3156 
3157  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3158  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3159  if (p) {
3160  nb_streams = p->nb_stream_indexes;
3161  for (start_index = 0; start_index < nb_streams; start_index++)
3162  if (p->stream_index[start_index] == stream_index)
3163  break;
3164  if (start_index == nb_streams)
3165  start_index = -1;
3166  stream_index = start_index;
3167  }
3168  }
3169 
3170  for (;;) {
3171  if (++stream_index >= nb_streams)
3172  {
3173  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3174  {
3175  stream_index = -1;
3176  is->last_subtitle_stream = -1;
3177  goto the_end;
3178  }
3179  if (start_index == -1)
3180  return;
3181  stream_index = 0;
3182  }
3183  if (stream_index == start_index)
3184  return;
3185  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3186  if (st->codecpar->codec_type == codec_type) {
3187  /* check that parameters are OK */
3188  switch (codec_type) {
3189  case AVMEDIA_TYPE_AUDIO:
3190  if (st->codecpar->sample_rate != 0 &&
3191  st->codecpar->channels != 0)
3192  goto the_end;
3193  break;
3194  case AVMEDIA_TYPE_VIDEO:
3195  case AVMEDIA_TYPE_SUBTITLE:
3196  goto the_end;
3197  default:
3198  break;
3199  }
3200  }
3201  }
3202  the_end:
3203  if (p && stream_index != -1)
3204  stream_index = p->stream_index[stream_index];
3205  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3207  old_index,
3208  stream_index);
3209 
3210  stream_component_close(is, old_index);
3211  stream_component_open(is, stream_index);
3212 }
3213 
3214 
3215 static void toggle_full_screen(VideoState *is)
3216 {
3217  is_full_screen = !is_full_screen;
3218  SDL_SetWindowFullscreen(window, is_full_screen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0);
3219 }
3220 
3221 static void toggle_audio_display(VideoState *is)
3222 {
3223  int next = is->show_mode;
3224  do {
3225  next = (next + 1) % SHOW_MODE_NB;
3226  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3227  if (is->show_mode != next) {
3228  is->force_refresh = 1;
3229  is->show_mode = next;
3230  }
3231 }
3232 
3233 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3234  double remaining_time = 0.0;
3235  SDL_PumpEvents();
3236  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
3237  if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3238  SDL_ShowCursor(0);
3239  cursor_hidden = 1;
3240  }
3241  if (remaining_time > 0.0)
3242  av_usleep((int64_t)(remaining_time * 1000000.0));
3243  remaining_time = REFRESH_RATE;
3244  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3245  video_refresh(is, &remaining_time);
3246  SDL_PumpEvents();
3247  }
3248 }
3249 
3250 static void seek_chapter(VideoState *is, int incr)
3251 {
3252  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3253  int i;
3254 
3255  if (!is->ic->nb_chapters)
3256  return;
3257 
3258  /* find the current chapter */
3259  for (i = 0; i < is->ic->nb_chapters; i++) {
3260  AVChapter *ch = is->ic->chapters[i];
3261  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3262  i--;
3263  break;
3264  }
3265  }
3266 
3267  i += incr;
3268  i = FFMAX(i, 0);
3269  if (i >= is->ic->nb_chapters)
3270  return;
3271 
3272  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3273  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3274  AV_TIME_BASE_Q), 0, 0);
3275 }
3276 
3277 /* handle an event sent by the GUI */
3278 static void event_loop(VideoState *cur_stream)
3279 {
3280  SDL_Event event;
3281  double incr, pos, frac;
3282 
3283  for (;;) {
3284  double x;
3285  refresh_loop_wait_event(cur_stream, &event);
3286  switch (event.type) {
3287  case SDL_KEYDOWN:
3288  if (exit_on_keydown || event.key.keysym.sym == SDLK_ESCAPE || event.key.keysym.sym == SDLK_q) {
3289  do_exit(cur_stream);
3290  break;
3291  }
3292  // If we don't yet have a window, skip all key events, because read_thread might still be initializing...
3293  if (!cur_stream->width)
3294  continue;
3295  switch (event.key.keysym.sym) {
3296  case SDLK_f:
3297  toggle_full_screen(cur_stream);
3298  cur_stream->force_refresh = 1;
3299  break;
3300  case SDLK_p:
3301  case SDLK_SPACE:
3302  toggle_pause(cur_stream);
3303  break;
3304  case SDLK_m:
3305  toggle_mute(cur_stream);
3306  break;
3307  case SDLK_KP_MULTIPLY:
3308  case SDLK_0:
3309  update_volume(cur_stream, 1, SDL_VOLUME_STEP);
3310  break;
3311  case SDLK_KP_DIVIDE:
3312  case SDLK_9:
3313  update_volume(cur_stream, -1, SDL_VOLUME_STEP);
3314  break;
3315  case SDLK_s: // S: Step to next frame
3316  step_to_next_frame(cur_stream);
3317  break;
3318  case SDLK_a:
3319  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3320  break;
3321  case SDLK_v:
3322  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3323  break;
3324  case SDLK_c:
3325  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3326  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3327  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3328  break;
3329  case SDLK_t:
3330  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3331  break;
3332  case SDLK_w:
3333 #if CONFIG_AVFILTER
3334  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3335  if (++cur_stream->vfilter_idx >= nb_vfilters)
3336  cur_stream->vfilter_idx = 0;
3337  } else {
3338  cur_stream->vfilter_idx = 0;
3339  toggle_audio_display(cur_stream);
3340  }
3341 #else
3342  toggle_audio_display(cur_stream);
3343 #endif
3344  break;
3345  case SDLK_PAGEUP:
3346  if (cur_stream->ic->nb_chapters <= 1) {
3347  incr = 600.0;
3348  goto do_seek;
3349  }
3350  seek_chapter(cur_stream, 1);
3351  break;
3352  case SDLK_PAGEDOWN:
3353  if (cur_stream->ic->nb_chapters <= 1) {
3354  incr = -600.0;
3355  goto do_seek;
3356  }
3357  seek_chapter(cur_stream, -1);
3358  break;
3359  case SDLK_LEFT:
3360  incr = seek_interval ? -seek_interval : -10.0;
3361  goto do_seek;
3362  case SDLK_RIGHT:
3363  incr = seek_interval ? seek_interval : 10.0;
3364  goto do_seek;
3365  case SDLK_UP:
3366  incr = 60.0;
3367  goto do_seek;
3368  case SDLK_DOWN:
3369  incr = -60.0;
3370  do_seek:
3371  if (seek_by_bytes) {
3372  pos = -1;
3373  if (pos < 0 && cur_stream->video_stream >= 0)
3374  pos = frame_queue_last_pos(&cur_stream->pictq);
3375  if (pos < 0 && cur_stream->audio_stream >= 0)
3376  pos = frame_queue_last_pos(&cur_stream->sampq);
3377  if (pos < 0)
3378  pos = avio_tell(cur_stream->ic->pb);
3379  if (cur_stream->ic->bit_rate)
3380  incr *= cur_stream->ic->bit_rate / 8.0;
3381  else
3382  incr *= 180000.0;
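 /* Rough fallback when the container reports no bit rate: assume about
  * 180 kB/s (~1.4 Mbit/s), so a 10-second arrow-key seek moves roughly 1.8 MB. */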
3383  pos += incr;
3384  stream_seek(cur_stream, pos, incr, 1);
3385  } else {
3386  pos = get_master_clock(cur_stream);
3387  if (isnan(pos))
3388  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3389  pos += incr;
3390  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3391  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3392  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3393  }
3394  break;
3395  default:
3396  break;
3397  }
3398  break;
3399  case SDL_MOUSEBUTTONDOWN:
3400  if (exit_on_mousedown) {
3401  do_exit(cur_stream);
3402  break;
3403  }
3404  if (event.button.button == SDL_BUTTON_LEFT) {
3405  static int64_t last_mouse_left_click = 0;
3406  if (av_gettime_relative() - last_mouse_left_click <= 500000) {
3407  toggle_full_screen(cur_stream);
3408  cur_stream->force_refresh = 1;
3409  last_mouse_left_click = 0;
3410  } else {
3411  last_mouse_left_click = av_gettime_relative();
3412  }
3413  }
3414  case SDL_MOUSEMOTION:
3415  if (cursor_hidden) {
3416  SDL_ShowCursor(1);
3417  cursor_hidden = 0;
3418  }
3419  cursor_last_shown = av_gettime_relative();
3420  if (event.type == SDL_MOUSEBUTTONDOWN) {
3421  if (event.button.button != SDL_BUTTON_RIGHT)
3422  break;
3423  x = event.button.x;
3424  } else {
3425  if (!(event.motion.state & SDL_BUTTON_RMASK))
3426  break;
3427  x = event.motion.x;
3428  }
3429  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3430  uint64_t size = avio_size(cur_stream->ic->pb);
3431  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3432  } else {
3433  int64_t ts;
3434  int ns, hh, mm, ss;
3435  int tns, thh, tmm, tss;
3436  tns = cur_stream->ic->duration / 1000000LL;
3437  thh = tns / 3600;
3438  tmm = (tns % 3600) / 60;
3439  tss = (tns % 60);
3440  frac = x / cur_stream->width;
3441  ns = frac * tns;
3442  hh = ns / 3600;
3443  mm = (ns % 3600) / 60;
3444  ss = (ns % 60);
3445  av_log(NULL, AV_LOG_INFO,
3446  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3447  hh, mm, ss, thh, tmm, tss);
3448  ts = frac * cur_stream->ic->duration;
3449  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3450  ts += cur_stream->ic->start_time;
3451  stream_seek(cur_stream, ts, 0, 0);
3452  }
3453  break;
3454  case SDL_WINDOWEVENT:
3455  switch (event.window.event) {
3456  case SDL_WINDOWEVENT_SIZE_CHANGED:
3457  screen_width = cur_stream->width = event.window.data1;
3458  screen_height = cur_stream->height = event.window.data2;
3459  if (cur_stream->vis_texture) {
3460  SDL_DestroyTexture(cur_stream->vis_texture);
3461  cur_stream->vis_texture = NULL;
3462  }
3463  case SDL_WINDOWEVENT_EXPOSED:
3464  cur_stream->force_refresh = 1;
3465  }
3466  break;
3467  case SDL_QUIT:
3468  case FF_QUIT_EVENT:
3469  do_exit(cur_stream);
3470  break;
3471  default:
3472  break;
3473  }
3474  }
3475 }
3476 
3477 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3478 {
3479  av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3480  return opt_default(NULL, "video_size", arg);
3481 }
3482 
3483 static int opt_width(void *optctx, const char *opt, const char *arg)
3484 {
3485  screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3486  return 0;
3487 }
3488 
3489 static int opt_height(void *optctx, const char *opt, const char *arg)
3490 {
3491  screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3492  return 0;
3493 }
3494 
3495 static int opt_format(void *optctx, const char *opt, const char *arg)
3496 {
3497  file_iformat = av_find_input_format(arg);
3498  if (!file_iformat) {
3499  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3500  return AVERROR(EINVAL);
3501  }
3502  return 0;
3503 }
3504 
3505 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3506 {
3507  av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3508  return opt_default(NULL, "pixel_format", arg);
3509 }
3510 
3511 static int opt_sync(void *optctx, const char *opt, const char *arg)
3512 {
3513  if (!strcmp(arg, "audio"))
3515  else if (!strcmp(arg, "video"))
3517  else if (!strcmp(arg, "ext"))
3519  else {
3520  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3521  exit(1);
3522  }
3523  return 0;
3524 }
3525 
3526 static int opt_seek(void *optctx, const char *opt, const char *arg)
3527 {
3528  start_time = parse_time_or_die(opt, arg, 1);
3529  return 0;
3530 }
3531 
3532 static int opt_duration(void *optctx, const char *opt, const char *arg)
3533 {
3534  duration = parse_time_or_die(opt, arg, 1);
3535  return 0;
3536 }
3537 
3538 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3539 {
3540  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3541  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3542  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3543  parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3544  return 0;
3545 }
3546 
3547 static void opt_input_file(void *optctx, const char *filename)
3548 {
3549  if (input_filename) {
3550  av_log(NULL, AV_LOG_FATAL,
3551  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3552  filename, input_filename);
3553  exit(1);
3554  }
3555  if (!strcmp(filename, "-"))
3556  filename = "pipe:";
3557  input_filename = filename;
3558 }
3559 
3560 static int opt_codec(void *optctx, const char *opt, const char *arg)
3561 {
3562  const char *spec = strchr(opt, ':');
3563  if (!spec) {
3564  av_log(NULL, AV_LOG_ERROR,
3565  "No media specifier was specified in '%s' in option '%s'\n",
3566  arg, opt);
3567  return AVERROR(EINVAL);
3568  }
3569  spec++;
3570  switch (spec[0]) {
3571  case 'a' : audio_codec_name = arg; break;
3572  case 's' : subtitle_codec_name = arg; break;
3573  case 'v' : video_codec_name = arg; break;
3574  default:
3575  av_log(NULL, AV_LOG_ERROR,
3576  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3577  return AVERROR(EINVAL);
3578  }
3579  return 0;
3580 }
3581 
3582 static int dummy;
3583 
3584 static const OptionDef options[] = {
3585  CMDUTILS_COMMON_OPTIONS
3586  { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3587  { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3588  { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3589  { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3590  { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3591  { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3592  { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3593  { "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3594  { "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3595  { "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3596  { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3597  { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3598  { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3599  { "seek_interval", OPT_FLOAT | HAS_ARG, { &seek_interval }, "set seek interval for left/right keys, in seconds", "seconds" },
3600  { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3601  { "noborder", OPT_BOOL, { &borderless }, "borderless window" },
3602  { "alwaysontop", OPT_BOOL, { &alwaysontop }, "window always on top" },
3603  { "volume", OPT_INT | HAS_ARG, { &startup_volume}, "set startup volume 0=min 100=max", "volume" },
3604  { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3605  { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3606  { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3607  { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3608  { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3609  { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3610  { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3611  { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3612  { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3613  { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3614  { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3615  { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3616  { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3617  { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3618  { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3619  { "left", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_left }, "set the x position for the left of the window", "x pos" },
3620  { "top", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_top }, "set the y position for the top of the window", "y pos" },
3621 #if CONFIG_AVFILTER
3622  { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3623  { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3624 #endif
3625  { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3626  { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3627  { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3628  { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3629  { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3630  { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3631  { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3632  { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3633  { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
3634  { "find_stream_info", OPT_BOOL | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
3635  "read and decode the streams to fill missing information with heuristics" },
3636  { "filter_threads", HAS_ARG | OPT_INT | OPT_EXPERT, { &filter_nbthreads }, "number of filter threads per graph" },
3637  { NULL, },
3638 };
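/* Illustrative invocations exercising some of the options in the table above
 * (editor's example, not part of ffplay.c; the input names are hypothetical):
 *
 *   ffplay -nodisp -autoexit song.mp3         audio-only playback, exit at end of file
 *   ffplay -ss 30 -t 10 movie.mkv             start 30 seconds in, play 10 seconds
 *   ffplay -sync ext -infbuf rtsp://host/cam  realtime source, external clock, unlimited input buffer
 */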
3639 
3640 static void show_usage(void)
3641 {
3642  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3643  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3644  av_log(NULL, AV_LOG_INFO, "\n");
3645 }
3646 
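/* Per-tool help handler: invoked from the shared cmdutils -h/--help handling to
 * print the usage line, the option tables above (split into main and advanced
 * options), the per-library AVOption help, and the key bindings listed below. */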
3647 void show_help_default(const char *opt, const char *arg)
3648 {
3649  av_log_set_callback(log_callback_help);
3650  show_usage();
3651  show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3652  show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3653  printf("\n");
3654  show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3655  show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3656 #if !CONFIG_AVFILTER
3657  show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3658 #else
3659  show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3660 #endif
3661  printf("\nWhile playing:\n"
3662  "q, ESC quit\n"
3663  "f toggle full screen\n"
3664  "p, SPC pause\n"
3665  "m toggle mute\n"
3666  "9, 0 decrease and increase volume respectively\n"
3667  "/, * decrease and increase volume respectively\n"
3668  "a cycle audio channel in the current program\n"
3669  "v cycle video channel\n"
3670  "t cycle subtitle channel in the current program\n"
3671  "c cycle program\n"
3672  "w cycle video filters or show modes\n"
3673  "s activate frame-step mode\n"
3674  "left/right seek backward/forward 10 seconds or to custom interval if -seek_interval is set\n"
3675  "down/up seek backward/forward 1 minute\n"
3676  "page down/page up seek backward/forward 10 minutes\n"
3677  "right mouse click seek to percentage in file corresponding to fraction of width\n"
3678  "left double-click toggle full screen\n"
3679  );
3680 }
3681 
3682 /* Program entry point */
3683 int main(int argc, char **argv)
3684 {
3685  int flags;
3686  VideoState *is;
3687 
3688  init_dynload();
3689 
3690  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3691  parse_loglevel(argc, argv, options);
3692 
3693  /* register all codecs, demux and protocols */
3694 #if CONFIG_AVDEVICE
3695  avdevice_register_all();
3696 #endif
3697  avformat_network_init();
3698 
3699  init_opts();
3700 
3701  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3702  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3703 
3704  show_banner(argc, argv, options);
3705 
3706  parse_options(NULL, argc, argv, options, opt_input_file);
3707 
3708  if (!input_filename) {
3709  show_usage();
3710  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3711  av_log(NULL, AV_LOG_FATAL,
3712  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3713  exit(1);
3714  }
3715 
3716  if (display_disable) {
3717  video_disable = 1;
3718  }
3719  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3720  if (audio_disable)
3721  flags &= ~SDL_INIT_AUDIO;
3722  else {
3723  /* Try to work around an occasional ALSA buffer underflow issue when the
3724  * period size is NPOT due to ALSA resampling by forcing the buffer size. */
3725  if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
3726  SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1);
3727  }
3728  if (display_disable)
3729  flags &= ~SDL_INIT_VIDEO;
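/* SDL_Init() returns 0 on success and a negative error code on failure,
 * so any non-zero return value is treated as fatal here. */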
3730  if (SDL_Init (flags)) {
3731  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3732  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3733  exit(1);
3734  }
3735 
3736  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3737  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3738 
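/* Video output bootstrap: the window is created hidden and only sized and shown
 * later, once the display dimensions of the stream are known. A hardware
 * accelerated, vsync'd renderer is preferred; if that cannot be created, the
 * code falls back to SDL's default renderer. */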
3739  if (!display_disable) {
3740  int flags = SDL_WINDOW_HIDDEN;
3741  if (alwaysontop)
3742 #if SDL_VERSION_ATLEAST(2,0,5)
3743  flags |= SDL_WINDOW_ALWAYS_ON_TOP;
3744 #else
3745  av_log(NULL, AV_LOG_WARNING, "Your SDL version doesn't support SDL_WINDOW_ALWAYS_ON_TOP. Feature will be inactive.\n");
3746 #endif
3747  if (borderless)
3748  flags |= SDL_WINDOW_BORDERLESS;
3749  else
3750  flags |= SDL_WINDOW_RESIZABLE;
3751  window = SDL_CreateWindow(program_name, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, default_width, default_height, flags);
3752  SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
3753  if (window) {
3754  renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
3755  if (!renderer) {
3756  av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
3757  renderer = SDL_CreateRenderer(window, -1, 0);
3758  }
3759  if (renderer) {
3760  if (!SDL_GetRendererInfo(renderer, &renderer_info))
3761  av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", renderer_info.name);
3762  }
3763  }
3764  if (!window || !renderer || !renderer_info.num_texture_formats) {
3765  av_log(NULL, AV_LOG_FATAL, "Failed to create window or renderer: %s", SDL_GetError());
3766  do_exit(NULL);
3767  }
3768  }
3769 
3770  is = stream_open(input_filename, file_iformat);
3771  if (!is) {
3772  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3773  do_exit(NULL);
3774  }
3775 
3776  event_loop(is);
3777 
3778  /* never returns */
3779 
3780  return 0;
3781 }
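
The SDL bootstrap performed by main() above can be reduced to the following standalone sketch (editor's example, not FFmpeg code; it assumes the SDL2 development headers and uses the conventional sdl2-config helper for the build line): create a hidden window, prefer an accelerated renderer with vertical sync, and fall back to SDL's default renderer when that fails.

/* Build (assumption): gcc sdl_bootstrap.c $(sdl2-config --cflags --libs) -o sdl_bootstrap */
#include <SDL.h>

int main(void)
{
    SDL_Window   *window   = NULL;
    SDL_Renderer *renderer = NULL;

    /* SDL_Init() returns 0 on success, negative on failure. */
    if (SDL_Init(SDL_INIT_VIDEO) != 0) {
        SDL_Log("Could not initialize SDL - %s", SDL_GetError());
        return 1;
    }

    /* Create the window hidden; a real player would show it once the video size is known. */
    window = SDL_CreateWindow("demo", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
                              640, 480, SDL_WINDOW_HIDDEN | SDL_WINDOW_RESIZABLE);
    if (window) {
        /* Preferred path: hardware accelerated renderer with vertical sync. */
        renderer = SDL_CreateRenderer(window, -1,
                                      SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
        if (!renderer) {
            SDL_Log("Falling back to the default renderer: %s", SDL_GetError());
            renderer = SDL_CreateRenderer(window, -1, 0);
        }
    }
    if (!window || !renderer) {
        SDL_Log("Failed to create window or renderer: %s", SDL_GetError());
        SDL_Quit();
        return 1;
    }

    /* The sketch exits immediately; ffplay would enter its event loop here. */
    SDL_DestroyRenderer(renderer);
    SDL_DestroyWindow(window);
    SDL_Quit();
    return 0;
}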
OPT_FLOAT
#define OPT_FLOAT
Definition: cmdutils.h:168
AVSubtitle
Definition: avcodec.h:2722
rect::w
int w
Definition: f_ebur128.c:91
sws_getCachedContext
struct SwsContext * sws_getCachedContext(struct SwsContext *context, int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Check if context can be reused, otherwise reallocate a new one.
Definition: utils.c:2397
do_exit
static void do_exit(VideoState *is)
Definition: ffplay.c:1299
VideoState::seek_rel
int64_t seek_rel
Definition: ffplay.c:215
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:634
AVCodec
AVCodec.
Definition: codec.h:197
opt_frame_size
static int opt_frame_size(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3477
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:204
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
VideoState::video_st
AVStream * video_st
Definition: ffplay.c:285
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:200
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
Frame::width
int width
Definition: ffplay.c:162
AVFMT_NO_BYTE_SEEK
#define AVFMT_NO_BYTE_SEEK
Format does not allow seeking by bytes.
Definition: avformat.h:470
av_clip
#define av_clip
Definition: common.h:122
AudioParams::fmt
enum AVSampleFormat fmt
Definition: ffplay.c:139
av_sync_type
static int av_sync_type
Definition: ffplay.c:331
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
renderer_info
static SDL_RendererInfo renderer_info
Definition: ffplay.c:368
configure_filtergraph
int configure_filtergraph(FilterGraph *fg)
Definition: ffmpeg_filter.c:983
frame_queue_nb_remaining
static int frame_queue_nb_remaining(FrameQueue *f)
Definition: ffplay.c:804
frame_queue_next
static void frame_queue_next(FrameQueue *f)
Definition: ffplay.c:788
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:56
Decoder::finished
int finished
Definition: ffplay.c:194
AVFilterGraph::nb_threads
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:879
AVCodecContext::channel_layout
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:1247
av_compare_ts
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
Definition: mathematics.c:147
AVProgram::nb_stream_indexes
unsigned int nb_stream_indexes
Definition: avformat.h:1155
av_fifo_generic_write
int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int(*func)(void *, void *, int))
Feed data from a user-supplied callback to an AVFifoBuffer.
Definition: fifo.c:122
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
frame_queue_last_pos
static int64_t frame_queue_last_pos(FrameQueue *f)
Definition: ffplay.c:810
out
FILE * out
Definition: movenc.c:54
AVCodecContext::sample_rate
int sample_rate
samples per second
Definition: avcodec.h:1196
FFSWAP
#define FFSWAP(type, a, b)
Definition: common.h:108
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
video_thread
static int video_thread(void *arg)
Definition: ffplay.c:2132
VideoState::av_sync_type
int av_sync_type
Definition: ffplay.c:234
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
set_default_window_size
static void set_default_window_size(int width, int height, AVRational sar)
Definition: ffplay.c:1325
AV_PIX_FMT_BGR32
#define AV_PIX_FMT_BGR32
Definition: pixfmt.h:374
AV_NOSYNC_THRESHOLD
#define AV_NOSYNC_THRESHOLD
Definition: ffplay.c:88
AVFormatContext::nb_chapters
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1463
AVCodecParameters
This struct describes the properties of an encoded stream.
Definition: codec_par.h:52
AV_LOG_QUIET
#define AV_LOG_QUIET
Print no output.
Definition: log.h:176
FrameQueue::keep_last
int keep_last
Definition: ffplay.c:176
sub
static float sub(float src0, float src1)
Definition: dnn_backend_native_layer_mathbinary.c:32
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:925
VideoState::audio_hw_buf_size
int audio_hw_buf_size
Definition: ffplay.c:244
decoder_decode_frame
static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub)
Definition: ffplay.c:591
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:55
AV_DISPOSITION_ATTACHED_PIC
#define AV_DISPOSITION_ATTACHED_PIC
The stream is stored in the file as an attached picture/"cover art" (e.g.
Definition: avformat.h:841
FrameQueue::cond
SDL_cond * cond
Definition: ffplay.c:179
av_fifo_grow
int av_fifo_grow(AVFifoBuffer *f, unsigned int size)
Enlarge an AVFifoBuffer.
Definition: fifo.c:107
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:140
AVStream::discard
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:928
av_find_program_from_stream
AVProgram * av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
Find the programs which belong to a given stream.
Definition: utils.c:4213
display_disable
static int display_disable
Definition: ffplay.c:326
screen_width
static int screen_width
Definition: ffplay.c:316
DEBUG
#define DEBUG
Definition: vf_framerate.c:29
sws_dict
AVDictionary * sws_dict
Definition: cmdutils.c:68
swr_set_compensation
int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance)
Activate resampling compensation ("soft" compensation).
Definition: swresample.c:897
SAMPLE_ARRAY_SIZE
#define SAMPLE_ARRAY_SIZE
Definition: ffplay.c:106
rect
Definition: f_ebur128.c:91
update_volume
static void update_volume(VideoState *is, int sign, double step)
Definition: ffplay.c:1511
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
VideoState::auddec
Decoder auddec
Definition: ffplay.c:228
screen_left
static int screen_left
Definition: ffplay.c:318
av_opt_set_int_list
#define av_opt_set_int_list(obj, name, val, term, flags)
Set a binary option to an integer list.
Definition: opt.h:727
AudioParams::frame_size
int frame_size
Definition: ffplay.c:140
AVSubtitleRect
Definition: avcodec.h:2687
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:113
Decoder::next_pts
int64_t next_pts
Definition: ffplay.c:199
decoder_start
static int decoder_start(Decoder *d, int(*fn)(void *), const char *thread_name, void *arg)
Definition: ffplay.c:2121
rect::y
int y
Definition: f_ebur128.c:91
FrameQueue::size
int size
Definition: ffplay.c:174
avformat_get_class
const AVClass * avformat_get_class(void)
Get the AVClass for AVFormatContext.
Definition: options.c:246
av_unused
#define av_unused
Definition: attributes.h:131
Frame::sar
AVRational sar
Definition: ffplay.c:165
AudioParams::channel_layout
int64_t channel_layout
Definition: ffplay.c:138
out_size
int out_size
Definition: movenc.c:55
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
upload_texture
static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext **img_convert_ctx)
Definition: ffplay.c:908
VideoState::vis_texture
SDL_Texture * vis_texture
Definition: ffplay.c:273
queue_picture
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
Definition: ffplay.c:1744
av_get_channel_layout_string
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
Definition: channel_layout.c:217
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:318
AudioParams
Definition: ffplay.c:135
VideoState::subtitle_st
AVStream * subtitle_st
Definition: ffplay.c:278
pixdesc.h
AVFormatContext::streams
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1300
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
VideoState::frame_last_filter_delay
double frame_last_filter_delay
Definition: ffplay.c:283
AVFrame::width
int width
Definition: frame.h:376
VideoState::xleft
int xleft
Definition: ffplay.c:293
Frame::pts
double pts
Definition: ffplay.c:159
w
uint8_t w
Definition: llviddspenc.c:39
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:586
OPT_INPUT
#define OPT_INPUT
Definition: cmdutils.h:178
avcodec_decode_subtitle2
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:1034
frame_queue_init
static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
Definition: ffplay.c:691
subtitle_codec_name
static const char * subtitle_codec_name
Definition: ffplay.c:346
AVPacket::data
uint8_t * data
Definition: packet.h:369
EXTERNAL_CLOCK_MIN_FRAMES
#define EXTERNAL_CLOCK_MIN_FRAMES
Definition: ffplay.c:70
HAS_ARG
#define HAS_ARG
Definition: cmdutils.h:161
b
#define b
Definition: input.c:41
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:147
AVChapter::start
int64_t start
Definition: avformat.h:1192
Clock
Definition: ffplay.c:144
data
const char data[16]
Definition: mxf.c:142
frame_queue_destory
static void frame_queue_destory(FrameQueue *f)
Definition: ffplay.c:712
SAMPLE_QUEUE_SIZE
#define SAMPLE_QUEUE_SIZE
Definition: ffplay.c:132
program_name
const char program_name[]
program name, defined by the program for show_version().
Definition: ffplay.c:65
Decoder::queue
PacketQueue * queue
Definition: ffplay.c:191
format_opts
AVDictionary * format_opts
Definition: cmdutils.c:70
AVSEEK_FLAG_BYTE
#define AVSEEK_FLAG_BYTE
seeking based on position in bytes
Definition: avformat.h:2416
AVIOContext::error
int error
contains the error code or 0 if no error happened
Definition: avio.h:245
AV_DICT_IGNORE_SUFFIX
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key,...
Definition: dict.h:70
AV_PIX_FMT_RGB32_1
#define AV_PIX_FMT_RGB32_1
Definition: pixfmt.h:373
VideoState::audio_diff_avg_coef
double audio_diff_avg_coef
Definition: ffplay.c:239
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:210
CURSOR_HIDE_DELAY
#define CURSOR_HIDE_DELAY
Definition: ffplay.c:108
SDL_VOLUME_STEP
#define SDL_VOLUME_STEP
Definition: ffplay.c:79
show_help_children
void show_help_children(const AVClass *class, int flags)
Show help for all options with given flags in class and all its children.
Definition: cmdutils.c:203
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
AV_PIX_FMT_BGRA
@ AV_PIX_FMT_BGRA
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:95
autorotate
static int autorotate
Definition: ffplay.c:356
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:387
TextureFormatEntry::texture_fmt
int texture_fmt
Definition: ffplay.c:373
video_disable
static int video_disable
Definition: ffplay.c:321
Frame::uploaded
int uploaded
Definition: ffplay.c:166
mathematics.h
sws_scale
int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
Definition: swscale.c:745
AVDictionary
Definition: dict.c:30
compute_target_delay
static double compute_target_delay(double delay, VideoState *is)
Definition: ffplay.c:1526
Frame
Definition: ffplay.c:155
stream_close
static void stream_close(VideoState *is)
Definition: ffplay.c:1262
av_read_frame
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: utils.c:1741
av_fifo_generic_read
int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void(*func)(void *, void *, int))
Feed data from an AVFifoBuffer to a user-supplied callback.
Definition: fifo.c:213
SDL_AUDIO_MAX_CALLBACKS_PER_SEC
#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC
Definition: ffplay.c:76
AVFMT_NOBINSEARCH
#define AVFMT_NOBINSEARCH
Format does not allow to fall back on binary search via read_timestamp.
Definition: avformat.h:468
VideoState::paused
int paused
Definition: ffplay.c:209
init_clock
static void init_clock(Clock *c, int *queue_serial)
Definition: ffplay.c:1407
codec_type
enum AVMediaType codec_type
Definition: rtp.c:37
AV_OPT_FLAG_FILTERING_PARAM
#define AV_OPT_FLAG_FILTERING_PARAM
a generic parameter which can be set by the user for filtering
Definition: opt.h:294
opt_seek
static int opt_seek(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3526
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:342
av_strlcatf
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:101
VideoState::iformat
AVInputFormat * iformat
Definition: ffplay.c:206
get_master_clock
static double get_master_clock(VideoState *is)
Definition: ffplay.c:1440
VideoState::width
int width
Definition: ffplay.c:293
sample_rate
sample_rate
Definition: ffmpeg_filter.c:170
dummy
static int dummy
Definition: ffplay.c:3582
update_video_pts
static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial)
Definition: ffplay.c:1568
FF_QUIT_EVENT
#define FF_QUIT_EVENT
Definition: ffplay.c:364
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: avpacket.c:75
PacketQueue
Definition: ffplay.c:119
subtitle_thread
static int subtitle_thread(void *arg)
Definition: ffplay.c:2239
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:332
VideoState::last_subtitle_stream
int last_subtitle_stream
Definition: ffplay.c:305
VideoState::SHOW_MODE_NONE
@ SHOW_MODE_NONE
Definition: ffplay.c:263
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:120
OptionDef
Definition: cmdutils.h:158
audio_decode_frame
static int audio_decode_frame(VideoState *is)
Decode one audio frame and return its uncompressed size.
Definition: ffplay.c:2342
subtitle_disable
static int subtitle_disable
Definition: ffplay.c:322
VideoState::pictq
FrameQueue pictq
Definition: ffplay.c:224
genpts
static int genpts
Definition: ffplay.c:335
VideoState::swr_ctx
struct SwrContext * swr_ctx
Definition: ffplay.c:258
AVFifoBuffer
Definition: fifo.h:31
opt_sync
static int opt_sync(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3511
step_to_next_frame
static void step_to_next_frame(VideoState *is)
Definition: ffplay.c:1518
VideoState::sampq
FrameQueue sampq
Definition: ffplay.c:226
TextureFormatEntry::format
enum AVPixelFormat format
Definition: ffplay.c:372
FrameQueue::rindex
int rindex
Definition: ffplay.c:172
video_display
static void video_display(VideoState *is)
Definition: ffplay.c:1361
AVCodec::max_lowres
uint8_t max_lowres
maximum value for lowres supported by the decoder
Definition: codec.h:222
avformat_close_input
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: utils.c:4481
parse_number_or_die
double parse_number_or_die(const char *context, const char *numstr, int type, double min, double max)
Parse a string and return its corresponding value as a double.
Definition: cmdutils.c:141
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:518
AVFormatContext::interrupt_callback
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1512
avcodec_find_decoder_by_name
AVCodec * avcodec_find_decoder_by_name(const char *name)
Find a registered decoder with the specified name.
Definition: allcodecs.c:974
SDL_AUDIO_MIN_BUFFER_SIZE
#define SDL_AUDIO_MIN_BUFFER_SIZE
Definition: ffplay.c:74
print_error
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:1084
startup_volume
static int startup_volume
Definition: ffplay.c:329
window
static SDL_Window * window
Definition: ffplay.c:366
avfilter_graph_create_filter
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
Definition: avfiltergraph.c:142
AVCodecParameters::channels
int channels
Audio only.
Definition: codec_par.h:166
fifo.h
toggle_full_screen
static void toggle_full_screen(VideoState *is)
Definition: ffplay.c:3215
Clock::queue_serial
int * queue_serial
Definition: ffplay.c:151
AVFormatContext::iformat
ff_const59 struct AVInputFormat * iformat
The input container format.
Definition: avformat.h:1244
VideoState::extclk
Clock extclk
Definition: ffplay.c:222
VideoState::seek_flags
int seek_flags
Definition: ffplay.c:213
alwaysontop
static int alwaysontop
Definition: ffplay.c:328
VideoState::audio_st
AVStream * audio_st
Definition: ffplay.c:242
packet_queue_init
static int packet_queue_init(PacketQueue *q)
Definition: ffplay.c:479
AUDIO_DIFF_AVG_NB
#define AUDIO_DIFF_AVG_NB
Definition: ffplay.c:99
fail
#define fail()
Definition: checkasm.h:133
avfilter_graph_alloc
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:83
opt_duration
static int opt_duration(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3532
FrameQueue
Definition: ffplay.c:170
packet_queue_put
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:450
samplefmt.h
AVSubtitleRect::x
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:2688
VideoState::video_stream
int video_stream
Definition: ffplay.c:284
autoexit
static int autoexit
Definition: ffplay.c:338
avio_tell
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:557
AVERROR_OPTION_NOT_FOUND
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:61
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
AVChapter
Definition: avformat.h:1185
video_image_display
static void video_image_display(VideoState *is)
Definition: ffplay.c:975
val
static double val(void *priv, double ch)
Definition: aeval.c:76
AV_SYNC_AUDIO_MASTER
@ AV_SYNC_AUDIO_MASTER
Definition: ffplay.c:184
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
opt_show_mode
static int opt_show_mode(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3538
Decoder::empty_queue_cond
SDL_cond * empty_queue_cond
Definition: ffplay.c:196
pts
static int64_t pts
Definition: transcode_aac.c:652
set_clock_speed
static void set_clock_speed(Clock *c, double speed)
Definition: ffplay.c:1401
VideoState::audio_diff_threshold
double audio_diff_threshold
Definition: ffplay.c:240
sws_get_class
const AVClass * sws_get_class(void)
Get the AVClass for swsContext.
Definition: options.c:95
av_opt_set
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:465
VideoState::audio_diff_cum
double audio_diff_cum
Definition: ffplay.c:238
VideoState::last_video_stream
int last_video_stream
Definition: ffplay.c:305
OPT_STRING
#define OPT_STRING
Definition: cmdutils.h:164
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:261
fast
static int fast
Definition: ffplay.c:334
fn
#define fn(a)
Definition: colorspacedsp_template.c:42
loop
static int loop
Definition: ffplay.c:341
AVRational::num
int num
Numerator.
Definition: rational.h:59
avformat_network_init
int avformat_network_init(void)
Do global initialization of network libraries.
Definition: utils.c:5056
VideoState::rdft_bits
int rdft_bits
Definition: ffplay.c:269
opt_height
static int opt_height(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3489
avsubtitle_free
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: avcodec.c:551
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
sdl_texture_format_map
static const struct TextureFormatEntry sdl_texture_format_map[]
AVFormatContext::bit_rate
int64_t bit_rate
Total stream bitrate in bit/s, 0 if not available.
Definition: avformat.h:1354
is_full_screen
static int is_full_screen
Definition: ffplay.c:361
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:212
avassert.h
set_sdl_yuv_conversion_mode
static void set_sdl_yuv_conversion_mode(AVFrame *frame)
Definition: ffplay.c:959
lrint
#define lrint
Definition: tablegen.h:53
Frame::flip_v
int flip_v
Definition: ffplay.c:167
av_guess_sample_aspect_ratio
AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
Guess the sample aspect ratio of a frame, based on both the stream and the frame aspect ratio.
Definition: utils.c:5119
AV_LOG_TRACE
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
Definition: log.h:220
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
av_fifo_space
int av_fifo_space(const AVFifoBuffer *f)
Return the amount of space in bytes in the AVFifoBuffer, that is the amount of data you can write int...
Definition: fifo.c:82
AVFormatContext::metadata
AVDictionary * metadata
Metadata that applies to the whole file.
Definition: avformat.h:1474
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
AVInputFormat
Definition: avformat.h:640
audio_thread
static int audio_thread(void *arg)
Definition: ffplay.c:2034
set_clock
static void set_clock(Clock *c, double pts, int serial)
Definition: ffplay.c:1395
av_dump_format
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate,...
Definition: dump.c:640
VideoState
Definition: ffplay.c:204
frame_queue_peek_next
static Frame * frame_queue_peek_next(FrameQueue *f)
Definition: ffplay.c:736
sdl_audio_callback
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
Definition: ffplay.c:2455
sync_clock_to_slave
static void sync_clock_to_slave(Clock *c, Clock *slave)
Definition: ffplay.c:1415
swr_init
av_cold int swr_init(struct SwrContext *s)
Initialize context after user parameters have been set.
Definition: swresample.c:152
opt_input_file
static void opt_input_file(void *optctx, const char *filename)
Definition: ffplay.c:3547
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
frame_queue_signal
static void frame_queue_signal(FrameQueue *f)
Definition: ffplay.c:724
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
OPT_INT
#define OPT_INT
Definition: cmdutils.h:167
VideoState::img_convert_ctx
struct SwsContext * img_convert_ctx
Definition: ffplay.c:288
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:173
width
#define width
VideoState::ShowMode
ShowMode
Definition: ffplay.c:262
Decoder::avctx
AVCodecContext * avctx
Definition: ffplay.c:192
s
#define s(width, name)
Definition: cbs_vp9.c:257
AV_OPT_FLAG_ENCODING_PARAM
#define AV_OPT_FLAG_ENCODING_PARAM
a generic parameter which can be set by the user for muxing or encoding
Definition: opt.h:278
show_help_default
void show_help_default(const char *opt, const char *arg)
Per-fftool specific help handler.
Definition: ffplay.c:3647
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
default_height
static int default_height
Definition: ffplay.c:315
AVFormatContext::flags
int flags
Flags modifying the (de)muxer behaviour.
Definition: avformat.h:1363
format
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
AVCodecParameters::sample_aspect_ratio
AVRational sample_aspect_ratio
Video only.
Definition: codec_par.h:136
AVFrame::channels
int channels
number of audio channels, only used for audio.
Definition: frame.h:624
AVInputFormat::name
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:645
AV_PIX_FMT_0BGR32
#define AV_PIX_FMT_0BGR32
Definition: pixfmt.h:377
AVCOL_SPC_SMPTE170M
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
Definition: pixfmt.h:519
AVDictionaryEntry::key
char * key
Definition: dict.h:82
Clock::last_updated
double last_updated
Definition: ffplay.c:147
PacketQueue::duration
int64_t duration
Definition: ffplay.c:123
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
AVSubtitleRect::y
int y
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:2689
AVCodecParameters::width
int width
Video only.
Definition: codec_par.h:126
calculate_display_rect
static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, int pic_width, int pic_height, AVRational pic_sar)
Definition: ffplay.c:863
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
screen_height
static int screen_height
Definition: ffplay.c:317
EXTERNAL_CLOCK_SPEED_STEP
#define EXTERNAL_CLOCK_SPEED_STEP
Definition: ffplay.c:96
AV_CH_LAYOUT_STEREO_DOWNMIX
#define AV_CH_LAYOUT_STEREO_DOWNMIX
Definition: channel_layout.h:117
Decoder::pkt_serial
int pkt_serial
Definition: ffplay.c:193
opt_frame_pix_fmt
static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3505
avcodec_receive_frame
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
Definition: decode.c:652
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
AVMEDIA_TYPE_NB
@ AVMEDIA_TYPE_NB
Definition: avutil.h:206
outputs
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:309
AV_SYNC_EXTERNAL_CLOCK
@ AV_SYNC_EXTERNAL_CLOCK
Definition: ffplay.c:186
av_read_play
int av_read_play(AVFormatContext *s)
Start playing a network-based stream (e.g.
Definition: utils.c:4298
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
opt_codec
static int opt_codec(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3560
Clock::pts_drift
double pts_drift
Definition: ffplay.c:146
swr_convert
int attribute_align_arg swr_convert(struct SwrContext *s, uint8_t *out_arg[SWR_CH_MAX], int out_count, const uint8_t *in_arg[SWR_CH_MAX], int in_count)
Definition: swresample.c:714
VideoState::videoq
PacketQueue videoq
Definition: ffplay.c:286
ctx
AVFormatContext * ctx
Definition: movenc.c:48
channels
channels
Definition: aptx.h:33
limits.h
REFRESH_RATE
#define REFRESH_RATE
Definition: ffplay.c:102
FrameQueue::rindex_shown
int rindex_shown
Definition: ffplay.c:177
nb_streams
static int nb_streams
Definition: ffprobe.c:283
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
VideoState::force_refresh
int force_refresh
Definition: ffplay.c:208
get_clock
static double get_clock(Clock *c)
Definition: ffplay.c:1375
screen_top
static int screen_top
Definition: ffplay.c:319
VideoState::audio_diff_avg_count
int audio_diff_avg_count
Definition: ffplay.c:241
EXTERNAL_CLOCK_SPEED_MIN
#define EXTERNAL_CLOCK_SPEED_MIN
Definition: ffplay.c:94
sws_flags
static unsigned sws_flags
Definition: ffplay.c:112
parse_options
void parse_options(void *optctx, int argc, char **argv, const OptionDef *options, void(*parse_arg_function)(void *, const char *))
Definition: cmdutils.c:379
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
av_get_sample_fmt_name
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:49
renderer
static SDL_Renderer * renderer
Definition: ffplay.c:367
av_usleep
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
SwrContext
The libswresample context.
Definition: swresample_internal.h:95
vp_duration
static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp)
Definition: ffplay.c:1556
swr_alloc_set_opts
struct SwrContext * swr_alloc_set_opts(struct SwrContext *s, int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate, int64_t in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate, int log_offset, void *log_ctx)
Allocate SwrContext if needed and set/reset common parameters.
Definition: swresample.c:59
NAN
#define NAN
Definition: mathematics.h:64
f
#define f(width, name)
Definition: cbs_vp9.c:255
AV_PIX_FMT_BGR32_1
#define AV_PIX_FMT_BGR32_1
Definition: pixfmt.h:375
VideoState::step
int step
Definition: ffplay.c:294
av_rdft_calc
void av_rdft_calc(RDFTContext *s, FFTSample *data)
synchronize_audio
static int synchronize_audio(VideoState *is, int nb_samples)
Definition: ffplay.c:2294
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:546
arg
const char * arg
Definition: jacosubdec.c:66
if
if(ret)
Definition: filter_design.txt:179
window_title
static const char * window_title
Definition: ffplay.c:313
Clock::speed
double speed
Definition: ffplay.c:148
VideoState::SHOW_MODE_VIDEO
@ SHOW_MODE_VIDEO
Definition: ffplay.c:263
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: avcodec.h:236
AVFormatContext
Format I/O context.
Definition: avformat.h:1232
av_log_get_level
int av_log_get_level(void)
Get the current log level.
Definition: log.c:435
avfilter_get_by_name
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:545
init_dynload
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:117
opts
AVDictionary * opts
Definition: movenc.c:50
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:1038
AVSubtitleRect::w
int w
width of pict, undefined when pict is not set
Definition: avcodec.h:2690
seek_chapter
static void seek_chapter(VideoState *is, int incr)
Definition: ffplay.c:3250
get_master_sync_type
static int get_master_sync_type(VideoState *is)
Definition: ffplay.c:1423
avcodec_get_class
const AVClass * avcodec_get_class(void)
Get the AVClass for AVCodecContext.
Definition: options.c:311
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
avfilter_graph_config
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
Definition: avfiltergraph.c:1218
AVStream::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avformat.h:902
NULL
#define NULL
Definition: coverity.c:32
stream_open
static VideoState * stream_open(const char *filename, AVInputFormat *iformat)
Definition: ffplay.c:3078
FrameQueue::max_size
int max_size
Definition: ffplay.c:175
OPT_EXPERT
#define OPT_EXPERT
Definition: cmdutils.h:163
AV_PIX_FMT_YUYV422
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:67
Decoder
Definition: ffplay.c:189
AudioParams::freq
int freq
Definition: ffplay.c:136
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:188
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
isnan
#define isnan(x)
Definition: libm.h:340
AVFilterGraph::filters
AVFilterContext ** filters
Definition: avfilter.h:852
stream_cycle_channel
static void stream_cycle_channel(VideoState *is, int codec_type)
Definition: ffplay.c:3136
VideoState::frame_drops_late
int frame_drops_late
Definition: ffplay.c:260
AV_CODEC_FLAG2_FAST
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:348
VideoState::rdft
RDFTContext * rdft
Definition: ffplay.c:268
AVFormatContext::pb
AVIOContext * pb
I/O context.
Definition: avformat.h:1274
av_log_set_flags
void av_log_set_flags(int arg)
Definition: log.c:445
parseutils.h
frame_queue_unref_item
static void frame_queue_unref_item(Frame *vp)
Definition: ffplay.c:685
FrameQueue::queue
Frame queue[FRAME_QUEUE_SIZE]
Definition: ffplay.c:171
VideoState::last_i_start
int last_i_start
Definition: ffplay.c:267
init_opts
void init_opts(void)
Initialize the cmdutils option system, in particular allocate the *_opts contexts.
Definition: cmdutils.c:82
Decoder::packet_pending
int packet_pending
Definition: ffplay.c:195
cursor_last_shown
static int64_t cursor_last_shown
Definition: ffplay.c:349
AVProgram::stream_index
unsigned int * stream_index
Definition: avformat.h:1154
frame_queue_peek
static Frame * frame_queue_peek(FrameQueue *f)
Definition: ffplay.c:731
OPT_INT64
#define OPT_INT64
Definition: cmdutils.h:170
avfilter_inout_alloc
AVFilterInOut * avfilter_inout_alloc(void)
Allocate a single AVFilterInOut entry.
Definition: graphparser.c:207
Frame::duration
double duration
Definition: ffplay.c:160
DFT_R2C
@ DFT_R2C
Definition: avfft.h:72
lowres
static int lowres
Definition: ffplay.c:336
AV_DICT_DONT_OVERWRITE
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
Definition: dict.h:76
AV_PIX_FMT_RGB8
@ AV_PIX_FMT_RGB8
packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
Definition: pixfmt.h:86
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:144
time.h
Frame::pos
int64_t pos
Definition: ffplay.c:161
FFTSample
float FFTSample
Definition: avfft.h:35
avfft.h
VideoState::frame_last_returned_time
double frame_last_returned_time
Definition: ffplay.c:282
set_clock_at
static void set_clock_at(Clock *c, double pts, int serial, double time)
Definition: ffplay.c:1387
toggle_pause
static void toggle_pause(VideoState *is)
Definition: ffplay.c:1500
TextureFormatEntry
Definition: ffplay.c:371
AVFilterGraph
Definition: avfilter.h:850
stream_component_open
static int stream_component_open(VideoState *is, int stream_index)
Definition: ffplay.c:2572
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
av_buffersink_get_channel_layout
uint64_t av_buffersink_get_channel_layout(const AVFilterContext *ctx)
av_packet_ref
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: avpacket.c:641
fp
#define fp
Definition: regdef.h:44
av_packet_move_ref
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
Definition: avpacket.c:690
AV_PIX_FMT_NE
#define AV_PIX_FMT_NE(be, le)
Definition: pixfmt.h:369
exp
int8_t exp
Definition: eval.c:72
VideoState::seek_req
int seek_req
Definition: ffplay.c:212
VideoState::SHOW_MODE_WAVES
@ SHOW_MODE_WAVES
Definition: ffplay.c:263
VideoState::audio_clock
double audio_clock
Definition: ffplay.c:236
VideoState::read_pause_return
int read_pause_return
Definition: ffplay.c:216
event_loop
static void event_loop(VideoState *cur_stream)
Definition: ffplay.c:3278
swresample.h
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
VideoState::ytop
int ytop
Definition: ffplay.c:293
AVCodecParameters::sample_rate
int sample_rate
Audio only.
Definition: codec_par.h:170
av_get_channel_layout_nb_channels
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
Definition: channel_layout.c:226
av_opt_set_int
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:586
VideoState::sample_array
int16_t sample_array[SAMPLE_ARRAY_SIZE]
Definition: ffplay.c:265
AVFormatContext::nb_streams
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1288
exit_on_mousedown
static int exit_on_mousedown
Definition: ffplay.c:340
AVFilterGraph::scale_sws_opts
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:855
Decoder::next_pts_tb
AVRational next_pts_tb
Definition: ffplay.c:200
AVCodecContext::lowres
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1754
VideoState::audioq
PacketQueue audioq
Definition: ffplay.c:243
codec_opts
AVDictionary * codec_opts
Definition: cmdutils.c:70
audio_callback_time
static int64_t audio_callback_time
Definition: ffplay.c:362
eval.h
avformat_find_stream_info
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
Read packets of a media file to get stream information.
Definition: utils.c:3602
Frame::format
int format
Definition: ffplay.c:164
get_valid_channel_layout
static int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
Definition: ffplay.c:418
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:623
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:401
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
AVMediaType
AVMediaType
Definition: avutil.h:199
av_log_set_callback
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:455
AVPacket::size
int size
Definition: packet.h:370
avformat_match_stream_specifier
int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the stream st contained in s is matched by the stream specifier spec.
Definition: utils.c:5326
VideoState::audio_write_buf_size
int audio_write_buf_size
Definition: ffplay.c:250
av_rdft_init
RDFTContext * av_rdft_init(int nbits, enum RDFTransformType trans)
Set up a real FFT.
avformat_alloc_context
AVFormatContext * avformat_alloc_context(void)
Allocate an AVFormatContext.
Definition: options.c:211
AVDISCARD_DEFAULT
@ AVDISCARD_DEFAULT
discard useless packets like 0 size packets in avi
Definition: avcodec.h:231
FrameQueue::mutex
SDL_mutex * mutex
Definition: ffplay.c:178
frame_queue_peek_writable
static Frame * frame_queue_peek_writable(FrameQueue *f)
Definition: ffplay.c:746
OPT_AUDIO
#define OPT_AUDIO
Definition: cmdutils.h:166
Frame::sub
AVSubtitle sub
Definition: ffplay.c:157
VideoState::last_audio_stream
int last_audio_stream
Definition: ffplay.c:305
AVFrame::sample_rate
int sample_rate
Sample rate of the audio data.
Definition: frame.h:490
decoder_init
static int decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond)
Definition: ffplay.c:578
avfilter_link
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:135
sp
#define sp
Definition: regdef.h:63
start_time
static int64_t start_time
Definition: ffplay.c:332
FFMAX
#define FFMAX(a, b)
Definition: common.h:103
VideoState::SHOW_MODE_NB
@ SHOW_MODE_NB
Definition: ffplay.c:263
AVCodecContext::sample_fmt
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1204
Frame::serial
int serial
Definition: ffplay.c:158
AVCodecContext::pkt_timebase
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
Definition: avcodec.h:2085
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:59
hypot
static av_const double hypot(double x, double y)
Definition: libm.h:366
uninit_opts
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents.
Definition: cmdutils.c:87
size
int size
Definition: twinvq_data.h:10344
VideoState::xpos
int xpos
Definition: ffplay.c:271
av_make_q
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
read_thread
static int read_thread(void *arg)
Definition: ffplay.c:2755
AV_PIX_FMT_BGR555
#define AV_PIX_FMT_BGR555
Definition: pixfmt.h:392
avformat_seek_file
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
Seek to timestamp ts.
Definition: utils.c:2512
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
Clock::paused
int paused
Definition: ffplay.c:150
rect::h
int h
Definition: f_ebur128.c:91
VideoState::sub_texture
SDL_Texture * sub_texture
Definition: ffplay.c:274
av_guess_frame_rate
AVRational av_guess_frame_rate(AVFormatContext *ctx, AVStream *stream, AVFrame *frame)
Guess the frame rate, based on both the container and codec information.
Definition: utils.c:5142
swr_free
av_cold void swr_free(SwrContext **ss)
Free the given SwrContext and set the pointer to NULL.
Definition: swresample.c:137
VideoState::vid_texture
SDL_Texture * vid_texture
Definition: ffplay.c:275
setup_find_stream_info_opts
AVDictionary ** setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts)
Setup AVCodecContext options for avformat_find_stream_info().
Definition: cmdutils.c:2161
printf
printf("static const uint8_t my_array[100] = {\n")
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:391
VideoState::sample_array_index
int sample_array_index
Definition: ffplay.c:266
wanted_stream_spec
static const char * wanted_stream_spec[AVMEDIA_TYPE_NB]
Definition: ffplay.c:323
infinite_buffer
static int infinite_buffer
Definition: ffplay.c:343
VideoState::max_frame_duration
double max_frame_duration
Definition: ffplay.c:287
avdevice.h
AV_OPT_SEARCH_CHILDREN
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:560
packet_queue_destroy
static void packet_queue_destroy(PacketQueue *q)
Definition: ffplay.c:515
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:368
height
#define height
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:372
VideoState::frame_drops_early
int frame_drops_early
Definition: ffplay.c:259
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
FFMIN
#define FFMIN(a, b)
Definition: common.h:105
AVFrame::channel_layout
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:495
toggle_mute
static void toggle_mute(VideoState *is)
Definition: ffplay.c:1506
av_format_inject_global_side_data
void av_format_inject_global_side_data(AVFormatContext *s)
This function will cause global side data to be injected in the next packet of each stream as well as...
Definition: utils.c:150
decoder_abort
static void decoder_abort(Decoder *d, FrameQueue *fq)
Definition: ffplay.c:819
video_refresh
static void video_refresh(void *opaque, double *remaining_time)
Definition: ffplay.c:1575
ns
#define ns(max_value, name, subs,...)
Definition: cbs_av1.c:682
seek_interval
static float seek_interval
Definition: ffplay.c:325
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: avpacket.c:64
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:203
rect::x
int x
Definition: f_ebur128.c:91
VideoState::seek_pos
int64_t seek_pos
Definition: ffplay.c:214
frame_queue_push
static void frame_queue_push(FrameQueue *f)
Definition: ffplay.c:778
audio_dev
static SDL_AudioDeviceID audio_dev
Definition: ffplay.c:369
sigterm_handler
static void sigterm_handler(int sig)
Definition: ffplay.c:1320
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:205
sample_rates
sample_rates
Definition: ffmpeg_filter.c:170
packet_queue_abort
static void packet_queue_abort(PacketQueue *q)
Definition: ffplay.c:523
video_codec_name
static const char * video_codec_name
Definition: ffplay.c:347
AVCodecContext::channels
int channels
number of audio channels
Definition: avcodec.h:1197
buffersink.h
AVCodec::id
enum AVCodecID id
Definition: codec.h:211
show_banner
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: cmdutils.c:1183
packet_queue_flush
static void packet_queue_flush(PacketQueue *q)
Definition: ffplay.c:499
packet_queue_get
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
Definition: ffplay.c:543
av_get_picture_type_char
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:83
MAX_QUEUE_SIZE
#define MAX_QUEUE_SIZE
Definition: ffplay.c:68
AVCOL_SPC_SMPTE240M
@ AVCOL_SPC_SMPTE240M
functionally identical to above
Definition: pixfmt.h:520
avcodec_get_name
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:471
MIN_FRAMES
#define MIN_FRAMES
Definition: ffplay.c:69
VideoState::queue_attachments_req
int queue_attachments_req
Definition: ffplay.c:211
in
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) #define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac) { } void ff_audio_convert_free(AudioConvert **ac) { if(! *ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);} AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, int apply_map) { AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method !=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2) { ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc) { av_free(ac);return NULL;} return ac;} in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar) { ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar ? ac->channels :1;} else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;} int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in) { int use_generic=1;int len=in->nb_samples;int p;if(ac->dc) { av_log(ac->avr, AV_LOG_TRACE, "%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
Definition: audio_convert.c:326
avcodec_send_packet
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:589
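A minimal sketch of the send/receive decoding loop this function belongs to. The helper name decode_packet is illustrative only; the codec context and frame are assumed to have been set up elsewhere (e.g. with avcodec_open2() and av_frame_alloc()).

#include <libavcodec/avcodec.h>

/* Feed one packet to the decoder and drain every frame it produces. */
static int decode_packet(AVCodecContext *avctx, const AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(avctx, pkt);
    if (ret < 0)
        return ret;
    for (;;) {
        ret = avcodec_receive_frame(avctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;          /* decoder needs more input, or is fully drained */
        if (ret < 0)
            return ret;        /* decoding error */
        /* ... use the decoded frame here ... */
        av_frame_unref(frame);
    }
}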
av_find_input_format
ff_const59 AVInputFormat * av_find_input_format(const char *short_name)
Find AVInputFormat based on the short name of the input format.
Definition: format.c:118
FrameQueue::windex
int windex
Definition: ffplay.c:173
VideoState::filename
char * filename
Definition: ffplay.c:292
VideoState::muted
int muted
Definition: ffplay.c:252
Decoder::start_pts
int64_t start_pts
Definition: ffplay.c:197
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:384
bprint.h
Clock::pts
double pts
Definition: ffplay.c:145
i
int i
Definition: input.c:407
VIDEO_PICTURE_QUEUE_SIZE
#define VIDEO_PICTURE_QUEUE_SIZE
Definition: ffplay.c:130
PacketQueue::serial
int serial
Definition: ffplay.c:125
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:362
VideoState::show_mode
enum VideoState::ShowMode show_mode
VideoState::audio_src
struct AudioParams audio_src
Definition: ffplay.c:253
program_birth_year
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffplay.c:66
avformat_open_input
int avformat_open_input(AVFormatContext **ps, const char *url, ff_const59 AVInputFormat *fmt, AVDictionary **options)
Open an input stream and read the header.
Definition: utils.c:512
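A minimal open/probe/close sketch around this call; probe_input is an illustrative name, not part of ffplay, and error handling is deliberately brief.

#include <libavformat/avformat.h>

static int probe_input(const char *url)
{
    AVFormatContext *ic = NULL;
    int ret = avformat_open_input(&ic, url, NULL, NULL);
    if (ret < 0)
        return ret;
    ret = avformat_find_stream_info(ic, NULL);
    if (ret >= 0)
        av_dump_format(ic, 0, url, 0);   /* print a stream summary */
    avformat_close_input(&ic);           /* frees ic and sets it to NULL */
    return ret;
}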
VideoState::audio_buf1
uint8_t * audio_buf1
Definition: ffplay.c:246
av_get_bytes_per_sample
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:106
avfilter_graph_parse_ptr
int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters, AVFilterInOut **inputs, AVFilterInOut **outputs, void *log_ctx)
Add a filtergraph described by a string to an existing graph.
Definition: graphparser.c:549
swr_opts
AVDictionary * swr_opts
Definition: cmdutils.c:69
PacketQueue::pkt_list
AVFifoBuffer * pkt_list
Definition: ffplay.c:120
compute_mod
static int compute_mod(int a, int b)
Definition: ffplay.c:1058
Decoder::start_pts_tb
AVRational start_pts_tb
Definition: ffplay.c:198
VideoState::rdft_data
FFTSample * rdft_data
Definition: ffplay.c:270
AVCodecParameters::height
int height
Definition: codec_par.h:127
avcodec_parameters_to_context
int avcodec_parameters_to_context(AVCodecContext *codec, const AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
Definition: codec_par.c:147
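A hedged sketch of the usual parameters-to-context sequence when opening a decoder for one stream of an already opened input; open_decoder is an illustrative helper, not ffplay's own code.

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>

static AVCodecContext *open_decoder(AVFormatContext *ic, int stream_index)
{
    AVStream *st = ic->streams[stream_index];
    AVCodec *dec = avcodec_find_decoder(st->codecpar->codec_id);
    AVCodecContext *avctx;

    if (!dec)
        return NULL;
    avctx = avcodec_alloc_context3(dec);
    if (!avctx)
        return NULL;
    /* copy the demuxer's stream parameters into the fresh codec context */
    if (avcodec_parameters_to_context(avctx, st->codecpar) < 0 ||
        avcodec_open2(avctx, dec, NULL) < 0) {
        avcodec_free_context(&avctx);
        return NULL;
    }
    return avctx;
}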
AV_TIME_BASE
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:254
AVFrame::extended_data
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:365
Decoder::pkt
AVPacket * pkt
Definition: ffplay.c:190
file_iformat
static AVInputFormat * file_iformat
Definition: ffplay.c:311
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
video_open
static int video_open(VideoState *is)
Definition: ffplay.c:1337
RDFTContext
Definition: rdft.h:28
get_sdl_pix_fmt_and_blendmode
static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
Definition: ffplay.c:890
show_status
static int show_status
Definition: ffplay.c:330
opt_format
static int opt_format(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3495
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
filter_codec_opts
AVDictionary * filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id, AVFormatContext *s, AVStream *st, const AVCodec *codec)
Filter out options for given codec.
Definition: cmdutils.c:2103
AV_PIX_FMT_RGB555
#define AV_PIX_FMT_RGB555
Definition: pixfmt.h:387
AV_OPT_FLAG_DECODING_PARAM
#define AV_OPT_FLAG_DECODING_PARAM
a generic parameter which can be set by the user for demuxing or decoding
Definition: opt.h:279
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:582
borderless
static int borderless
Definition: ffplay.c:327
uint8_t
uint8_t
Definition: audio_convert.c:194
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
update_sample_display
static void update_sample_display(VideoState *is, short *samples, int samples_size)
Definition: ffplay.c:2274
MyAVPacketList
Definition: ffplay.c:114
AV_SAMPLE_FMT_S16
@ AV_SAMPLE_FMT_S16
signed 16 bits
Definition: samplefmt.h:61
tb
#define tb
Definition: regdef.h:68
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
AVProgram
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1150
av_read_pause
int av_read_pause(AVFormatContext *s)
Pause a network-based stream (e.g.
Definition: utils.c:4307
len
int len
Definition: vorbis_enc_data.h:452
Frame::frame
AVFrame * frame
Definition: ffplay.c:156
AV_PIX_FMT_BGR565
#define AV_PIX_FMT_BGR565
Definition: pixfmt.h:391
av_samples_get_buffer_size
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
Definition: samplefmt.c:119
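A small self-contained example of the size computation; the figures in the comment assume interleaved signed 16-bit stereo with align = 1 (no padding).

#include <stdio.h>
#include <libavutil/samplefmt.h>

int main(void)
{
    int linesize;
    /* 2 channels * 1024 samples * 2 bytes per sample = 4096 bytes */
    int size = av_samples_get_buffer_size(&linesize, 2, 1024,
                                          AV_SAMPLE_FMT_S16, 1);
    printf("buffer size: %d, linesize: %d\n", size, linesize);
    return size < 0;
}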
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:60
PacketQueue::nb_packets
int nb_packets
Definition: ffplay.c:121
av_rescale
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:129
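A tiny worked example of the a * b / c rescale with 64-bit intermediates and rounding to nearest; the clock values chosen here are arbitrary.

#include <stdio.h>
#include <libavutil/mathematics.h>

int main(void)
{
    /* convert 90000 ticks of a 1/90000 clock into milliseconds */
    int64_t ms = av_rescale(90000, 1000, 90000);   /* == 1000 */
    printf("%lld ms\n", (long long)ms);
    return 0;
}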
FRAME_QUEUE_SIZE
#define FRAME_QUEUE_SIZE
Definition: ffplay.c:133
frame_queue_peek_readable
static Frame * frame_queue_peek_readable(FrameQueue *f)
Definition: ffplay.c:762
AVIOInterruptCB::opaque
void * opaque
Definition: avio.h:60
AV_PIX_FMT_RGB565
#define AV_PIX_FMT_RGB565
Definition: pixfmt.h:386
parse_loglevel
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:502
av_cmp_q
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
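A short example of the comparison semantics (0 if equal, 1 if a > b, -1 if a < b); the two rationals are arbitrary frame durations picked for illustration.

#include <stdio.h>
#include <libavutil/rational.h>

int main(void)
{
    AVRational a = { 1, 25 };   /* 0.04   */
    AVRational b = { 1, 30 };   /* 0.0333 */
    printf("%d\n", av_cmp_q(a, b));   /* prints 1, since 1/25 > 1/30 */
    return 0;
}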
Clock::serial
int serial
Definition: ffplay.c:149
VideoState::height
int height
Definition: ffplay.c:293
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:77
AVStream::disposition
int disposition
AV_DISPOSITION_* bit field.
Definition: avformat.h:926
AVFMT_FLAG_GENPTS
#define AVFMT_FLAG_GENPTS
Generate missing pts even if it requires parsing future frames.
Definition: avformat.h:1364
VideoState::subpq
FrameQueue subpq
Definition: ffplay.c:225
seek_by_bytes
static int seek_by_bytes
Definition: ffplay.c:324
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:873
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:188
stream_seek
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
Definition: ffplay.c:1473
EXTERNAL_CLOCK_MAX_FRAMES
#define EXTERNAL_CLOCK_MAX_FRAMES
Definition: ffplay.c:71
AVSubtitleRect::h
int h
height of pict, undefined when pict is not set
Definition: avcodec.h:2691
AV_SYNC_VIDEO_MASTER
@ AV_SYNC_VIDEO_MASTER
Definition: ffplay.c:185
avcodec_flush_buffers
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal codec state / flush internal buffers.
Definition: avcodec.c:491
frame
request_frame: called when a frame is wanted on an output; the filter should call filter_frame on the corresponding output, push an already queued frame, or request a frame on one of its inputs, repeatedly until at least one frame has been pushed or progress has been made towards producing a frame
Definition: filter_design.txt:264
AV_PIX_FMT_0RGB32
#define AV_PIX_FMT_0RGB32
Definition: pixfmt.h:376
video_stream
static AVStream * video_stream
Definition: demuxing_decoding.c:41
audio_open
static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
Definition: ffplay.c:2498
filter_nbthreads
static int filter_nbthreads
Definition: ffplay.c:358
avcodec_find_decoder
AVCodec * avcodec_find_decoder(enum AVCodecID id)
Find a registered decoder with a matching codec ID.
Definition: allcodecs.c:946
log_callback_help
void log_callback_help(void *ptr, int level, const char *fmt, va_list vl)
Trivial log callback.
Definition: cmdutils.c:96
cursor_hidden
static int cursor_hidden
Definition: ffplay.c:350
VideoState::SHOW_MODE_RDFT
@ SHOW_MODE_RDFT
Definition: ffplay.c:263
find_stream_info
static int find_stream_info
Definition: ffplay.c:357
packet_queue_put_private
static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:426
pos
unsigned int pos
Definition: spdifenc.c:412
VideoState::audio_buf_index
int audio_buf_index
Definition: ffplay.c:249
avformat.h
iformat
static AVInputFormat * iformat
Definition: ffprobe.c:260
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
dict.h
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:406
VideoState::last_paused
int last_paused
Definition: ffplay.c:210
AV_LOG_SKIP_REPEATED
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:384
AV_PIX_FMT_UYVY422
@ AV_PIX_FMT_UYVY422
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
Definition: pixfmt.h:81
AV_DICT_MATCH_CASE
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:69
rdftspeed
double rdftspeed
Definition: ffplay.c:348
MyAVPacketList::serial
int serial
Definition: ffplay.c:116
opt_width
static int opt_width(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3483
main
int main(int argc, char **argv)
Definition: ffplay.c:3683
avformat_network_deinit
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:5068
show_usage
static void show_usage(void)
Definition: ffplay.c:3640
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:71
AVCodecContext
main external API structure.
Definition: avcodec.h:536
AVFrame::height
int height
Definition: frame.h:376
CMDUTILS_COMMON_OPTIONS
#define CMDUTILS_COMMON_OPTIONS
Definition: cmdutils.h:212
PacketQueue::mutex
SDL_mutex * mutex
Definition: ffplay.c:126
packet_queue_start
static void packet_queue_start(PacketQueue *q)
Definition: ffplay.c:534
VideoState::vidclk
Clock vidclk
Definition: ffplay.c:221
audio_codec_name
static const char * audio_codec_name
Definition: ffplay.c:345
get_rotation
double get_rotation(AVStream *st)
Definition: cmdutils.c:2200
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
AV_SYNC_FRAMEDUP_THRESHOLD
#define AV_SYNC_FRAMEDUP_THRESHOLD
Definition: ffplay.c:86
show_mode
static enum ShowMode show_mode
Definition: ffplay.c:344
PacketQueue::cond
SDL_cond * cond
Definition: ffplay.c:127
sws_freeContext
void sws_freeContext(struct SwsContext *swsContext)
Free the swscaler context swsContext.
Definition: utils.c:2337
PacketQueue::size
int size
Definition: ffplay.c:122
options
static const OptionDef options[]
Definition: ffplay.c:3584
AVInputFormat::flags
int flags
Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, AVFMT_NOTIMESTAMPS,...
Definition: avformat.h:659
AudioParams::channels
int channels
Definition: ffplay.c:137
AVRational::den
int den
Denominator.
Definition: rational.h:60
mode
mode
Definition: ebur128.h:83
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
VideoState::subtitle_stream
int subtitle_stream
Definition: ffplay.c:277
avfilter.h
VideoState::abort_request
int abort_request
Definition: ffplay.c:207
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:828
VideoState::audio_buf1_size
unsigned int audio_buf1_size
Definition: ffplay.c:248
VideoState::eof
int eof
Definition: ffplay.c:290
AVFMT_NOGENSEARCH
#define AVFMT_NOGENSEARCH
Format does not allow falling back on generic search.
Definition: avformat.h:469
AV_SYNC_THRESHOLD_MAX
#define AV_SYNC_THRESHOLD_MAX
Definition: ffplay.c:84
av_buffersink_get_channels
int av_buffersink_get_channels(const AVFilterContext *ctx)
decoder_destroy
static void decoder_destroy(Decoder *d)
Definition: ffplay.c:680
av_get_packed_sample_fmt
enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt)
Get the packed alternative form of the given sample format.
Definition: samplefmt.c:75
VideoState::read_tid
SDL_Thread * read_tid
Definition: ffplay.c:205
VideoState::audio_volume
int audio_volume
Definition: ffplay.c:251
VideoState::subdec
Decoder subdec
Definition: ffplay.c:230
AVIOContext::eof_reached
int eof_reached
true if was unable to read due to error or eof
Definition: avio.h:239
stream_has_enough_packets
static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue)
Definition: ffplay.c:2731
samples
The word “frame” indicates either a video frame or a group of audio samples.
Definition: filter_design.txt:8
AVFormatContext::duration
int64_t duration
Duration of the stream, in AV_TIME_BASE fractional seconds.
Definition: avformat.h:1347
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
AVPacket::stream_index
int stream_index
Definition: packet.h:371
GROW_ARRAY
#define GROW_ARRAY(array, nb_elems)
Definition: cmdutils.h:619
SUBPICTURE_QUEUE_SIZE
#define SUBPICTURE_QUEUE_SIZE
Definition: ffplay.c:131
AVFilterContext
An instance of a filter.
Definition: avfilter.h:341
input_filename
static const char * input_filename
Definition: ffplay.c:312
stream_toggle_pause
static void stream_toggle_pause(VideoState *is)
Definition: ffplay.c:1487
VideoState::continue_read_thread
SDL_cond * continue_read_thread
Definition: ffplay.c:307
av_dict_set_int
int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags)
Convenience wrapper for av_dict_set that converts the value to a string and stores it.
Definition: dict.c:147
toggle_audio_display
static void toggle_audio_display(VideoState *is)
Definition: ffplay.c:3221
AVCodecContext::codec_type
enum AVMediaType codec_type
Definition: avcodec.h:544
opt_default
int opt_default(void *optctx, const char *opt, const char *arg)
Fallback for options that are not explicitly handled; these will be parsed through AVOptions.
Definition: cmdutils.c:541
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:253
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AVFMT_TS_DISCONT
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:464
get_video_frame
static int get_video_frame(VideoState *is, AVFrame *frame)
Definition: ffplay.c:1775
av_get_default_channel_layout
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
Definition: channel_layout.c:231
av_fifo_size
int av_fifo_size(const AVFifoBuffer *f)
Return the amount of data in bytes in the AVFifoBuffer, that is the amount of data you can read from ...
Definition: fifo.c:77
default_width
static int default_width
Definition: ffplay.c:314
AVIOInterruptCB::callback
int(* callback)(void *)
Definition: avio.h:59
VideoState::realtime
int realtime
Definition: ffplay.c:218
VideoState::sub_convert_ctx
struct SwsContext * sub_convert_ctx
Definition: ffplay.c:289
diff
static av_always_inline int diff(const uint32_t a, const uint32_t b)
Definition: vf_palettegen.c:136
av_fifo_freep
void av_fifo_freep(AVFifoBuffer **f)
Free an AVFifoBuffer and reset pointer to NULL.
Definition: fifo.c:63
OPT_VIDEO
#define OPT_VIDEO
Definition: cmdutils.h:165
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVDictionaryEntry
Definition: dict.h:81
AVPacket
This structure stores compressed data.
Definition: packet.h:346
audio_disable
static int audio_disable
Definition: ffplay.c:320
refresh_loop_wait_event
static void refresh_loop_wait_event(VideoState *is, SDL_Event *event)
Definition: ffplay.c:3233
channel_layouts
static const uint16_t channel_layouts[7]
Definition: dca_lbr.c:114
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
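A minimal sketch of building, querying and freeing an options dictionary; dict_demo and the option names used here are illustrative only.

#include <stdio.h>
#include <libavutil/dict.h>

static void dict_demo(void)
{
    AVDictionary *opts = NULL;
    AVDictionaryEntry *e;

    av_dict_set(&opts, "threads", "auto", 0);         /* overwrites an existing entry */
    av_dict_set_int(&opts, "probesize", 1 << 20, 0);  /* value converted to a string */

    e = av_dict_get(opts, "threads", NULL, 0);
    if (e)
        printf("threads=%s\n", e->value);

    av_dict_free(&opts);
}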
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:502
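A hedged sketch of the grow-only scratch-buffer pattern this function enables (the same pattern ffplay applies to audio_buf1); get_scratch and the static variables are illustrative.

#include <stdint.h>
#include <stddef.h>
#include <libavutil/mem.h>

static uint8_t *scratch      = NULL;
static unsigned scratch_size = 0;

/* Reallocate only when the requested size exceeds the current allocation. */
static uint8_t *get_scratch(size_t needed)
{
    av_fast_malloc(&scratch, &scratch_size, needed);
    return scratch;   /* NULL on allocation failure */
}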
stream_component_close
static void stream_component_close(VideoState *is, int stream_index)
Definition: ffplay.c:1205
VideoState::subtitleq
PacketQueue subtitleq
Definition: ffplay.c:279
cmdutils.h
cmp_audio_fmts
static int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1, enum AVSampleFormat fmt2, int64_t channel_count2)
Definition: ffplay.c:407
OPT_BOOL
#define OPT_BOOL
Definition: cmdutils.h:162
Decoder::decoder_tid
SDL_Thread * decoder_tid
Definition: ffplay.c:201
av_fifo_alloc
AVFifoBuffer * av_fifo_alloc(unsigned int size)
Initialize an AVFifoBuffer.
Definition: fifo.c:43
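A small sketch combining av_fifo_alloc(), av_fifo_size() and av_fifo_freep() from this index; fifo_demo is an illustrative name.

#include <libavutil/fifo.h>

static int fifo_demo(void)
{
    AVFifoBuffer *f = av_fifo_alloc(16 * sizeof(int));
    int v = 42, out = 0;

    if (!f)
        return -1;
    av_fifo_generic_write(f, &v, sizeof(v), NULL);
    /* av_fifo_size() reports the number of bytes available for reading */
    if (av_fifo_size(f) >= (int)sizeof(out))
        av_fifo_generic_read(f, &out, sizeof(out), NULL);
    av_fifo_freep(&f);
    return out;
}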
parse_time_or_die
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds.
Definition: cmdutils.c:162
framedrop
static int framedrop
Definition: ffplay.c:342
VideoState::audio_stream
int audio_stream
Definition: ffplay.c:232
imgutils.h
VideoState::audio_buf_size
unsigned int audio_buf_size
Definition: ffplay.c:247
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:561
av_rdft_end
void av_rdft_end(RDFTContext *s)
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:349
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AVFormatContext::start_time
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds.
Definition: avformat.h:1337
PacketQueue::abort_request
int abort_request
Definition: ffplay.c:124
VideoState::ic
AVFormatContext * ic
Definition: ffplay.c:217
VideoState::viddec
Decoder viddec
Definition: ffplay.c:229
h
h
Definition: vp9dsp_template.c:2038
AVDictionaryEntry::value
char * value
Definition: dict.h:83
AVStream::start_time
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base.
Definition: avformat.h:912
VideoState::audio_buf
uint8_t * audio_buf
Definition: ffplay.c:245
AVFilterGraph::nb_filters
unsigned nb_filters
Definition: avfilter.h:853
avstring.h
frame_queue_peek_last
static Frame * frame_queue_peek_last(FrameQueue *f)
Definition: ffplay.c:741
VideoState::last_vis_time
double last_vis_time
Definition: ffplay.c:272
show_help_options
void show_help_options(const OptionDef *options, const char *msg, int req_flags, int rej_flags, int alt_flags)
Print help for all options matching specified flags.
Definition: cmdutils.c:174
decoder_reorder_pts
static int decoder_reorder_pts
Definition: ffplay.c:337
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
Definition: pixfmt.h:514
VideoState::audio_tgt
struct AudioParams audio_tgt
Definition: ffplay.c:257
AVChapter::time_base
AVRational time_base
time base in which the start/end timestamps are specified
Definition: avformat.h:1191
audio_stream
static AVStream * audio_stream
Definition: demuxing_decoding.c:41
int
int
Definition: ffmpeg_filter.c:170
SwsContext
Definition: swscale_internal.h:283
VideoState::audclk
Clock audclk
Definition: ffplay.c:220
avfilter_get_class
const AVClass * avfilter_get_class(void)
Definition: avfilter.c:1648
AVFilterInOut
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:1013
FrameQueue::pktq
PacketQueue * pktq
Definition: ffplay.c:180
short
name is the option name (keep it simple and lowercase); descriptions are short
Definition: writing_filters.txt:89
snprintf
#define snprintf
Definition: snprintf.h:34
video_audio_display
static void video_audio_display(VideoState *s)
Definition: ffplay.c:1063
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
AV_SYNC_THRESHOLD_MIN
#define AV_SYNC_THRESHOLD_MIN
Definition: ffplay.c:82
buffersrc.h
AudioParams::bytes_per_sec
int bytes_per_sec
Definition: ffplay.c:141
check_external_clock_speed
static void check_external_clock_speed(VideoState *is)
Definition: ffplay.c:1458
AVSubtitle::start_display_time
uint32_t start_display_time
Definition: avcodec.h:2724
SAMPLE_CORRECTION_PERCENT_MAX
#define SAMPLE_CORRECTION_PERCENT_MAX
Definition: ffplay.c:91
EXTERNAL_CLOCK_SPEED_MAX
#define EXTERNAL_CLOCK_SPEED_MAX
Definition: ffplay.c:95
SWS_BICUBIC
#define SWS_BICUBIC
Definition: swscale.h:60
packet_queue_put_nullpacket
static int packet_queue_put_nullpacket(PacketQueue *q, AVPacket *pkt, int stream_index)
Definition: ffplay.c:472
duration
static int64_t duration
Definition: ffplay.c:333
MyAVPacketList::pkt
AVPacket * pkt
Definition: ffplay.c:115
swscale.h
is_realtime
static int is_realtime(AVFormatContext *s)
Definition: ffplay.c:2738
av_x_if_null
static void * av_x_if_null(const void *p, const void *x)
Return the default pointer x in case p is NULL.
Definition: avutil.h:308
Frame::height
int height
Definition: ffplay.c:163
decode_interrupt_cb
static int decode_interrupt_cb(void *ctx)
Definition: ffplay.c:2725
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2489
VideoState::frame_timer
double frame_timer
Definition: ffplay.c:281
VideoState::audio_clock_serial
int audio_clock_serial
Definition: ffplay.c:237
av_find_best_stream
int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type, int wanted_stream_nb, int related_stream, AVCodec **decoder_ret, int flags)
Find the "best" stream in the file.
Definition: utils.c:4230
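A minimal sketch of selecting a video stream together with a suitable decoder; pick_video_stream is an illustrative helper on top of an already opened AVFormatContext.

#include <libavformat/avformat.h>

static int pick_video_stream(AVFormatContext *ic, AVCodec **dec)
{
    /* -1, -1: no user-requested stream, no related stream */
    int idx = av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, -1, -1, dec, 0);
    /* >= 0: stream index; otherwise AVERROR_STREAM_NOT_FOUND or
     * AVERROR_DECODER_NOT_FOUND */
    return idx;
}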
avdevice_register_all
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:66
nb_channels
int nb_channels
Definition: channel_layout.c:81
avio_feof
int avio_feof(AVIOContext *s)
Similar to feof() but also returns nonzero on read errors.
Definition: aviobuf.c:364
realloc_texture
static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
Definition: ffplay.c:839
AV_PIX_FMT_RGB444
#define AV_PIX_FMT_RGB444
Definition: pixfmt.h:388
exit_on_keydown
static int exit_on_keydown
Definition: ffplay.c:339