FFmpeg
ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include <stdint.h>
32 
33 #include "libavutil/avstring.h"
34 #include "libavutil/eval.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/dict.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/samplefmt.h"
41 #include "libavutil/avassert.h"
42 #include "libavutil/time.h"
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswscale/swscale.h"
46 #include "libavutil/opt.h"
47 #include "libavcodec/avfft.h"
48 #include "libswresample/swresample.h"
49 
50 #if CONFIG_AVFILTER
51 # include "libavfilter/avfilter.h"
52 # include "libavfilter/buffersink.h"
53 # include "libavfilter/buffersrc.h"
54 #endif
55 
56 #include <SDL.h>
57 #include <SDL_thread.h>
58 
59 #include "cmdutils.h"
60 
61 #include <assert.h>
62 
63 const char program_name[] = "ffplay";
64 const int program_birth_year = 2003;
65 
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_FRAMES 25
68 #define EXTERNAL_CLOCK_MIN_FRAMES 2
69 #define EXTERNAL_CLOCK_MAX_FRAMES 10
70 
71 /* Minimum SDL audio buffer size, in samples. */
72 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
73 /* Calculate actual buffer size, keeping in mind not to cause too frequent audio callbacks */
74 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
75 
76 /* Step size for volume control in dB */
77 #define SDL_VOLUME_STEP (0.75)
78 
79 /* no AV sync correction is done if below the minimum AV sync threshold */
80 #define AV_SYNC_THRESHOLD_MIN 0.04
81 /* AV sync correction is done if above the maximum AV sync threshold */
82 #define AV_SYNC_THRESHOLD_MAX 0.1
83 /* If a frame's duration is longer than this, it will not be duplicated to compensate for AV sync */
84 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
85 /* no AV correction is done if the error is too big */
86 #define AV_NOSYNC_THRESHOLD 10.0
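/* Taken together: compute_target_delay() clamps its per-frame sync threshold to
 * [AV_SYNC_THRESHOLD_MIN, AV_SYNC_THRESHOLD_MAX]; when video runs ahead of the master
 * clock, frames whose delay is at most AV_SYNC_FRAMEDUP_THRESHOLD get their delay
 * doubled (effectively duplicated) instead of stretched; and differences larger than
 * AV_NOSYNC_THRESHOLD are treated as a discontinuity and left uncorrected. */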
87 
88 /* maximum audio speed change to get correct sync */
89 #define SAMPLE_CORRECTION_PERCENT_MAX 10
90 
91 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
92 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
93 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
94 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
95 
96 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
97 #define AUDIO_DIFF_AVG_NB 20
98 
99 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
100 #define REFRESH_RATE 0.01
101 
102 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
103 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
104 #define SAMPLE_ARRAY_SIZE (8 * 65536)
105 
106 #define CURSOR_HIDE_DELAY 1000000
107 
108 #define USE_ONEPASS_SUBTITLE_RENDER 1
109 
110 static unsigned sws_flags = SWS_BICUBIC;
111 
112 typedef struct MyAVPacketList {
113  AVPacket pkt;
114  struct MyAVPacketList *next;
115  int serial;
116 } MyAVPacketList;
117 
118 typedef struct PacketQueue {
119  MyAVPacketList *first_pkt, *last_pkt;
120  int nb_packets;
121  int size;
122  int64_t duration;
123  int abort_request;
124  int serial;
125  SDL_mutex *mutex;
126  SDL_cond *cond;
127 } PacketQueue;
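/* Packets are serial-numbered: pushing flush_pkt bumps PacketQueue.serial, and every
 * queued packet (and every frame decoded from it) carries the serial it was queued
 * under.  After a seek, anything tagged with a stale serial can be detected and
 * discarded without draining the whole pipeline synchronously. */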
128 
129 #define VIDEO_PICTURE_QUEUE_SIZE 3
130 #define SUBPICTURE_QUEUE_SIZE 16
131 #define SAMPLE_QUEUE_SIZE 9
132 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
133 
134 typedef struct AudioParams {
135  int freq;
136  int channels;
137  int64_t channel_layout;
138  enum AVSampleFormat fmt;
139  int frame_size;
140  int bytes_per_sec;
141 } AudioParams;
142 
143 typedef struct Clock {
144  double pts; /* clock base */
145  double pts_drift; /* clock base minus time at which we updated the clock */
146  double last_updated;
147  double speed;
148  int serial; /* clock is based on a packet with this serial */
149  int paused;
150  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
151 } Clock;
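/* A Clock stores pts_drift = pts - last_updated so that the current value can be
 * derived from the system time without updating the clock on every query; see
 * get_clock() below for the exact formula, including the speed correction. */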
152 
153 /* Common struct for handling all types of decoded data and allocated render buffers. */
154 typedef struct Frame {
155  AVFrame *frame;
156  AVSubtitle sub;
157  int serial;
158  double pts; /* presentation timestamp for the frame */
159  double duration; /* estimated duration of the frame */
160  int64_t pos; /* byte position of the frame in the input file */
161  int width;
162  int height;
163  int format;
164  AVRational sar;
165  int uploaded;
166  int flip_v;
167 } Frame;
168 
169 typedef struct FrameQueue {
170  Frame queue[FRAME_QUEUE_SIZE];
171  int rindex;
172  int windex;
173  int size;
174  int max_size;
175  int keep_last;
176  int rindex_shown;
177  SDL_mutex *mutex;
178  SDL_cond *cond;
179  PacketQueue *pktq;
180 } FrameQueue;
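/* FrameQueue is a fixed-size ring buffer protected by mutex/cond.  With keep_last set,
 * the most recently shown frame stays readable (rindex_shown == 1) so the renderer can
 * repaint it, e.g. while paused, without consuming a new frame. */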
181 
182 enum {
183  AV_SYNC_AUDIO_MASTER, /* default choice */
184  AV_SYNC_VIDEO_MASTER, /* synchronize to the video clock */
185  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
186 };
187 
188 typedef struct Decoder {
189  AVPacket pkt;
190  PacketQueue *queue;
191  AVCodecContext *avctx;
192  int pkt_serial;
193  int finished;
194  int packet_pending;
195  SDL_cond *empty_queue_cond;
196  int64_t start_pts;
197  AVRational start_pts_tb;
198  int64_t next_pts;
199  AVRational next_pts_tb;
200  SDL_Thread *decoder_tid;
201 } Decoder;
202 
203 typedef struct VideoState {
204  SDL_Thread *read_tid;
208  int paused;
211  int seek_req;
213  int64_t seek_pos;
214  int64_t seek_rel;
217  int realtime;
218 
222 
226 
230 
232 
234 
235  double audio_clock;
237  double audio_diff_cum; /* used for AV difference average computation */
246  unsigned int audio_buf_size; /* in bytes */
247  unsigned int audio_buf1_size;
248  int audio_buf_index; /* in bytes */
251  int muted;
253 #if CONFIG_AVFILTER
254  struct AudioParams audio_filter_src;
255 #endif
260 
261  enum ShowMode {
262  SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
263  } show_mode;
270  int xpos;
272  SDL_Texture *vis_texture;
273  SDL_Texture *sub_texture;
274  SDL_Texture *vid_texture;
275 
279 
280  double frame_timer;
286  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
289  int eof;
290 
291  char *filename;
293  int step;
294 
295 #if CONFIG_AVFILTER
296  int vfilter_idx;
297  AVFilterContext *in_video_filter; // the first filter in the video chain
298  AVFilterContext *out_video_filter; // the last filter in the video chain
299  AVFilterContext *in_audio_filter; // the first filter in the audio chain
300  AVFilterContext *out_audio_filter; // the last filter in the audio chain
301  AVFilterGraph *agraph; // audio filter graph
302 #endif
303 
305 
307 } VideoState;
308 
309 /* options specified by the user */
310 static AVInputFormat *file_iformat;
311 static const char *input_filename;
312 static const char *window_title;
313 static int default_width = 640;
314 static int default_height = 480;
315 static int screen_width = 0;
316 static int screen_height = 0;
317 static int screen_left = SDL_WINDOWPOS_CENTERED;
318 static int screen_top = SDL_WINDOWPOS_CENTERED;
319 static int audio_disable;
320 static int video_disable;
321 static int subtitle_disable;
322 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
323 static int seek_by_bytes = -1;
324 static float seek_interval = 10;
325 static int display_disable;
326 static int borderless;
327 static int alwaysontop;
328 static int startup_volume = 100;
329 static int show_status = 1;
330 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
331 static int64_t start_time = AV_NOPTS_VALUE;
332 static int64_t duration = AV_NOPTS_VALUE;
333 static int fast = 0;
334 static int genpts = 0;
335 static int lowres = 0;
336 static int decoder_reorder_pts = -1;
337 static int autoexit;
338 static int exit_on_keydown;
339 static int exit_on_mousedown;
340 static int loop = 1;
341 static int framedrop = -1;
342 static int infinite_buffer = -1;
343 static enum ShowMode show_mode = SHOW_MODE_NONE;
344 static const char *audio_codec_name;
345 static const char *subtitle_codec_name;
346 static const char *video_codec_name;
347 double rdftspeed = 0.02;
348 static int64_t cursor_last_shown;
349 static int cursor_hidden = 0;
350 #if CONFIG_AVFILTER
351 static const char **vfilters_list = NULL;
352 static int nb_vfilters = 0;
353 static char *afilters = NULL;
354 #endif
355 static int autorotate = 1;
356 static int find_stream_info = 1;
357 static int filter_nbthreads = 0;
358 
359 /* current context */
360 static int is_full_screen;
361 static int64_t audio_callback_time;
362 
363 static AVPacket flush_pkt;
364 
365 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
366 
367 static SDL_Window *window;
368 static SDL_Renderer *renderer;
369 static SDL_RendererInfo renderer_info = {0};
370 static SDL_AudioDeviceID audio_dev;
371 
372 static const struct TextureFormatEntry {
373  enum AVPixelFormat format;
374  int texture_fmt;
375 } sdl_texture_format_map[] = {
376  { AV_PIX_FMT_RGB8, SDL_PIXELFORMAT_RGB332 },
377  { AV_PIX_FMT_RGB444, SDL_PIXELFORMAT_RGB444 },
378  { AV_PIX_FMT_RGB555, SDL_PIXELFORMAT_RGB555 },
379  { AV_PIX_FMT_BGR555, SDL_PIXELFORMAT_BGR555 },
380  { AV_PIX_FMT_RGB565, SDL_PIXELFORMAT_RGB565 },
381  { AV_PIX_FMT_BGR565, SDL_PIXELFORMAT_BGR565 },
382  { AV_PIX_FMT_RGB24, SDL_PIXELFORMAT_RGB24 },
383  { AV_PIX_FMT_BGR24, SDL_PIXELFORMAT_BGR24 },
384  { AV_PIX_FMT_0RGB32, SDL_PIXELFORMAT_RGB888 },
385  { AV_PIX_FMT_0BGR32, SDL_PIXELFORMAT_BGR888 },
386  { AV_PIX_FMT_NE(RGB0, 0BGR), SDL_PIXELFORMAT_RGBX8888 },
387  { AV_PIX_FMT_NE(BGR0, 0RGB), SDL_PIXELFORMAT_BGRX8888 },
388  { AV_PIX_FMT_RGB32, SDL_PIXELFORMAT_ARGB8888 },
389  { AV_PIX_FMT_RGB32_1, SDL_PIXELFORMAT_RGBA8888 },
390  { AV_PIX_FMT_BGR32, SDL_PIXELFORMAT_ABGR8888 },
391  { AV_PIX_FMT_BGR32_1, SDL_PIXELFORMAT_BGRA8888 },
392  { AV_PIX_FMT_YUV420P, SDL_PIXELFORMAT_IYUV },
393  { AV_PIX_FMT_YUYV422, SDL_PIXELFORMAT_YUY2 },
394  { AV_PIX_FMT_UYVY422, SDL_PIXELFORMAT_UYVY },
395  { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_UNKNOWN },
396 };
397 
398 #if CONFIG_AVFILTER
399 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
400 {
401  GROW_ARRAY(vfilters_list, nb_vfilters);
402  vfilters_list[nb_vfilters - 1] = arg;
403  return 0;
404 }
405 #endif
406 
407 static inline
408 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
409  enum AVSampleFormat fmt2, int64_t channel_count2)
410 {
411  /* If channel count == 1, planar and non-planar formats are the same */
412  if (channel_count1 == 1 && channel_count2 == 1)
413  return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
414  else
415  return channel_count1 != channel_count2 || fmt1 != fmt2;
416 }
417 
418 static inline
419 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
420 {
421  if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
422  return channel_layout;
423  else
424  return 0;
425 }
426 
427 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
428 {
429  MyAVPacketList *pkt1;
430 
431  if (q->abort_request)
432  return -1;
433 
434  pkt1 = av_malloc(sizeof(MyAVPacketList));
435  if (!pkt1)
436  return -1;
437  pkt1->pkt = *pkt;
438  pkt1->next = NULL;
439  if (pkt == &flush_pkt)
440  q->serial++;
441  pkt1->serial = q->serial;
442 
443  if (!q->last_pkt)
444  q->first_pkt = pkt1;
445  else
446  q->last_pkt->next = pkt1;
447  q->last_pkt = pkt1;
448  q->nb_packets++;
449  q->size += pkt1->pkt.size + sizeof(*pkt1);
450  q->duration += pkt1->pkt.duration;
451  /* XXX: should duplicate packet data in DV case */
452  SDL_CondSignal(q->cond);
453  return 0;
454 }
455 
456 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
457 {
458  int ret;
459 
460  SDL_LockMutex(q->mutex);
461  ret = packet_queue_put_private(q, pkt);
462  SDL_UnlockMutex(q->mutex);
463 
464  if (pkt != &flush_pkt && ret < 0)
465  av_packet_unref(pkt);
466 
467  return ret;
468 }
469 
470 static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
471 {
472  AVPacket pkt1, *pkt = &pkt1;
473  av_init_packet(pkt);
474  pkt->data = NULL;
475  pkt->size = 0;
476  pkt->stream_index = stream_index;
477  return packet_queue_put(q, pkt);
478 }
479 
480 /* packet queue handling */
481 static int packet_queue_init(PacketQueue *q)
482 {
483  memset(q, 0, sizeof(PacketQueue));
484  q->mutex = SDL_CreateMutex();
485  if (!q->mutex) {
486  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
487  return AVERROR(ENOMEM);
488  }
489  q->cond = SDL_CreateCond();
490  if (!q->cond) {
491  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
492  return AVERROR(ENOMEM);
493  }
494  q->abort_request = 1;
495  return 0;
496 }
497 
498 static void packet_queue_flush(PacketQueue *q)
499 {
500  MyAVPacketList *pkt, *pkt1;
501 
502  SDL_LockMutex(q->mutex);
503  for (pkt = q->first_pkt; pkt; pkt = pkt1) {
504  pkt1 = pkt->next;
505  av_packet_unref(&pkt->pkt);
506  av_freep(&pkt);
507  }
508  q->last_pkt = NULL;
509  q->first_pkt = NULL;
510  q->nb_packets = 0;
511  q->size = 0;
512  q->duration = 0;
513  SDL_UnlockMutex(q->mutex);
514 }
515 
516 static void packet_queue_destroy(PacketQueue *q)
517 {
518  packet_queue_flush(q);
519  SDL_DestroyMutex(q->mutex);
520  SDL_DestroyCond(q->cond);
521 }
522 
523 static void packet_queue_abort(PacketQueue *q)
524 {
525  SDL_LockMutex(q->mutex);
526 
527  q->abort_request = 1;
528 
529  SDL_CondSignal(q->cond);
530 
531  SDL_UnlockMutex(q->mutex);
532 }
533 
534 static void packet_queue_start(PacketQueue *q)
535 {
536  SDL_LockMutex(q->mutex);
537  q->abort_request = 0;
538  packet_queue_put_private(q, &flush_pkt);
539  SDL_UnlockMutex(q->mutex);
540 }
541 
542 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
543 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
544 {
545  MyAVPacketList *pkt1;
546  int ret;
547 
548  SDL_LockMutex(q->mutex);
549 
550  for (;;) {
551  if (q->abort_request) {
552  ret = -1;
553  break;
554  }
555 
556  pkt1 = q->first_pkt;
557  if (pkt1) {
558  q->first_pkt = pkt1->next;
559  if (!q->first_pkt)
560  q->last_pkt = NULL;
561  q->nb_packets--;
562  q->size -= pkt1->pkt.size + sizeof(*pkt1);
563  q->duration -= pkt1->pkt.duration;
564  *pkt = pkt1->pkt;
565  if (serial)
566  *serial = pkt1->serial;
567  av_free(pkt1);
568  ret = 1;
569  break;
570  } else if (!block) {
571  ret = 0;
572  break;
573  } else {
574  SDL_CondWait(q->cond, q->mutex);
575  }
576  }
577  SDL_UnlockMutex(q->mutex);
578  return ret;
579 }
580 
581 static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
582  memset(d, 0, sizeof(Decoder));
583  d->avctx = avctx;
584  d->queue = queue;
585  d->empty_queue_cond = empty_queue_cond;
586  d->start_pts = AV_NOPTS_VALUE;
587  d->pkt_serial = -1;
588 }
589 
590 static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
591  int ret = AVERROR(EAGAIN);
592 
593  for (;;) {
594  AVPacket pkt;
595 
596  if (d->queue->serial == d->pkt_serial) {
597  do {
598  if (d->queue->abort_request)
599  return -1;
600 
601  switch (d->avctx->codec_type) {
602  case AVMEDIA_TYPE_VIDEO:
603  ret = avcodec_receive_frame(d->avctx, frame);
604  if (ret >= 0) {
605  if (decoder_reorder_pts == -1) {
606  frame->pts = frame->best_effort_timestamp;
607  } else if (!decoder_reorder_pts) {
608  frame->pts = frame->pkt_dts;
609  }
610  }
611  break;
612  case AVMEDIA_TYPE_AUDIO:
613  ret = avcodec_receive_frame(d->avctx, frame);
614  if (ret >= 0) {
615  AVRational tb = (AVRational){1, frame->sample_rate};
616  if (frame->pts != AV_NOPTS_VALUE)
617  frame->pts = av_rescale_q(frame->pts, d->avctx->pkt_timebase, tb);
618  else if (d->next_pts != AV_NOPTS_VALUE)
619  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
620  if (frame->pts != AV_NOPTS_VALUE) {
621  d->next_pts = frame->pts + frame->nb_samples;
622  d->next_pts_tb = tb;
623  }
624  }
625  break;
626  }
627  if (ret == AVERROR_EOF) {
628  d->finished = d->pkt_serial;
629  avcodec_flush_buffers(d->avctx);
630  return 0;
631  }
632  if (ret >= 0)
633  return 1;
634  } while (ret != AVERROR(EAGAIN));
635  }
636 
637  do {
638  if (d->queue->nb_packets == 0)
639  SDL_CondSignal(d->empty_queue_cond);
640  if (d->packet_pending) {
641  av_packet_move_ref(&pkt, &d->pkt);
642  d->packet_pending = 0;
643  } else {
644  if (packet_queue_get(d->queue, &pkt, 1, &d->pkt_serial) < 0)
645  return -1;
646  }
647  } while (d->queue->serial != d->pkt_serial);
648 
649  if (pkt.data == flush_pkt.data) {
650  avcodec_flush_buffers(d->avctx);
651  d->finished = 0;
652  d->next_pts = d->start_pts;
653  d->next_pts_tb = d->start_pts_tb;
654  } else {
655  if (d->avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
656  int got_frame = 0;
657  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &pkt);
658  if (ret < 0) {
659  ret = AVERROR(EAGAIN);
660  } else {
661  if (got_frame && !pkt.data) {
662  d->packet_pending = 1;
663  av_packet_move_ref(&d->pkt, &pkt);
664  }
665  ret = got_frame ? 0 : (pkt.data ? AVERROR(EAGAIN) : AVERROR_EOF);
666  }
667  } else {
668  if (avcodec_send_packet(d->avctx, &pkt) == AVERROR(EAGAIN)) {
669  av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
670  d->packet_pending = 1;
671  av_packet_move_ref(&d->pkt, &pkt);
672  }
673  }
674  av_packet_unref(&pkt);
675  }
676  }
677 }
678 
679 static void decoder_destroy(Decoder *d) {
680  av_packet_unref(&d->pkt);
681  avcodec_free_context(&d->avctx);
682 }
683 
684 static void frame_queue_unref_item(Frame *vp)
685 {
686  av_frame_unref(vp->frame);
687  avsubtitle_free(&vp->sub);
688 }
689 
690 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
691 {
692  int i;
693  memset(f, 0, sizeof(FrameQueue));
694  if (!(f->mutex = SDL_CreateMutex())) {
695  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
696  return AVERROR(ENOMEM);
697  }
698  if (!(f->cond = SDL_CreateCond())) {
699  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
700  return AVERROR(ENOMEM);
701  }
702  f->pktq = pktq;
703  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
704  f->keep_last = !!keep_last;
705  for (i = 0; i < f->max_size; i++)
706  if (!(f->queue[i].frame = av_frame_alloc()))
707  return AVERROR(ENOMEM);
708  return 0;
709 }
710 
711 static void frame_queue_destory(FrameQueue *f)
712 {
713  int i;
714  for (i = 0; i < f->max_size; i++) {
715  Frame *vp = &f->queue[i];
716  frame_queue_unref_item(vp);
717  av_frame_free(&vp->frame);
718  }
719  SDL_DestroyMutex(f->mutex);
720  SDL_DestroyCond(f->cond);
721 }
722 
723 static void frame_queue_signal(FrameQueue *f)
724 {
725  SDL_LockMutex(f->mutex);
726  SDL_CondSignal(f->cond);
727  SDL_UnlockMutex(f->mutex);
728 }
729 
730 static Frame *frame_queue_peek(FrameQueue *f)
731 {
732  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
733 }
734 
735 static Frame *frame_queue_peek_next(FrameQueue *f)
736 {
737  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
738 }
739 
740 static Frame *frame_queue_peek_last(FrameQueue *f)
741 {
742  return &f->queue[f->rindex];
743 }
744 
745 static Frame *frame_queue_peek_writable(FrameQueue *f)
746 {
747  /* wait until we have space to put a new frame */
748  SDL_LockMutex(f->mutex);
749  while (f->size >= f->max_size &&
750  !f->pktq->abort_request) {
751  SDL_CondWait(f->cond, f->mutex);
752  }
753  SDL_UnlockMutex(f->mutex);
754 
755  if (f->pktq->abort_request)
756  return NULL;
757 
758  return &f->queue[f->windex];
759 }
760 
761 static Frame *frame_queue_peek_readable(FrameQueue *f)
762 {
763  /* wait until we have a readable new frame */
764  SDL_LockMutex(f->mutex);
765  while (f->size - f->rindex_shown <= 0 &&
766  !f->pktq->abort_request) {
767  SDL_CondWait(f->cond, f->mutex);
768  }
769  SDL_UnlockMutex(f->mutex);
770 
771  if (f->pktq->abort_request)
772  return NULL;
773 
774  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
775 }
776 
777 static void frame_queue_push(FrameQueue *f)
778 {
779  if (++f->windex == f->max_size)
780  f->windex = 0;
781  SDL_LockMutex(f->mutex);
782  f->size++;
783  SDL_CondSignal(f->cond);
784  SDL_UnlockMutex(f->mutex);
785 }
786 
787 static void frame_queue_next(FrameQueue *f)
788 {
789  if (f->keep_last && !f->rindex_shown) {
790  f->rindex_shown = 1;
791  return;
792  }
793  frame_queue_unref_item(&f->queue[f->rindex]);
794  if (++f->rindex == f->max_size)
795  f->rindex = 0;
796  SDL_LockMutex(f->mutex);
797  f->size--;
798  SDL_CondSignal(f->cond);
799  SDL_UnlockMutex(f->mutex);
800 }
801 
802 /* return the number of undisplayed frames in the queue */
803 static int frame_queue_nb_remaining(FrameQueue *f)
804 {
805  return f->size - f->rindex_shown;
806 }
807 
808 /* return last shown position */
809 static int64_t frame_queue_last_pos(FrameQueue *f)
810 {
811  Frame *fp = &f->queue[f->rindex];
812  if (f->rindex_shown && fp->serial == f->pktq->serial)
813  return fp->pos;
814  else
815  return -1;
816 }
817 
818 static void decoder_abort(Decoder *d, FrameQueue *fq)
819 {
820  packet_queue_abort(d->queue);
821  frame_queue_signal(fq);
822  SDL_WaitThread(d->decoder_tid, NULL);
823  d->decoder_tid = NULL;
824  packet_queue_flush(d->queue);
825 }
826 
827 static inline void fill_rectangle(int x, int y, int w, int h)
828 {
829  SDL_Rect rect;
830  rect.x = x;
831  rect.y = y;
832  rect.w = w;
833  rect.h = h;
834  if (w && h)
835  SDL_RenderFillRect(renderer, &rect);
836 }
837 
838 static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
839 {
840  Uint32 format;
841  int access, w, h;
842  if (!*texture || SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
843  void *pixels;
844  int pitch;
845  if (*texture)
846  SDL_DestroyTexture(*texture);
847  if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
848  return -1;
849  if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
850  return -1;
851  if (init_texture) {
852  if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
853  return -1;
854  memset(pixels, 0, pitch * new_height);
855  SDL_UnlockTexture(*texture);
856  }
857  av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n", new_width, new_height, SDL_GetPixelFormatName(new_format));
858  }
859  return 0;
860 }
861 
862 static void calculate_display_rect(SDL_Rect *rect,
863  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
864  int pic_width, int pic_height, AVRational pic_sar)
865 {
866  AVRational aspect_ratio = pic_sar;
867  int64_t width, height, x, y;
868 
869  if (av_cmp_q(aspect_ratio, av_make_q(0, 1)) <= 0)
870  aspect_ratio = av_make_q(1, 1);
871 
872  aspect_ratio = av_mul_q(aspect_ratio, av_make_q(pic_width, pic_height));
873 
874  /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
875  height = scr_height;
876  width = av_rescale(height, aspect_ratio.num, aspect_ratio.den) & ~1;
877  if (width > scr_width) {
878  width = scr_width;
879  height = av_rescale(width, aspect_ratio.den, aspect_ratio.num) & ~1;
880  }
881  x = (scr_width - width) / 2;
882  y = (scr_height - height) / 2;
883  rect->x = scr_xleft + x;
884  rect->y = scr_ytop + y;
885  rect->w = FFMAX((int)width, 1);
886  rect->h = FFMAX((int)height, 1);
887 }
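/* Example: a 1440x1080 picture with a 4:3 sample aspect ratio has a 16:9 display
 * aspect, so in a 1280x720 window the rectangle fills it exactly, while in a 1280x1024
 * window it is letterboxed to 1280x720 and centered vertically. */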
888 
889 static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
890 {
891  int i;
892  *sdl_blendmode = SDL_BLENDMODE_NONE;
893  *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN;
894  if (format == AV_PIX_FMT_RGB32 ||
895  format == AV_PIX_FMT_RGB32_1 ||
896  format == AV_PIX_FMT_BGR32 ||
897  format == AV_PIX_FMT_BGR32_1)
898  *sdl_blendmode = SDL_BLENDMODE_BLEND;
899  for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; i++) {
900  if (format == sdl_texture_format_map[i].format) {
901  *sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt;
902  return;
903  }
904  }
905 }
906 
907 static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext **img_convert_ctx) {
908  int ret = 0;
909  Uint32 sdl_pix_fmt;
910  SDL_BlendMode sdl_blendmode;
911  get_sdl_pix_fmt_and_blendmode(frame->format, &sdl_pix_fmt, &sdl_blendmode);
912  if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
913  return -1;
914  switch (sdl_pix_fmt) {
915  case SDL_PIXELFORMAT_UNKNOWN:
916  /* This should only happen if we are not using avfilter... */
917  *img_convert_ctx = sws_getCachedContext(*img_convert_ctx,
918  frame->width, frame->height, frame->format, frame->width, frame->height,
919  AV_PIX_FMT_BGRA, sws_flags, NULL, NULL, NULL);
920  if (*img_convert_ctx != NULL) {
921  uint8_t *pixels[4];
922  int pitch[4];
923  if (!SDL_LockTexture(*tex, NULL, (void **)pixels, pitch)) {
924  sws_scale(*img_convert_ctx, (const uint8_t * const *)frame->data, frame->linesize,
925  0, frame->height, pixels, pitch);
926  SDL_UnlockTexture(*tex);
927  }
928  } else {
929  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
930  ret = -1;
931  }
932  break;
933  case SDL_PIXELFORMAT_IYUV:
934  if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
935  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0],
936  frame->data[1], frame->linesize[1],
937  frame->data[2], frame->linesize[2]);
938  } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
939  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0],
940  frame->data[1] + frame->linesize[1] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[1],
941  frame->data[2] + frame->linesize[2] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[2]);
942  } else {
943  av_log(NULL, AV_LOG_ERROR, "Mixed negative and positive linesizes are not supported.\n");
944  return -1;
945  }
946  break;
947  default:
948  if (frame->linesize[0] < 0) {
949  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
950  } else {
951  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0], frame->linesize[0]);
952  }
953  break;
954  }
955  return ret;
956 }
957 
958 static void set_sdl_yuv_conversion_mode(AVFrame *frame)
959 {
960 #if SDL_VERSION_ATLEAST(2,0,8)
961  SDL_YUV_CONVERSION_MODE mode = SDL_YUV_CONVERSION_AUTOMATIC;
962  if (frame && (frame->format == AV_PIX_FMT_YUV420P || frame->format == AV_PIX_FMT_YUYV422 || frame->format == AV_PIX_FMT_UYVY422)) {
963  if (frame->color_range == AVCOL_RANGE_JPEG)
964  mode = SDL_YUV_CONVERSION_JPEG;
965  else if (frame->colorspace == AVCOL_SPC_BT709)
966  mode = SDL_YUV_CONVERSION_BT709;
967  else if (frame->colorspace == AVCOL_SPC_BT470BG || frame->colorspace == AVCOL_SPC_SMPTE170M || frame->colorspace == AVCOL_SPC_SMPTE240M)
968  mode = SDL_YUV_CONVERSION_BT601;
969  }
970  SDL_SetYUVConversionMode(mode);
971 #endif
972 }
973 
974 static void video_image_display(VideoState *is)
975 {
976  Frame *vp;
977  Frame *sp = NULL;
978  SDL_Rect rect;
979 
980  vp = frame_queue_peek_last(&is->pictq);
981  if (is->subtitle_st) {
982  if (frame_queue_nb_remaining(&is->subpq) > 0) {
983  sp = frame_queue_peek(&is->subpq);
984 
985  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
986  if (!sp->uploaded) {
987  uint8_t* pixels[4];
988  int pitch[4];
989  int i;
990  if (!sp->width || !sp->height) {
991  sp->width = vp->width;
992  sp->height = vp->height;
993  }
994  if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
995  return;
996 
997  for (i = 0; i < sp->sub.num_rects; i++) {
998  AVSubtitleRect *sub_rect = sp->sub.rects[i];
999 
1000  sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
1001  sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
1002  sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x);
1003  sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
1004 
1005  is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
1006  sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8,
1007  sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA,
1008  0, NULL, NULL, NULL);
1009  if (!is->sub_convert_ctx) {
1010  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1011  return;
1012  }
1013  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) {
1014  sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
1015  0, sub_rect->h, pixels, pitch);
1016  SDL_UnlockTexture(is->sub_texture);
1017  }
1018  }
1019  sp->uploaded = 1;
1020  }
1021  } else
1022  sp = NULL;
1023  }
1024  }
1025 
1026  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
1027 
1028  if (!vp->uploaded) {
1029  if (upload_texture(&is->vid_texture, vp->frame, &is->img_convert_ctx) < 0)
1030  return;
1031  vp->uploaded = 1;
1032  vp->flip_v = vp->frame->linesize[0] < 0;
1033  }
1034 
1035  set_sdl_yuv_conversion_mode(vp->frame);
1036  SDL_RenderCopyEx(renderer, is->vid_texture, NULL, &rect, 0, NULL, vp->flip_v ? SDL_FLIP_VERTICAL : 0);
1037  set_sdl_yuv_conversion_mode(NULL);
1038  if (sp) {
1039 #if USE_ONEPASS_SUBTITLE_RENDER
1040  SDL_RenderCopy(renderer, is->sub_texture, NULL, &rect);
1041 #else
1042  int i;
1043  double xratio = (double)rect.w / (double)sp->width;
1044  double yratio = (double)rect.h / (double)sp->height;
1045  for (i = 0; i < sp->sub.num_rects; i++) {
1046  SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
1047  SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
1048  .y = rect.y + sub_rect->y * yratio,
1049  .w = sub_rect->w * xratio,
1050  .h = sub_rect->h * yratio};
1051  SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
1052  }
1053 #endif
1054  }
1055 }
1056 
1057 static inline int compute_mod(int a, int b)
1058 {
1059  return a < 0 ? a%b + b : a%b;
1060 }
1061 
1062 static void video_audio_display(VideoState *s)
1063 {
1064  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1065  int ch, channels, h, h2;
1066  int64_t time_diff;
1067  int rdft_bits, nb_freq;
1068 
1069  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1070  ;
1071  nb_freq = 1 << (rdft_bits - 1);
1072 
1073  /* compute display index : center on currently output samples */
1074  channels = s->audio_tgt.channels;
1075  nb_display_channels = channels;
1076  if (!s->paused) {
1077  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1078  n = 2 * channels;
1079  delay = s->audio_write_buf_size;
1080  delay /= n;
1081 
1082  /* to be more precise, we take into account the time spent since
1083  the last buffer computation */
1084  if (audio_callback_time) {
1085  time_diff = av_gettime_relative() - audio_callback_time;
1086  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1087  }
1088 
1089  delay += 2 * data_used;
1090  if (delay < data_used)
1091  delay = data_used;
1092 
1093  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1094  if (s->show_mode == SHOW_MODE_WAVES) {
1095  h = INT_MIN;
1096  for (i = 0; i < 1000; i += channels) {
1097  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1098  int a = s->sample_array[idx];
1099  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1100  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1101  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1102  int score = a - d;
1103  if (h < score && (b ^ c) < 0) {
1104  h = score;
1105  i_start = idx;
1106  }
1107  }
1108  }
1109 
1110  s->last_i_start = i_start;
1111  } else {
1112  i_start = s->last_i_start;
1113  }
1114 
1115  if (s->show_mode == SHOW_MODE_WAVES) {
1116  SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
1117 
1118  /* total height for one channel */
1119  h = s->height / nb_display_channels;
1120  /* graph height / 2 */
1121  h2 = (h * 9) / 20;
1122  for (ch = 0; ch < nb_display_channels; ch++) {
1123  i = i_start + ch;
1124  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1125  for (x = 0; x < s->width; x++) {
1126  y = (s->sample_array[i] * h2) >> 15;
1127  if (y < 0) {
1128  y = -y;
1129  ys = y1 - y;
1130  } else {
1131  ys = y1;
1132  }
1133  fill_rectangle(s->xleft + x, ys, 1, y);
1134  i += channels;
1135  if (i >= SAMPLE_ARRAY_SIZE)
1136  i -= SAMPLE_ARRAY_SIZE;
1137  }
1138  }
1139 
1140  SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
1141 
1142  for (ch = 1; ch < nb_display_channels; ch++) {
1143  y = s->ytop + ch * h;
1144  fill_rectangle(s->xleft, y, s->width, 1);
1145  }
1146  } else {
1147  if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
1148  return;
1149 
1150  nb_display_channels= FFMIN(nb_display_channels, 2);
1151  if (rdft_bits != s->rdft_bits) {
1152  av_rdft_end(s->rdft);
1153  av_free(s->rdft_data);
1154  s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
1155  s->rdft_bits = rdft_bits;
1156  s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data));
1157  }
1158  if (!s->rdft || !s->rdft_data){
1159  av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1160  s->show_mode = SHOW_MODE_WAVES;
1161  } else {
1162  FFTSample *data[2];
1163  SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
1164  uint32_t *pixels;
1165  int pitch;
1166  for (ch = 0; ch < nb_display_channels; ch++) {
1167  data[ch] = s->rdft_data + 2 * nb_freq * ch;
1168  i = i_start + ch;
1169  for (x = 0; x < 2 * nb_freq; x++) {
1170  double w = (x-nb_freq) * (1.0 / nb_freq);
1171  data[ch][x] = s->sample_array[i] * (1.0 - w * w);
1172  i += channels;
1173  if (i >= SAMPLE_ARRAY_SIZE)
1174  i -= SAMPLE_ARRAY_SIZE;
1175  }
1176  av_rdft_calc(s->rdft, data[ch]);
1177  }
1178  /* This is the least efficient way to do this; we should of course
1179  * access the texture directly, but this is more than fast enough. */
1180  if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
1181  pitch >>= 2;
1182  pixels += pitch * s->height;
1183  for (y = 0; y < s->height; y++) {
1184  double w = 1 / sqrt(nb_freq);
1185  int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
1186  int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][2 * y + 0], data[1][2 * y + 1]))
1187  : a;
1188  a = FFMIN(a, 255);
1189  b = FFMIN(b, 255);
1190  pixels -= pitch;
1191  *pixels = (a << 16) + (b << 8) + ((a+b) >> 1);
1192  }
1193  SDL_UnlockTexture(s->vis_texture);
1194  }
1195  SDL_RenderCopy(renderer, s->vis_texture, NULL, NULL);
1196  }
1197  if (!s->paused)
1198  s->xpos++;
1199  if (s->xpos >= s->width)
1200  s->xpos= s->xleft;
1201  }
1202 }
1203 
1204 static void stream_component_close(VideoState *is, int stream_index)
1205 {
1206  AVFormatContext *ic = is->ic;
1207  AVCodecParameters *codecpar;
1208 
1209  if (stream_index < 0 || stream_index >= ic->nb_streams)
1210  return;
1211  codecpar = ic->streams[stream_index]->codecpar;
1212 
1213  switch (codecpar->codec_type) {
1214  case AVMEDIA_TYPE_AUDIO:
1215  decoder_abort(&is->auddec, &is->sampq);
1216  SDL_CloseAudioDevice(audio_dev);
1217  decoder_destroy(&is->auddec);
1218  swr_free(&is->swr_ctx);
1219  av_freep(&is->audio_buf1);
1220  is->audio_buf1_size = 0;
1221  is->audio_buf = NULL;
1222 
1223  if (is->rdft) {
1224  av_rdft_end(is->rdft);
1225  av_freep(&is->rdft_data);
1226  is->rdft = NULL;
1227  is->rdft_bits = 0;
1228  }
1229  break;
1230  case AVMEDIA_TYPE_VIDEO:
1231  decoder_abort(&is->viddec, &is->pictq);
1232  decoder_destroy(&is->viddec);
1233  break;
1234  case AVMEDIA_TYPE_SUBTITLE:
1235  decoder_abort(&is->subdec, &is->subpq);
1236  decoder_destroy(&is->subdec);
1237  break;
1238  default:
1239  break;
1240  }
1241 
1242  ic->streams[stream_index]->discard = AVDISCARD_ALL;
1243  switch (codecpar->codec_type) {
1244  case AVMEDIA_TYPE_AUDIO:
1245  is->audio_st = NULL;
1246  is->audio_stream = -1;
1247  break;
1248  case AVMEDIA_TYPE_VIDEO:
1249  is->video_st = NULL;
1250  is->video_stream = -1;
1251  break;
1252  case AVMEDIA_TYPE_SUBTITLE:
1253  is->subtitle_st = NULL;
1254  is->subtitle_stream = -1;
1255  break;
1256  default:
1257  break;
1258  }
1259 }
1260 
1261 static void stream_close(VideoState *is)
1262 {
1263  /* XXX: use a special url_shutdown call to abort parse cleanly */
1264  is->abort_request = 1;
1265  SDL_WaitThread(is->read_tid, NULL);
1266 
1267  /* close each stream */
1268  if (is->audio_stream >= 0)
1269  stream_component_close(is, is->audio_stream);
1270  if (is->video_stream >= 0)
1271  stream_component_close(is, is->video_stream);
1272  if (is->subtitle_stream >= 0)
1273  stream_component_close(is, is->subtitle_stream);
1274 
1275  avformat_close_input(&is->ic);
1276 
1277  packet_queue_destroy(&is->videoq);
1278  packet_queue_destroy(&is->audioq);
1279  packet_queue_destroy(&is->subtitleq);
1280 
1281  /* free all pictures */
1282  frame_queue_destory(&is->pictq);
1283  frame_queue_destory(&is->sampq);
1284  frame_queue_destory(&is->subpq);
1285  SDL_DestroyCond(is->continue_read_thread);
1286  sws_freeContext(is->img_convert_ctx);
1287  sws_freeContext(is->sub_convert_ctx);
1288  av_free(is->filename);
1289  if (is->vis_texture)
1290  SDL_DestroyTexture(is->vis_texture);
1291  if (is->vid_texture)
1292  SDL_DestroyTexture(is->vid_texture);
1293  if (is->sub_texture)
1294  SDL_DestroyTexture(is->sub_texture);
1295  av_free(is);
1296 }
1297 
1298 static void do_exit(VideoState *is)
1299 {
1300  if (is) {
1301  stream_close(is);
1302  }
1303  if (renderer)
1304  SDL_DestroyRenderer(renderer);
1305  if (window)
1306  SDL_DestroyWindow(window);
1307  uninit_opts();
1308 #if CONFIG_AVFILTER
1309  av_freep(&vfilters_list);
1310 #endif
1311  avformat_network_deinit();
1312  if (show_status)
1313  printf("\n");
1314  SDL_Quit();
1315  av_log(NULL, AV_LOG_QUIET, "%s", "");
1316  exit(0);
1317 }
1318 
1319 static void sigterm_handler(int sig)
1320 {
1321  exit(123);
1322 }
1323 
1324 static void set_default_window_size(int width, int height, AVRational sar)
1325 {
1326  SDL_Rect rect;
1327  int max_width = screen_width ? screen_width : INT_MAX;
1328  int max_height = screen_height ? screen_height : INT_MAX;
1329  if (max_width == INT_MAX && max_height == INT_MAX)
1330  max_height = height;
1331  calculate_display_rect(&rect, 0, 0, max_width, max_height, width, height, sar);
1332  default_width = rect.w;
1333  default_height = rect.h;
1334 }
1335 
1336 static int video_open(VideoState *is)
1337 {
1338  int w,h;
1339 
1340  w = screen_width ? screen_width : default_width;
1341  h = screen_height ? screen_height : default_height;
1342 
1343  if (!window_title)
1344  window_title = input_filename;
1345  SDL_SetWindowTitle(window, window_title);
1346 
1347  SDL_SetWindowSize(window, w, h);
1348  SDL_SetWindowPosition(window, screen_left, screen_top);
1349  if (is_full_screen)
1350  SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP);
1351  SDL_ShowWindow(window);
1352 
1353  is->width = w;
1354  is->height = h;
1355 
1356  return 0;
1357 }
1358 
1359 /* display the current picture, if any */
1360 static void video_display(VideoState *is)
1361 {
1362  if (!is->width)
1363  video_open(is);
1364 
1365  SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1366  SDL_RenderClear(renderer);
1367  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1368  video_audio_display(is);
1369  else if (is->video_st)
1370  video_image_display(is);
1371  SDL_RenderPresent(renderer);
1372 }
1373 
1374 static double get_clock(Clock *c)
1375 {
1376  if (*c->queue_serial != c->serial)
1377  return NAN;
1378  if (c->paused) {
1379  return c->pts;
1380  } else {
1381  double time = av_gettime_relative() / 1000000.0;
1382  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1383  }
1384 }
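/* Worked example: a clock set to pts=10.0 at time 100.0 has pts_drift = -90.0.
 * Queried at time 100.5 with speed 1.0 it reads -90.0 + 100.5 = 10.5; with speed 1.25
 * the last term subtracts 0.5 * (1.0 - 1.25) = -0.125, giving 10.625, i.e. the clock
 * advances 25% faster than real time. */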
1385 
1386 static void set_clock_at(Clock *c, double pts, int serial, double time)
1387 {
1388  c->pts = pts;
1389  c->last_updated = time;
1390  c->pts_drift = c->pts - time;
1391  c->serial = serial;
1392 }
1393 
1394 static void set_clock(Clock *c, double pts, int serial)
1395 {
1396  double time = av_gettime_relative() / 1000000.0;
1397  set_clock_at(c, pts, serial, time);
1398 }
1399 
1400 static void set_clock_speed(Clock *c, double speed)
1401 {
1402  set_clock(c, get_clock(c), c->serial);
1403  c->speed = speed;
1404 }
1405 
1406 static void init_clock(Clock *c, int *queue_serial)
1407 {
1408  c->speed = 1.0;
1409  c->paused = 0;
1410  c->queue_serial = queue_serial;
1411  set_clock(c, NAN, -1);
1412 }
1413 
1414 static void sync_clock_to_slave(Clock *c, Clock *slave)
1415 {
1416  double clock = get_clock(c);
1417  double slave_clock = get_clock(slave);
1418  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1419  set_clock(c, slave_clock, slave->serial);
1420 }
1421 
1422 static int get_master_sync_type(VideoState *is) {
1423  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1424  if (is->video_st)
1425  return AV_SYNC_VIDEO_MASTER;
1426  else
1427  return AV_SYNC_AUDIO_MASTER;
1428  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1429  if (is->audio_st)
1430  return AV_SYNC_AUDIO_MASTER;
1431  else
1432  return AV_SYNC_EXTERNAL_CLOCK;
1433  } else {
1434  return AV_SYNC_EXTERNAL_CLOCK;
1435  }
1436 }
1437 
1438 /* get the current master clock value */
1439 static double get_master_clock(VideoState *is)
1440 {
1441  double val;
1442 
1443  switch (get_master_sync_type(is)) {
1444  case AV_SYNC_VIDEO_MASTER:
1445  val = get_clock(&is->vidclk);
1446  break;
1447  case AV_SYNC_AUDIO_MASTER:
1448  val = get_clock(&is->audclk);
1449  break;
1450  default:
1451  val = get_clock(&is->extclk);
1452  break;
1453  }
1454  return val;
1455 }
1456 
1457 static void check_external_clock_speed(VideoState *is) {
1458  if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES ||
1459  is->audio_stream >= 0 && is->audioq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES) {
1460  set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
1461  } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) &&
1462  (is->audio_stream < 0 || is->audioq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES)) {
1463  set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
1464  } else {
1465  double speed = is->extclk.speed;
1466  if (speed != 1.0)
1467  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1468  }
1469 }
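/* For realtime sources slaved to the external clock, the speed is nudged by at most
 * EXTERNAL_CLOCK_SPEED_STEP per call: slowed toward EXTERNAL_CLOCK_SPEED_MIN while the
 * packet queues are nearly empty (letting the source catch up), sped up toward
 * EXTERNAL_CLOCK_SPEED_MAX while they are overfull, and otherwise relaxed back
 * toward 1.0. */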
1470 
1471 /* seek in the stream */
1472 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1473 {
1474  if (!is->seek_req) {
1475  is->seek_pos = pos;
1476  is->seek_rel = rel;
1477  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1478  if (seek_by_bytes)
1479  is->seek_flags |= AVSEEK_FLAG_BYTE;
1480  is->seek_req = 1;
1481  SDL_CondSignal(is->continue_read_thread);
1482  }
1483 }
1484 
1485 /* pause or resume the video */
1486 static void stream_toggle_pause(VideoState *is)
1487 {
1488  if (is->paused) {
1489  is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1490  if (is->read_pause_return != AVERROR(ENOSYS)) {
1491  is->vidclk.paused = 0;
1492  }
1493  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1494  }
1495  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1496  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1497 }
1498 
1499 static void toggle_pause(VideoState *is)
1500 {
1501  stream_toggle_pause(is);
1502  is->step = 0;
1503 }
1504 
1505 static void toggle_mute(VideoState *is)
1506 {
1507  is->muted = !is->muted;
1508 }
1509 
1510 static void update_volume(VideoState *is, int sign, double step)
1511 {
1512  double volume_level = is->audio_volume ? (20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)) : -1000.0;
1513  int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (volume_level + sign * step) / 20.0));
1514  is->audio_volume = av_clip(is->audio_volume == new_volume ? (is->audio_volume + sign) : new_volume, 0, SDL_MIX_MAXVOLUME);
1515 }
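/* The volume step is applied in decibels: the current SDL volume is converted to dB,
 * moved by sign * step (SDL_VOLUME_STEP = 0.75 dB per key press), and converted back.
 * For example, one step down from SDL_MIX_MAXVOLUME (128, i.e. 0 dB) gives
 * lrint(128 * pow(10, -0.75 / 20)) = 117. */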
1516 
1517 static void step_to_next_frame(VideoState *is)
1518 {
1519  /* if the stream is paused, unpause it, then step */
1520  if (is->paused)
1521  stream_toggle_pause(is);
1522  is->step = 1;
1523 }
1524 
1525 static double compute_target_delay(double delay, VideoState *is)
1526 {
1527  double sync_threshold, diff = 0;
1528 
1529  /* update delay to follow master synchronisation source */
1530  if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1531  /* if video is slave, we try to correct big delays by
1532  duplicating or deleting a frame */
1533  diff = get_clock(&is->vidclk) - get_master_clock(is);
1534 
1535  /* skip or repeat frame. We take into account the
1536  delay to compute the threshold. I still don't know
1537  if it is the best guess */
1538  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1539  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1540  if (diff <= -sync_threshold)
1541  delay = FFMAX(0, delay + diff);
1542  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1543  delay = delay + diff;
1544  else if (diff >= sync_threshold)
1545  delay = 2 * delay;
1546  }
1547  }
1548 
1549  av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
1550  delay, -diff);
1551 
1552  return delay;
1553 }
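/* Summary: diff = video clock - master clock.  If the video is late by more than
 * sync_threshold, the next delay is shortened (down to 0); if it is early, the delay is
 * doubled for short frames or extended by diff for frames longer than
 * AV_SYNC_FRAMEDUP_THRESHOLD; within the threshold, or when |diff| exceeds
 * max_frame_duration, the nominal frame duration is kept unchanged. */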
1554 
1555 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1556  if (vp->serial == nextvp->serial) {
1557  double duration = nextvp->pts - vp->pts;
1558  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1559  return vp->duration;
1560  else
1561  return duration;
1562  } else {
1563  return 0.0;
1564  }
1565 }
1566 
1567 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1568  /* update current video pts */
1569  set_clock(&is->vidclk, pts, serial);
1570  sync_clock_to_slave(&is->extclk, &is->vidclk);
1571 }
1572 
1573 /* called to display each frame */
1574 static void video_refresh(void *opaque, double *remaining_time)
1575 {
1576  VideoState *is = opaque;
1577  double time;
1578 
1579  Frame *sp, *sp2;
1580 
1581  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1582  check_external_clock_speed(is);
1583 
1584  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1585  time = av_gettime_relative() / 1000000.0;
1586  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1587  video_display(is);
1588  is->last_vis_time = time;
1589  }
1590  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1591  }
1592 
1593  if (is->video_st) {
1594 retry:
1595  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1596  // nothing to do, no picture to display in the queue
1597  } else {
1598  double last_duration, duration, delay;
1599  Frame *vp, *lastvp;
1600 
1601  /* dequeue the picture */
1602  lastvp = frame_queue_peek_last(&is->pictq);
1603  vp = frame_queue_peek(&is->pictq);
1604 
1605  if (vp->serial != is->videoq.serial) {
1606  frame_queue_next(&is->pictq);
1607  goto retry;
1608  }
1609 
1610  if (lastvp->serial != vp->serial)
1611  is->frame_timer = av_gettime_relative() / 1000000.0;
1612 
1613  if (is->paused)
1614  goto display;
1615 
1616  /* compute nominal last_duration */
1617  last_duration = vp_duration(is, lastvp, vp);
1618  delay = compute_target_delay(last_duration, is);
1619 
1620  time= av_gettime_relative()/1000000.0;
1621  if (time < is->frame_timer + delay) {
1622  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1623  goto display;
1624  }
1625 
1626  is->frame_timer += delay;
1627  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1628  is->frame_timer = time;
1629 
1630  SDL_LockMutex(is->pictq.mutex);
1631  if (!isnan(vp->pts))
1632  update_video_pts(is, vp->pts, vp->pos, vp->serial);
1633  SDL_UnlockMutex(is->pictq.mutex);
1634 
1635  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1636  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1637  duration = vp_duration(is, vp, nextvp);
1638  if(!is->step && (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1639  is->frame_drops_late++;
1640  frame_queue_next(&is->pictq);
1641  goto retry;
1642  }
1643  }
1644 
1645  if (is->subtitle_st) {
1646  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1647  sp = frame_queue_peek(&is->subpq);
1648 
1649  if (frame_queue_nb_remaining(&is->subpq) > 1)
1650  sp2 = frame_queue_peek_next(&is->subpq);
1651  else
1652  sp2 = NULL;
1653 
1654  if (sp->serial != is->subtitleq.serial
1655  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1656  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1657  {
1658  if (sp->uploaded) {
1659  int i;
1660  for (i = 0; i < sp->sub.num_rects; i++) {
1661  AVSubtitleRect *sub_rect = sp->sub.rects[i];
1662  uint8_t *pixels;
1663  int pitch, j;
1664 
1665  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
1666  for (j = 0; j < sub_rect->h; j++, pixels += pitch)
1667  memset(pixels, 0, sub_rect->w << 2);
1668  SDL_UnlockTexture(is->sub_texture);
1669  }
1670  }
1671  }
1672  frame_queue_next(&is->subpq);
1673  } else {
1674  break;
1675  }
1676  }
1677  }
1678 
1679  frame_queue_next(&is->pictq);
1680  is->force_refresh = 1;
1681 
1682  if (is->step && !is->paused)
1683  stream_toggle_pause(is);
1684  }
1685 display:
1686  /* display picture */
1687  if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
1688  video_display(is);
1689  }
1690  is->force_refresh = 0;
1691  if (show_status) {
1692  static int64_t last_time;
1693  int64_t cur_time;
1694  int aqsize, vqsize, sqsize;
1695  double av_diff;
1696 
1697  cur_time = av_gettime_relative();
1698  if (!last_time || (cur_time - last_time) >= 30000) {
1699  aqsize = 0;
1700  vqsize = 0;
1701  sqsize = 0;
1702  if (is->audio_st)
1703  aqsize = is->audioq.size;
1704  if (is->video_st)
1705  vqsize = is->videoq.size;
1706  if (is->subtitle_st)
1707  sqsize = is->subtitleq.size;
1708  av_diff = 0;
1709  if (is->audio_st && is->video_st)
1710  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1711  else if (is->video_st)
1712  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1713  else if (is->audio_st)
1714  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1716  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1718  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1719  av_diff,
1720  is->frame_drops_early + is->frame_drops_late,
1721  aqsize / 1024,
1722  vqsize / 1024,
1723  sqsize,
1724  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_dts : 0,
1725  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_pts : 0);
1726  fflush(stdout);
1727  last_time = cur_time;
1728  }
1729  }
1730 }
1731 
1732 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1733 {
1734  Frame *vp;
1735 
1736 #if defined(DEBUG_SYNC)
1737  printf("frame_type=%c pts=%0.3f\n",
1738  av_get_picture_type_char(src_frame->pict_type), pts);
1739 #endif
1740 
1741  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1742  return -1;
1743 
1744  vp->sar = src_frame->sample_aspect_ratio;
1745  vp->uploaded = 0;
1746 
1747  vp->width = src_frame->width;
1748  vp->height = src_frame->height;
1749  vp->format = src_frame->format;
1750 
1751  vp->pts = pts;
1752  vp->duration = duration;
1753  vp->pos = pos;
1754  vp->serial = serial;
1755 
1756  set_default_window_size(vp->width, vp->height, vp->sar);
1757 
1758  av_frame_move_ref(vp->frame, src_frame);
1759  frame_queue_push(&is->pictq);
1760  return 0;
1761 }
1762 
1763 static int get_video_frame(VideoState *is, AVFrame *frame)
1764 {
1765  int got_picture;
1766 
1767  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1768  return -1;
1769 
1770  if (got_picture) {
1771  double dpts = NAN;
1772 
1773  if (frame->pts != AV_NOPTS_VALUE)
1774  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1775 
1776  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1777 
1778  if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1779  if (frame->pts != AV_NOPTS_VALUE) {
1780  double diff = dpts - get_master_clock(is);
1781  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1782  diff - is->frame_last_filter_delay < 0 &&
1783  is->viddec.pkt_serial == is->vidclk.serial &&
1784  is->videoq.nb_packets) {
1785  is->frame_drops_early++;
1786  av_frame_unref(frame);
1787  got_picture = 0;
1788  }
1789  }
1790  }
1791  }
1792 
1793  return got_picture;
1794 }
1795 
1796 #if CONFIG_AVFILTER
1797 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1798  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1799 {
1800  int ret, i;
1801  int nb_filters = graph->nb_filters;
1802  AVFilterInOut *outputs = NULL, *inputs = NULL;
1803 
1804  if (filtergraph) {
1805  outputs = avfilter_inout_alloc();
1806  inputs = avfilter_inout_alloc();
1807  if (!outputs || !inputs) {
1808  ret = AVERROR(ENOMEM);
1809  goto fail;
1810  }
1811 
1812  outputs->name = av_strdup("in");
1813  outputs->filter_ctx = source_ctx;
1814  outputs->pad_idx = 0;
1815  outputs->next = NULL;
1816 
1817  inputs->name = av_strdup("out");
1818  inputs->filter_ctx = sink_ctx;
1819  inputs->pad_idx = 0;
1820  inputs->next = NULL;
1821 
1822  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1823  goto fail;
1824  } else {
1825  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1826  goto fail;
1827  }
1828 
1829  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1830  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1831  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1832 
1833  ret = avfilter_graph_config(graph, NULL);
1834 fail:
1835  avfilter_inout_free(&outputs);
1836  avfilter_inout_free(&inputs);
1837  return ret;
1838 }
1839 
1840 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1841 {
1842  enum AVPixelFormat pix_fmts[FF_ARRAY_ELEMS(sdl_texture_format_map)];
1843  char sws_flags_str[512] = "";
1844  char buffersrc_args[256];
1845  int ret;
1846  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1847  AVCodecParameters *codecpar = is->video_st->codecpar;
1848  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1849  AVDictionaryEntry *e = NULL;
1850  int nb_pix_fmts = 0;
1851  int i, j;
1852 
1853  for (i = 0; i < renderer_info.num_texture_formats; i++) {
1854  for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; j++) {
1855  if (renderer_info.texture_formats[i] == sdl_texture_format_map[j].texture_fmt) {
1856  pix_fmts[nb_pix_fmts++] = sdl_texture_format_map[j].format;
1857  break;
1858  }
1859  }
1860  }
1861  pix_fmts[nb_pix_fmts] = AV_PIX_FMT_NONE;
1862 
1863  while ((e = av_dict_get(sws_dict, "", e, AV_DICT_IGNORE_SUFFIX))) {
1864  if (!strcmp(e->key, "sws_flags")) {
1865  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1866  } else
1867  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value);
1868  }
1869  if (strlen(sws_flags_str))
1870  sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1871 
1872  graph->scale_sws_opts = av_strdup(sws_flags_str);
1873 
1874  snprintf(buffersrc_args, sizeof(buffersrc_args),
1875  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1876  frame->width, frame->height, frame->format,
1877  is->video_st->time_base.num, is->video_st->time_base.den,
1878  codecpar->sample_aspect_ratio.num, FFMAX(codecpar->sample_aspect_ratio.den, 1));
1879  if (fr.num && fr.den)
1880  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1881 
1882  if ((ret = avfilter_graph_create_filter(&filt_src,
1883  avfilter_get_by_name("buffer"),
1884  "ffplay_buffer", buffersrc_args, NULL,
1885  graph)) < 0)
1886  goto fail;
1887 
1888  ret = avfilter_graph_create_filter(&filt_out,
1889  avfilter_get_by_name("buffersink"),
1890  "ffplay_buffersink", NULL, NULL, graph);
1891  if (ret < 0)
1892  goto fail;
1893 
1894  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1895  goto fail;
1896 
1897  last_filter = filt_out;
1898 
1899 /* Note: this macro adds a filter before the lastly added filter, so the
1900  * processing order of the filters is in reverse */
1901 #define INSERT_FILT(name, arg) do { \
1902  AVFilterContext *filt_ctx; \
1903  \
1904  ret = avfilter_graph_create_filter(&filt_ctx, \
1905  avfilter_get_by_name(name), \
1906  "ffplay_" name, arg, NULL, graph); \
1907  if (ret < 0) \
1908  goto fail; \
1909  \
1910  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1911  if (ret < 0) \
1912  goto fail; \
1913  \
1914  last_filter = filt_ctx; \
1915 } while (0)
1916 
1917  if (autorotate) {
1918  double theta = get_rotation(is->video_st);
1919 
1920  if (fabs(theta - 90) < 1.0) {
1921  INSERT_FILT("transpose", "clock");
1922  } else if (fabs(theta - 180) < 1.0) {
1923  INSERT_FILT("hflip", NULL);
1924  INSERT_FILT("vflip", NULL);
1925  } else if (fabs(theta - 270) < 1.0) {
1926  INSERT_FILT("transpose", "cclock");
1927  } else if (fabs(theta) > 1.0) {
1928  char rotate_buf[64];
1929  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1930  INSERT_FILT("rotate", rotate_buf);
1931  }
1932  }
1933 
1934  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
1935  goto fail;
1936 
1937  is->in_video_filter = filt_src;
1938  is->out_video_filter = filt_out;
1939 
1940 fail:
1941  return ret;
1942 }
1943 
1944 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1945 {
1946  static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
1947  int sample_rates[2] = { 0, -1 };
1948  int64_t channel_layouts[2] = { 0, -1 };
1949  int channels[2] = { 0, -1 };
1950  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1951  char aresample_swr_opts[512] = "";
1952  AVDictionaryEntry *e = NULL;
1953  char asrc_args[256];
1954  int ret;
1955 
1956  avfilter_graph_free(&is->agraph);
1957  if (!(is->agraph = avfilter_graph_alloc()))
1958  return AVERROR(ENOMEM);
1959  is->agraph->nb_threads = filter_nbthreads;
1960 
1961  while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
1962  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
1963  if (strlen(aresample_swr_opts))
1964  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
1965  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
1966 
1967  ret = snprintf(asrc_args, sizeof(asrc_args),
1968  "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
1969  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
1970  is->audio_filter_src.channels,
1971  1, is->audio_filter_src.freq);
1972  if (is->audio_filter_src.channel_layout)
1973  snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
1974  ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
1975 
1976  ret = avfilter_graph_create_filter(&filt_asrc,
1977  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1978  asrc_args, NULL, is->agraph);
1979  if (ret < 0)
1980  goto end;
1981 
1982 
1983  ret = avfilter_graph_create_filter(&filt_asink,
1984  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
1985  NULL, NULL, is->agraph);
1986  if (ret < 0)
1987  goto end;
1988 
1989  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1990  goto end;
1991  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1992  goto end;
1993 
1994  if (force_output_format) {
1995  channel_layouts[0] = is->audio_tgt.channel_layout;
1996  channels [0] = is->audio_tgt.channels;
1997  sample_rates [0] = is->audio_tgt.freq;
1998  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
1999  goto end;
2000  if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2001  goto end;
2002  if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2003  goto end;
2004  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2005  goto end;
2006  }
2007 
2008 
2009  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
2010  goto end;
2011 
2012  is->in_audio_filter = filt_asrc;
2013  is->out_audio_filter = filt_asink;
2014 
2015 end:
2016  if (ret < 0)
2017  avfilter_graph_free(&is->agraph);
2018  return ret;
2019 }
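/* Illustration (values are an example only, not taken from a real run): for a
 * 48 kHz stereo s16 input, the snprintf() above produces abuffer arguments of
 * the form
 *
 *   sample_rate=48000:sample_fmt=s16:channels=2:time_base=1/48000:channel_layout=0x3
 *
 * where 0x3 is AV_CH_LAYOUT_STEREO (front left | front right). */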
2020 #endif /* CONFIG_AVFILTER */
2021 
2022 static int audio_thread(void *arg)
2023 {
2024  VideoState *is = arg;
2025  AVFrame *frame = av_frame_alloc();
2026  Frame *af;
2027 #if CONFIG_AVFILTER
2028  int last_serial = -1;
2029  int64_t dec_channel_layout;
2030  int reconfigure;
2031 #endif
2032  int got_frame = 0;
2033  AVRational tb;
2034  int ret = 0;
2035 
2036  if (!frame)
2037  return AVERROR(ENOMEM);
2038 
2039  do {
2040  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2041  goto the_end;
2042 
2043  if (got_frame) {
2044  tb = (AVRational){1, frame->sample_rate};
2045 
2046 #if CONFIG_AVFILTER
2047  dec_channel_layout = get_valid_channel_layout(frame->channel_layout, frame->channels);
2048 
2049  reconfigure =
2050  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2051  frame->format, frame->channels) ||
2052  is->audio_filter_src.channel_layout != dec_channel_layout ||
2053  is->audio_filter_src.freq != frame->sample_rate ||
2054  is->auddec.pkt_serial != last_serial;
2055 
2056  if (reconfigure) {
2057  char buf1[1024], buf2[1024];
2058  av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2059  av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2060  av_log(NULL, AV_LOG_DEBUG,
2061  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2062  is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2063  frame->sample_rate, frame->channels, av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2064 
2065  is->audio_filter_src.fmt = frame->format;
2066  is->audio_filter_src.channels = frame->channels;
2067  is->audio_filter_src.channel_layout = dec_channel_layout;
2068  is->audio_filter_src.freq = frame->sample_rate;
2069  last_serial = is->auddec.pkt_serial;
2070 
2071  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2072  goto the_end;
2073  }
2074 
2075  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2076  goto the_end;
2077 
2078  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2079  tb = av_buffersink_get_time_base(is->out_audio_filter);
2080 #endif
2081  if (!(af = frame_queue_peek_writable(&is->sampq)))
2082  goto the_end;
2083 
2084  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2085  af->pos = frame->pkt_pos;
2086  af->serial = is->auddec.pkt_serial;
2087  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2088 
2089  av_frame_move_ref(af->frame, frame);
2090  frame_queue_push(&is->sampq);
2091 
2092 #if CONFIG_AVFILTER
2093  if (is->audioq.serial != is->auddec.pkt_serial)
2094  break;
2095  }
2096  if (ret == AVERROR_EOF)
2097  is->auddec.finished = is->auddec.pkt_serial;
2098 #endif
2099  }
2100  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2101  the_end:
2102 #if CONFIG_AVFILTER
2103  avfilter_graph_free(&is->agraph);
2104 #endif
2105  av_frame_free(&frame);
2106  return ret;
2107 }
2108 
2109 static int decoder_start(Decoder *d, int (*fn)(void *), const char *thread_name, void* arg)
2110 {
2111  packet_queue_start(d->queue);
2112  d->decoder_tid = SDL_CreateThread(fn, thread_name, arg);
2113  if (!d->decoder_tid) {
2114  av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
2115  return AVERROR(ENOMEM);
2116  }
2117  return 0;
2118 }
2119 
2120 static int video_thread(void *arg)
2121 {
2122  VideoState *is = arg;
2123  AVFrame *frame = av_frame_alloc();
2124  double pts;
2125  double duration;
2126  int ret;
2127  AVRational tb = is->video_st->time_base;
2128  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2129 
2130 #if CONFIG_AVFILTER
2131  AVFilterGraph *graph = NULL;
2132  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2133  int last_w = 0;
2134  int last_h = 0;
2135  enum AVPixelFormat last_format = -2;
2136  int last_serial = -1;
2137  int last_vfilter_idx = 0;
2138 #endif
2139 
2140  if (!frame)
2141  return AVERROR(ENOMEM);
2142 
2143  for (;;) {
2144  ret = get_video_frame(is, frame);
2145  if (ret < 0)
2146  goto the_end;
2147  if (!ret)
2148  continue;
2149 
2150 #if CONFIG_AVFILTER
2151  if ( last_w != frame->width
2152  || last_h != frame->height
2153  || last_format != frame->format
2154  || last_serial != is->viddec.pkt_serial
2155  || last_vfilter_idx != is->vfilter_idx) {
2156  av_log(NULL, AV_LOG_DEBUG,
2157  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2158  last_w, last_h,
2159  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2160  frame->width, frame->height,
2161  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2162  avfilter_graph_free(&graph);
2163  graph = avfilter_graph_alloc();
2164  if (!graph) {
2165  ret = AVERROR(ENOMEM);
2166  goto the_end;
2167  }
2168  graph->nb_threads = filter_nbthreads;
2169  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2170  SDL_Event event;
2171  event.type = FF_QUIT_EVENT;
2172  event.user.data1 = is;
2173  SDL_PushEvent(&event);
2174  goto the_end;
2175  }
2176  filt_in = is->in_video_filter;
2177  filt_out = is->out_video_filter;
2178  last_w = frame->width;
2179  last_h = frame->height;
2180  last_format = frame->format;
2181  last_serial = is->viddec.pkt_serial;
2182  last_vfilter_idx = is->vfilter_idx;
2183  frame_rate = av_buffersink_get_frame_rate(filt_out);
2184  }
2185 
2186  ret = av_buffersrc_add_frame(filt_in, frame);
2187  if (ret < 0)
2188  goto the_end;
2189 
2190  while (ret >= 0) {
2191  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2192 
2193  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2194  if (ret < 0) {
2195  if (ret == AVERROR_EOF)
2196  is->viddec.finished = is->viddec.pkt_serial;
2197  ret = 0;
2198  break;
2199  }
2200 
2201  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2202  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2203  is->frame_last_filter_delay = 0;
2204  tb = av_buffersink_get_time_base(filt_out);
2205 #endif
2206  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2207  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2208  ret = queue_picture(is, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial);
2209  av_frame_unref(frame);
2210 #if CONFIG_AVFILTER
2211  if (is->videoq.serial != is->viddec.pkt_serial)
2212  break;
2213  }
2214 #endif
2215 
2216  if (ret < 0)
2217  goto the_end;
2218  }
2219  the_end:
2220 #if CONFIG_AVFILTER
2221  avfilter_graph_free(&graph);
2222 #endif
2223  av_frame_free(&frame);
2224  return 0;
2225 }
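/* Example of the duration computed above: with a detected frame_rate of 25/1,
 * av_q2d((AVRational){frame_rate.den, frame_rate.num}) gives 1/25 == 0.04 s
 * per frame; when the frame rate is unknown the duration defaults to 0. */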
2226 
2227 static int subtitle_thread(void *arg)
2228 {
2229  VideoState *is = arg;
2230  Frame *sp;
2231  int got_subtitle;
2232  double pts;
2233 
2234  for (;;) {
2235  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2236  return 0;
2237 
2238  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2239  break;
2240 
2241  pts = 0;
2242 
2243  if (got_subtitle && sp->sub.format == 0) {
2244  if (sp->sub.pts != AV_NOPTS_VALUE)
2245  pts = sp->sub.pts / (double)AV_TIME_BASE;
2246  sp->pts = pts;
2247  sp->serial = is->subdec.pkt_serial;
2248  sp->width = is->subdec.avctx->width;
2249  sp->height = is->subdec.avctx->height;
2250  sp->uploaded = 0;
2251 
2252  /* now we can update the picture count */
2253  frame_queue_push(&is->subpq);
2254  } else if (got_subtitle) {
2255  avsubtitle_free(&sp->sub);
2256  }
2257  }
2258  return 0;
2259 }
2260 
2261 /* copy samples for viewing in editor window */
2262 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2263 {
2264  int size, len;
2265 
2266  size = samples_size / sizeof(short);
2267  while (size > 0) {
2268  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2269  if (len > size)
2270  len = size;
2271  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2272  samples += len;
2273  is->sample_array_index += len;
2274  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2275  is->sample_array_index = 0;
2276  size -= len;
2277  }
2278 }
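/* sample_array is treated as a circular buffer here: if, say, only 192 slots
 * remain before the end of the array, a 400-sample chunk is stored as 192
 * samples at the tail followed by 208 samples wrapped around to index 0. */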
2279 
2280 /* return the wanted number of samples to get better sync if sync_type is video
2281  * or external master clock */
2282 static int synchronize_audio(VideoState *is, int nb_samples)
2283 {
2284  int wanted_nb_samples = nb_samples;
2285 
2286  /* if not master, then we try to remove or add samples to correct the clock */
2287  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2288  double diff, avg_diff;
2289  int min_nb_samples, max_nb_samples;
2290 
2291  diff = get_clock(&is->audclk) - get_master_clock(is);
2292 
2293  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2294  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2295  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2296  /* not enough measurements yet for a reliable estimate */
2297  is->audio_diff_avg_count++;
2298  } else {
2299  /* estimate the A-V difference */
2300  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2301 
2302  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2303  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2304  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2305  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2306  wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2307  }
2308  av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2309  diff, avg_diff, wanted_nb_samples - nb_samples,
2310  is->audio_clock, is->audio_diff_threshold);
2311  }
2312  } else {
2313  /* the difference is too large: probably initial PTS errors, so
2314  reset the A-V filter */
2315  is->audio_diff_avg_count = 0;
2316  is->audio_diff_cum = 0;
2317  }
2318  }
2319 
2320  return wanted_nb_samples;
2321 }
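/* Rough worked example of the clamping above (illustrative numbers only):
 * with nb_samples == 1024, SAMPLE_CORRECTION_PERCENT_MAX == 10 and a measured
 * drift of diff == 0.02 s at audio_src.freq == 48000 Hz, the raw request is
 * 1024 + (int)(0.02 * 48000) == 1984 samples, but av_clip() limits it to the
 * [921, 1126] range, so the per-frame correction never exceeds about 10%. */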
2322 
2323 /**
2324  * Decode one audio frame and return its uncompressed size.
2325  *
2326  * The processed audio frame is decoded, converted if required, and
2327  * stored in is->audio_buf, with size in bytes given by the return
2328  * value.
2329  */
2330 static int audio_decode_frame(VideoState *is)
2331 {
2332  int data_size, resampled_data_size;
2333  int64_t dec_channel_layout;
2334  av_unused double audio_clock0;
2335  int wanted_nb_samples;
2336  Frame *af;
2337 
2338  if (is->paused)
2339  return -1;
2340 
2341  do {
2342 #if defined(_WIN32)
2343  while (frame_queue_nb_remaining(&is->sampq) == 0) {
2344  if ((av_gettime_relative() - audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
2345  return -1;
2346  av_usleep (1000);
2347  }
2348 #endif
2349  if (!(af = frame_queue_peek_readable(&is->sampq)))
2350  return -1;
2351  frame_queue_next(&is->sampq);
2352  } while (af->serial != is->audioq.serial);
2353 
2354  data_size = av_samples_get_buffer_size(NULL, af->frame->channels,
2355  af->frame->nb_samples,
2356  af->frame->format, 1);
2357 
2358  dec_channel_layout =
2359  (af->frame->channel_layout && af->frame->channels == av_get_channel_layout_nb_channels(af->frame->channel_layout)) ?
2360  af->frame->channel_layout : av_get_default_channel_layout(af->frame->channels);
2361  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2362 
2363  if (af->frame->format != is->audio_src.fmt ||
2364  dec_channel_layout != is->audio_src.channel_layout ||
2365  af->frame->sample_rate != is->audio_src.freq ||
2366  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2367  swr_free(&is->swr_ctx);
2368  is->swr_ctx = swr_alloc_set_opts(NULL,
2369  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2370  dec_channel_layout, af->frame->format, af->frame->sample_rate,
2371  0, NULL);
2372  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2373  av_log(NULL, AV_LOG_ERROR,
2374  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2375  af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), af->frame->channels,
2376  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2377  swr_free(&is->swr_ctx);
2378  return -1;
2379  }
2380  is->audio_src.channel_layout = dec_channel_layout;
2381  is->audio_src.channels = af->frame->channels;
2382  is->audio_src.freq = af->frame->sample_rate;
2383  is->audio_src.fmt = af->frame->format;
2384  }
2385 
2386  if (is->swr_ctx) {
2387  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2388  uint8_t **out = &is->audio_buf1;
2389  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2390  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2391  int len2;
2392  if (out_size < 0) {
2393  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2394  return -1;
2395  }
2396  if (wanted_nb_samples != af->frame->nb_samples) {
2397  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2398  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2399  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2400  return -1;
2401  }
2402  }
2403  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2404  if (!is->audio_buf1)
2405  return AVERROR(ENOMEM);
2406  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2407  if (len2 < 0) {
2408  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2409  return -1;
2410  }
2411  if (len2 == out_count) {
2412  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2413  if (swr_init(is->swr_ctx) < 0)
2414  swr_free(&is->swr_ctx);
2415  }
2416  is->audio_buf = is->audio_buf1;
2417  resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2418  } else {
2419  is->audio_buf = af->frame->data[0];
2420  resampled_data_size = data_size;
2421  }
2422 
2423  audio_clock0 = is->audio_clock;
2424  /* update the audio clock with the pts */
2425  if (!isnan(af->pts))
2426  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2427  else
2428  is->audio_clock = NAN;
2429  is->audio_clock_serial = af->serial;
2430 #ifdef DEBUG
2431  {
2432  static double last_clock;
2433  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2434  is->audio_clock - last_clock,
2435  is->audio_clock, audio_clock0);
2436  last_clock = is->audio_clock;
2437  }
2438 #endif
2439  return resampled_data_size;
2440 }
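/* Minimal sketch of the libswresample calls used above, shown standalone and
 * outside of ffplay (illustrative only, error handling omitted; in_planes,
 * out_planes and in_count are caller-provided): converting planar float
 * stereo at 44100 Hz to packed s16 stereo at 48000 Hz.
 *
 *     SwrContext *swr = swr_alloc_set_opts(NULL,
 *             AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16,  48000,   // output
 *             AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_FLTP, 44100,   // input
 *             0, NULL);
 *     swr_init(swr);
 *     // leave headroom for the rate conversion, as the "+ 256" above does
 *     int out_count = (int64_t)in_count * 48000 / 44100 + 256;
 *     int converted = swr_convert(swr, out_planes, out_count,
 *                                 (const uint8_t **)in_planes, in_count);
 *     swr_free(&swr);
 */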
2441 
2442 /* prepare a new audio buffer */
2443 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2444 {
2445  VideoState *is = opaque;
2446  int audio_size, len1;
2447 
2448  audio_callback_time = av_gettime_relative();
2449 
2450  while (len > 0) {
2451  if (is->audio_buf_index >= is->audio_buf_size) {
2452  audio_size = audio_decode_frame(is);
2453  if (audio_size < 0) {
2454  /* if error, just output silence */
2455  is->audio_buf = NULL;
2456  is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2457  } else {
2458  if (is->show_mode != SHOW_MODE_VIDEO)
2459  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2460  is->audio_buf_size = audio_size;
2461  }
2462  is->audio_buf_index = 0;
2463  }
2464  len1 = is->audio_buf_size - is->audio_buf_index;
2465  if (len1 > len)
2466  len1 = len;
2467  if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
2468  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2469  else {
2470  memset(stream, 0, len1);
2471  if (!is->muted && is->audio_buf)
2472  SDL_MixAudioFormat(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, is->audio_volume);
2473  }
2474  len -= len1;
2475  stream += len1;
2476  is->audio_buf_index += len1;
2477  }
2478  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2479  /* Let's assume the audio driver that is used by SDL has two periods. */
2480  if (!isnan(is->audio_clock)) {
2481  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2482  sync_clock_to_slave(&is->extclk, &is->audclk);
2483  }
2484 }
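/* The set_clock_at() call above subtracts the audio that has been produced
 * but not yet heard: assuming, for illustration, audio_hw_buf_size == 8192
 * bytes, audio_write_buf_size == 4096 bytes and bytes_per_sec == 192000
 * (48 kHz stereo s16), the audio clock is reported
 * (2 * 8192 + 4096) / 192000 ~= 0.107 s behind is->audio_clock. */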
2485 
2486 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2487 {
2488  SDL_AudioSpec wanted_spec, spec;
2489  const char *env;
2490  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2491  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2492  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2493 
2494  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2495  if (env) {
2496  wanted_nb_channels = atoi(env);
2497  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2498  }
2499  if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2500  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2501  wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2502  }
2503  wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2504  wanted_spec.channels = wanted_nb_channels;
2505  wanted_spec.freq = wanted_sample_rate;
2506  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2507  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2508  return -1;
2509  }
2510  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2511  next_sample_rate_idx--;
2512  wanted_spec.format = AUDIO_S16SYS;
2513  wanted_spec.silence = 0;
2514  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
2515  wanted_spec.callback = sdl_audio_callback;
2516  wanted_spec.userdata = opaque;
2517  while (!(audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE))) {
2518  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2519  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2520  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2521  if (!wanted_spec.channels) {
2522  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2523  wanted_spec.channels = wanted_nb_channels;
2524  if (!wanted_spec.freq) {
2525  av_log(NULL, AV_LOG_ERROR,
2526  "No more combinations to try, audio open failed\n");
2527  return -1;
2528  }
2529  }
2530  wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2531  }
2532  if (spec.format != AUDIO_S16SYS) {
2533  av_log(NULL, AV_LOG_ERROR,
2534  "SDL advised audio format %d is not supported!\n", spec.format);
2535  return -1;
2536  }
2537  if (spec.channels != wanted_spec.channels) {
2538  wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2539  if (!wanted_channel_layout) {
2540  av_log(NULL, AV_LOG_ERROR,
2541  "SDL advised channel count %d is not supported!\n", spec.channels);
2542  return -1;
2543  }
2544  }
2545 
2546  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2547  audio_hw_params->freq = spec.freq;
2548  audio_hw_params->channel_layout = wanted_channel_layout;
2549  audio_hw_params->channels = spec.channels;
2550  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
2551  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2552  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2553  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2554  return -1;
2555  }
2556  return spec.size;
2557 }
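/* The retry loop above walks next_nb_channels[] and next_sample_rates[] until
 * SDL accepts a configuration: e.g. a rejected request for 8 channels is
 * retried with 6, then 4, then 2, then 1 channel(s), and only when no channel
 * count works is the sample rate dropped to the next standard rate below the
 * requested one. */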
2558 
2559 /* open a given stream. Return 0 if OK */
2560 static int stream_component_open(VideoState *is, int stream_index)
2561 {
2562  AVFormatContext *ic = is->ic;
2563  AVCodecContext *avctx;
2564  AVCodec *codec;
2565  const char *forced_codec_name = NULL;
2566  AVDictionary *opts = NULL;
2567  AVDictionaryEntry *t = NULL;
2568  int sample_rate, nb_channels;
2569  int64_t channel_layout;
2570  int ret = 0;
2571  int stream_lowres = lowres;
2572 
2573  if (stream_index < 0 || stream_index >= ic->nb_streams)
2574  return -1;
2575 
2576  avctx = avcodec_alloc_context3(NULL);
2577  if (!avctx)
2578  return AVERROR(ENOMEM);
2579 
2580  ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
2581  if (ret < 0)
2582  goto fail;
2583  avctx->pkt_timebase = ic->streams[stream_index]->time_base;
2584 
2585  codec = avcodec_find_decoder(avctx->codec_id);
2586 
2587  switch(avctx->codec_type){
2588  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2589  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2590  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2591  }
2592  if (forced_codec_name)
2593  codec = avcodec_find_decoder_by_name(forced_codec_name);
2594  if (!codec) {
2595  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2596  "No codec could be found with name '%s'\n", forced_codec_name);
2597  else av_log(NULL, AV_LOG_WARNING,
2598  "No decoder could be found for codec %s\n", avcodec_get_name(avctx->codec_id));
2599  ret = AVERROR(EINVAL);
2600  goto fail;
2601  }
2602 
2603  avctx->codec_id = codec->id;
2604  if (stream_lowres > codec->max_lowres) {
2605  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2606  codec->max_lowres);
2607  stream_lowres = codec->max_lowres;
2608  }
2609  avctx->lowres = stream_lowres;
2610 
2611  if (fast)
2612  avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2613 
2614  opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2615  if (!av_dict_get(opts, "threads", NULL, 0))
2616  av_dict_set(&opts, "threads", "auto", 0);
2617  if (stream_lowres)
2618  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2619  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2620  av_dict_set(&opts, "refcounted_frames", "1", 0);
2621  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2622  goto fail;
2623  }
2624  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2625  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2626  ret = AVERROR_OPTION_NOT_FOUND;
2627  goto fail;
2628  }
2629 
2630  is->eof = 0;
2631  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2632  switch (avctx->codec_type) {
2633  case AVMEDIA_TYPE_AUDIO:
2634 #if CONFIG_AVFILTER
2635  {
2636  AVFilterContext *sink;
2637 
2638  is->audio_filter_src.freq = avctx->sample_rate;
2639  is->audio_filter_src.channels = avctx->channels;
2640  is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2641  is->audio_filter_src.fmt = avctx->sample_fmt;
2642  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2643  goto fail;
2644  sink = is->out_audio_filter;
2645  sample_rate = av_buffersink_get_sample_rate(sink);
2646  nb_channels = av_buffersink_get_channels(sink);
2647  channel_layout = av_buffersink_get_channel_layout(sink);
2648  }
2649 #else
2650  sample_rate = avctx->sample_rate;
2651  nb_channels = avctx->channels;
2652  channel_layout = avctx->channel_layout;
2653 #endif
2654 
2655  /* prepare audio output */
2656  if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2657  goto fail;
2658  is->audio_hw_buf_size = ret;
2659  is->audio_src = is->audio_tgt;
2660  is->audio_buf_size = 0;
2661  is->audio_buf_index = 0;
2662 
2663  /* init averaging filter */
2664  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2665  is->audio_diff_avg_count = 0;
2666  /* since we do not have a precise enough measure of the audio FIFO fullness,
2667  we only correct audio sync if the error is larger than this threshold */
2668  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2669 
2670  is->audio_stream = stream_index;
2671  is->audio_st = ic->streams[stream_index];
2672 
2673  decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
2674  if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
2675  is->auddec.start_pts = is->audio_st->start_time;
2676  is->auddec.start_pts_tb = is->audio_st->time_base;
2677  }
2678  if ((ret = decoder_start(&is->auddec, audio_thread, "audio_decoder", is)) < 0)
2679  goto out;
2680  SDL_PauseAudioDevice(audio_dev, 0);
2681  break;
2682  case AVMEDIA_TYPE_VIDEO:
2683  is->video_stream = stream_index;
2684  is->video_st = ic->streams[stream_index];
2685 
2686  decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
2687  if ((ret = decoder_start(&is->viddec, video_thread, "video_decoder", is)) < 0)
2688  goto out;
2689  is->queue_attachments_req = 1;
2690  break;
2691  case AVMEDIA_TYPE_SUBTITLE:
2692  is->subtitle_stream = stream_index;
2693  is->subtitle_st = ic->streams[stream_index];
2694 
2695  decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread);
2696  if ((ret = decoder_start(&is->subdec, subtitle_thread, "subtitle_decoder", is)) < 0)
2697  goto out;
2698  break;
2699  default:
2700  break;
2701  }
2702  goto out;
2703 
2704 fail:
2705  avcodec_free_context(&avctx);
2706 out:
2707  av_dict_free(&opts);
2708 
2709  return ret;
2710 }
2711 
2712 static int decode_interrupt_cb(void *ctx)
2713 {
2714  VideoState *is = ctx;
2715  return is->abort_request;
2716 }
2717 
2718 static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue) {
2719  return stream_id < 0 ||
2720  queue->abort_request ||
2721  (st->disposition & AV_DISPOSITION_ATTACHED_PIC) ||
2722  queue->nb_packets > MIN_FRAMES && (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0);
2723 }
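/* "Enough" means the queue already holds more than MIN_FRAMES packets and,
 * when packet durations are known, more than one second of data (attached
 * pictures and aborted queues always count as full), so read_thread below
 * temporarily stops buffering that stream. */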
2724 
2725 static int is_realtime(AVFormatContext *s)
2726 {
2727  if( !strcmp(s->iformat->name, "rtp")
2728  || !strcmp(s->iformat->name, "rtsp")
2729  || !strcmp(s->iformat->name, "sdp")
2730  )
2731  return 1;
2732 
2733  if(s->pb && ( !strncmp(s->url, "rtp:", 4)
2734  || !strncmp(s->url, "udp:", 4)
2735  )
2736  )
2737  return 1;
2738  return 0;
2739 }
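/* Examples: an "rtsp://" or "sdp" input, or a "udp:"/"rtp:" URL, is treated
 * as a realtime source, so its input buffering is not limited (see the
 * infinite_buffer handling in read_thread below); a local file or plain
 * "http:" input is not. */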
2740 
2741 /* this thread gets the stream from the disk or the network */
2742 static int read_thread(void *arg)
2743 {
2744  VideoState *is = arg;
2745  AVFormatContext *ic = NULL;
2746  int err, i, ret;
2747  int st_index[AVMEDIA_TYPE_NB];
2748  AVPacket pkt1, *pkt = &pkt1;
2749  int64_t stream_start_time;
2750  int pkt_in_play_range = 0;
2751  AVDictionaryEntry *t;
2752  SDL_mutex *wait_mutex = SDL_CreateMutex();
2753  int scan_all_pmts_set = 0;
2754  int64_t pkt_ts;
2755 
2756  if (!wait_mutex) {
2757  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
2758  ret = AVERROR(ENOMEM);
2759  goto fail;
2760  }
2761 
2762  memset(st_index, -1, sizeof(st_index));
2763  is->eof = 0;
2764 
2765  ic = avformat_alloc_context();
2766  if (!ic) {
2767  av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2768  ret = AVERROR(ENOMEM);
2769  goto fail;
2770  }
2771  ic->interrupt_callback.callback = decode_interrupt_cb;
2772  ic->interrupt_callback.opaque = is;
2773  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2774  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2775  scan_all_pmts_set = 1;
2776  }
2777  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2778  if (err < 0) {
2779  print_error(is->filename, err);
2780  ret = -1;
2781  goto fail;
2782  }
2783  if (scan_all_pmts_set)
2784  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2785 
2786  if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2787  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2788  ret = AVERROR_OPTION_NOT_FOUND;
2789  goto fail;
2790  }
2791  is->ic = ic;
2792 
2793  if (genpts)
2794  ic->flags |= AVFMT_FLAG_GENPTS;
2795 
2796  av_format_inject_global_side_data(ic);
2797 
2798  if (find_stream_info) {
2799  AVDictionary **opts = setup_find_stream_info_opts(ic, codec_opts);
2800  int orig_nb_streams = ic->nb_streams;
2801 
2802  err = avformat_find_stream_info(ic, opts);
2803 
2804  for (i = 0; i < orig_nb_streams; i++)
2805  av_dict_free(&opts[i]);
2806  av_freep(&opts);
2807 
2808  if (err < 0) {
2809  av_log(NULL, AV_LOG_WARNING,
2810  "%s: could not find codec parameters\n", is->filename);
2811  ret = -1;
2812  goto fail;
2813  }
2814  }
2815 
2816  if (ic->pb)
2817  ic->pb->eof_reached = 0; // FIXME hack: ffplay should probably not use avio_feof() to test for the end
2818 
2819  if (seek_by_bytes < 0)
2820  seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
2821 
2822  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2823 
2824  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2825  window_title = av_asprintf("%s - %s", t->value, input_filename);
2826 
2827  /* if seeking requested, we execute it */
2828  if (start_time != AV_NOPTS_VALUE) {
2829  int64_t timestamp;
2830 
2831  timestamp = start_time;
2832  /* add the stream start time */
2833  if (ic->start_time != AV_NOPTS_VALUE)
2834  timestamp += ic->start_time;
2835  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2836  if (ret < 0) {
2837  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2838  is->filename, (double)timestamp / AV_TIME_BASE);
2839  }
2840  }
2841 
2842  is->realtime = is_realtime(ic);
2843 
2844  if (show_status)
2845  av_dump_format(ic, 0, is->filename, 0);
2846 
2847  for (i = 0; i < ic->nb_streams; i++) {
2848  AVStream *st = ic->streams[i];
2849  enum AVMediaType type = st->codecpar->codec_type;
2850  st->discard = AVDISCARD_ALL;
2851  if (type >= 0 && wanted_stream_spec[type] && st_index[type] == -1)
2852  if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
2853  st_index[type] = i;
2854  }
2855  for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
2856  if (wanted_stream_spec[i] && st_index[i] == -1) {
2857  av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
2858  st_index[i] = INT_MAX;
2859  }
2860  }
2861 
2862  if (!video_disable)
2863  st_index[AVMEDIA_TYPE_VIDEO] =
2864  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2865  st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2866  if (!audio_disable)
2867  st_index[AVMEDIA_TYPE_AUDIO] =
2868  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2869  st_index[AVMEDIA_TYPE_AUDIO],
2870  st_index[AVMEDIA_TYPE_VIDEO],
2871  NULL, 0);
2872  if (!subtitle_disable)
2873  st_index[AVMEDIA_TYPE_SUBTITLE] =
2874  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2875  st_index[AVMEDIA_TYPE_SUBTITLE],
2876  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2877  st_index[AVMEDIA_TYPE_AUDIO] :
2878  st_index[AVMEDIA_TYPE_VIDEO]),
2879  NULL, 0);
2880 
2881  is->show_mode = show_mode;
2882  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2883  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2884  AVCodecParameters *codecpar = st->codecpar;
2885  AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
2886  if (codecpar->width)
2887  set_default_window_size(codecpar->width, codecpar->height, sar);
2888  }
2889 
2890  /* open the streams */
2891  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2892  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2893  }
2894 
2895  ret = -1;
2896  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2897  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2898  }
2899  if (is->show_mode == SHOW_MODE_NONE)
2900  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2901 
2902  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2903  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2904  }
2905 
2906  if (is->video_stream < 0 && is->audio_stream < 0) {
2907  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
2908  is->filename);
2909  ret = -1;
2910  goto fail;
2911  }
2912 
2913  if (infinite_buffer < 0 && is->realtime)
2914  infinite_buffer = 1;
2915 
2916  for (;;) {
2917  if (is->abort_request)
2918  break;
2919  if (is->paused != is->last_paused) {
2920  is->last_paused = is->paused;
2921  if (is->paused)
2922  is->read_pause_return = av_read_pause(ic);
2923  else
2924  av_read_play(ic);
2925  }
2926 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2927  if (is->paused &&
2928  (!strcmp(ic->iformat->name, "rtsp") ||
2929  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2930  /* wait 10 ms to avoid trying to get another packet */
2931  /* XXX: horrible */
2932  SDL_Delay(10);
2933  continue;
2934  }
2935 #endif
2936  if (is->seek_req) {
2937  int64_t seek_target = is->seek_pos;
2938  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2939  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2940 // FIXME the +-2 is due to rounding not being done in the correct direction when
2941 // generating the seek_pos/seek_rel variables
2942 
2943  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2944  if (ret < 0) {
2945  av_log(NULL, AV_LOG_ERROR,
2946  "%s: error while seeking\n", is->ic->url);
2947  } else {
2948  if (is->audio_stream >= 0) {
2949  packet_queue_flush(&is->audioq);
2950  packet_queue_put(&is->audioq, &flush_pkt);
2951  }
2952  if (is->subtitle_stream >= 0) {
2953  packet_queue_flush(&is->subtitleq);
2954  packet_queue_put(&is->subtitleq, &flush_pkt);
2955  }
2956  if (is->video_stream >= 0) {
2957  packet_queue_flush(&is->videoq);
2958  packet_queue_put(&is->videoq, &flush_pkt);
2959  }
2960  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
2961  set_clock(&is->extclk, NAN, 0);
2962  } else {
2963  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
2964  }
2965  }
2966  is->seek_req = 0;
2967  is->queue_attachments_req = 1;
2968  is->eof = 0;
2969  if (is->paused)
2970  step_to_next_frame(is);
2971  }
2972  if (is->queue_attachments_req) {
2973  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
2974  AVPacket copy = { 0 };
2975  if ((ret = av_packet_ref(&copy, &is->video_st->attached_pic)) < 0)
2976  goto fail;
2977  packet_queue_put(&is->videoq, &copy);
2978  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
2979  }
2980  is->queue_attachments_req = 0;
2981  }
2982 
2983  /* if the queues are full, no need to read more */
2984  if (infinite_buffer<1 &&
2985  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2986  || (stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq) &&
2987  stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq) &&
2988  stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq)))) {
2989  /* wait 10 ms */
2990  SDL_LockMutex(wait_mutex);
2991  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2992  SDL_UnlockMutex(wait_mutex);
2993  continue;
2994  }
2995  if (!is->paused &&
2996  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
2997  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
2998  if (loop != 1 && (!loop || --loop)) {
2999  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
3000  } else if (autoexit) {
3001  ret = AVERROR_EOF;
3002  goto fail;
3003  }
3004  }
3005  ret = av_read_frame(ic, pkt);
3006  if (ret < 0) {
3007  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
3008  if (is->video_stream >= 0)
3009  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
3010  if (is->audio_stream >= 0)
3011  packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
3012  if (is->subtitle_stream >= 0)
3013  packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
3014  is->eof = 1;
3015  }
3016  if (ic->pb && ic->pb->error)
3017  break;
3018  SDL_LockMutex(wait_mutex);
3019  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3020  SDL_UnlockMutex(wait_mutex);
3021  continue;
3022  } else {
3023  is->eof = 0;
3024  }
3025  /* check if packet is in play range specified by user, then queue, otherwise discard */
3026  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3027  pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
3028  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3029  (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3030  av_q2d(ic->streams[pkt->stream_index]->time_base) -
3031  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3032  <= ((double)duration / 1000000);
3033  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3034  packet_queue_put(&is->audioq, pkt);
3035  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3036  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3037  packet_queue_put(&is->videoq, pkt);
3038  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3039  packet_queue_put(&is->subtitleq, pkt);
3040  } else {
3041  av_packet_unref(pkt);
3042  }
3043  }
3044 
3045  ret = 0;
3046  fail:
3047  if (ic && !is->ic)
3048  avformat_close_input(&ic);
3049 
3050  if (ret != 0) {
3051  SDL_Event event;
3052 
3053  event.type = FF_QUIT_EVENT;
3054  event.user.data1 = is;
3055  SDL_PushEvent(&event);
3056  }
3057  SDL_DestroyMutex(wait_mutex);
3058  return 0;
3059 }
3060 
3061 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
3062 {
3063  VideoState *is;
3064 
3065  is = av_mallocz(sizeof(VideoState));
3066  if (!is)
3067  return NULL;
3068  is->last_video_stream = is->video_stream = -1;
3069  is->last_audio_stream = is->audio_stream = -1;
3070  is->last_subtitle_stream = is->subtitle_stream = -1;
3071  is->filename = av_strdup(filename);
3072  if (!is->filename)
3073  goto fail;
3074  is->iformat = iformat;
3075  is->ytop = 0;
3076  is->xleft = 0;
3077 
3078  /* start video display */
3079  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3080  goto fail;
3081  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3082  goto fail;
3083  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3084  goto fail;
3085 
3086  if (packet_queue_init(&is->videoq) < 0 ||
3087  packet_queue_init(&is->audioq) < 0 ||
3088  packet_queue_init(&is->subtitleq) < 0)
3089  goto fail;
3090 
3091  if (!(is->continue_read_thread = SDL_CreateCond())) {
3092  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
3093  goto fail;
3094  }
3095 
3096  init_clock(&is->vidclk, &is->videoq.serial);
3097  init_clock(&is->audclk, &is->audioq.serial);
3098  init_clock(&is->extclk, &is->extclk.serial);
3099  is->audio_clock_serial = -1;
3100  if (startup_volume < 0)
3101  av_log(NULL, AV_LOG_WARNING, "-volume=%d < 0, setting to 0\n", startup_volume);
3102  if (startup_volume > 100)
3103  av_log(NULL, AV_LOG_WARNING, "-volume=%d > 100, setting to 100\n", startup_volume);
3104  startup_volume = av_clip(startup_volume, 0, 100);
3105  startup_volume = av_clip(SDL_MIX_MAXVOLUME * startup_volume / 100, 0, SDL_MIX_MAXVOLUME);
3106  is->audio_volume = startup_volume;
3107  is->muted = 0;
3108  is->av_sync_type = av_sync_type;
3109  is->read_tid = SDL_CreateThread(read_thread, "read_thread", is);
3110  if (!is->read_tid) {
3111  av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
3112 fail:
3113  stream_close(is);
3114  return NULL;
3115  }
3116  return is;
3117 }
3118 
3119 static void stream_cycle_channel(VideoState *is, int codec_type)
3120 {
3121  AVFormatContext *ic = is->ic;
3122  int start_index, stream_index;
3123  int old_index;
3124  AVStream *st;
3125  AVProgram *p = NULL;
3126  int nb_streams = is->ic->nb_streams;
3127 
3128  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3129  start_index = is->last_video_stream;
3130  old_index = is->video_stream;
3131  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3132  start_index = is->last_audio_stream;
3133  old_index = is->audio_stream;
3134  } else {
3135  start_index = is->last_subtitle_stream;
3136  old_index = is->subtitle_stream;
3137  }
3138  stream_index = start_index;
3139 
3140  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3141  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3142  if (p) {
3143  nb_streams = p->nb_stream_indexes;
3144  for (start_index = 0; start_index < nb_streams; start_index++)
3145  if (p->stream_index[start_index] == stream_index)
3146  break;
3147  if (start_index == nb_streams)
3148  start_index = -1;
3149  stream_index = start_index;
3150  }
3151  }
3152 
3153  for (;;) {
3154  if (++stream_index >= nb_streams)
3155  {
3156  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3157  {
3158  stream_index = -1;
3159  is->last_subtitle_stream = -1;
3160  goto the_end;
3161  }
3162  if (start_index == -1)
3163  return;
3164  stream_index = 0;
3165  }
3166  if (stream_index == start_index)
3167  return;
3168  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3169  if (st->codecpar->codec_type == codec_type) {
3170  /* check that parameters are OK */
3171  switch (codec_type) {
3172  case AVMEDIA_TYPE_AUDIO:
3173  if (st->codecpar->sample_rate != 0 &&
3174  st->codecpar->channels != 0)
3175  goto the_end;
3176  break;
3177  case AVMEDIA_TYPE_VIDEO:
3178  case AVMEDIA_TYPE_SUBTITLE:
3179  goto the_end;
3180  default:
3181  break;
3182  }
3183  }
3184  }
3185  the_end:
3186  if (p && stream_index != -1)
3187  stream_index = p->stream_index[stream_index];
3188  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3189  av_get_media_type_string(codec_type),
3190  old_index,
3191  stream_index);
3192 
3193  stream_component_close(is, old_index);
3194  stream_component_open(is, stream_index);
3195 }
3196 
3197 
3198 static void toggle_full_screen(VideoState *is)
3199 {
3200  is_full_screen = !is_full_screen;
3201  SDL_SetWindowFullscreen(window, is_full_screen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0);
3202 }
3203 
3204 static void toggle_audio_display(VideoState *is)
3205 {
3206  int next = is->show_mode;
3207  do {
3208  next = (next + 1) % SHOW_MODE_NB;
3209  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3210  if (is->show_mode != next) {
3211  is->force_refresh = 1;
3212  is->show_mode = next;
3213  }
3214 }
3215 
3216 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3217  double remaining_time = 0.0;
3218  SDL_PumpEvents();
3219  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
3220  if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3221  SDL_ShowCursor(0);
3222  cursor_hidden = 1;
3223  }
3224  if (remaining_time > 0.0)
3225  av_usleep((int64_t)(remaining_time * 1000000.0));
3226  remaining_time = REFRESH_RATE;
3227  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3228  video_refresh(is, &remaining_time);
3229  SDL_PumpEvents();
3230  }
3231 }
3232 
3233 static void seek_chapter(VideoState *is, int incr)
3234 {
3235  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3236  int i;
3237 
3238  if (!is->ic->nb_chapters)
3239  return;
3240 
3241  /* find the current chapter */
3242  for (i = 0; i < is->ic->nb_chapters; i++) {
3243  AVChapter *ch = is->ic->chapters[i];
3244  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3245  i--;
3246  break;
3247  }
3248  }
3249 
3250  i += incr;
3251  i = FFMAX(i, 0);
3252  if (i >= is->ic->nb_chapters)
3253  return;
3254 
3255  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3256  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3257  AV_TIME_BASE_Q), 0, 0);
3258 }
3259 
3260 /* handle an event sent by the GUI */
3261 static void event_loop(VideoState *cur_stream)
3262 {
3263  SDL_Event event;
3264  double incr, pos, frac;
3265 
3266  for (;;) {
3267  double x;
3268  refresh_loop_wait_event(cur_stream, &event);
3269  switch (event.type) {
3270  case SDL_KEYDOWN:
3271  if (exit_on_keydown || event.key.keysym.sym == SDLK_ESCAPE || event.key.keysym.sym == SDLK_q) {
3272  do_exit(cur_stream);
3273  break;
3274  }
3275  // If we don't yet have a window, skip all key events, because read_thread might still be initializing...
3276  if (!cur_stream->width)
3277  continue;
3278  switch (event.key.keysym.sym) {
3279  case SDLK_f:
3280  toggle_full_screen(cur_stream);
3281  cur_stream->force_refresh = 1;
3282  break;
3283  case SDLK_p:
3284  case SDLK_SPACE:
3285  toggle_pause(cur_stream);
3286  break;
3287  case SDLK_m:
3288  toggle_mute(cur_stream);
3289  break;
3290  case SDLK_KP_MULTIPLY:
3291  case SDLK_0:
3292  update_volume(cur_stream, 1, SDL_VOLUME_STEP);
3293  break;
3294  case SDLK_KP_DIVIDE:
3295  case SDLK_9:
3296  update_volume(cur_stream, -1, SDL_VOLUME_STEP);
3297  break;
3298  case SDLK_s: // S: Step to next frame
3299  step_to_next_frame(cur_stream);
3300  break;
3301  case SDLK_a:
3302  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3303  break;
3304  case SDLK_v:
3305  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3306  break;
3307  case SDLK_c:
3308  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3309  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3310  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3311  break;
3312  case SDLK_t:
3313  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3314  break;
3315  case SDLK_w:
3316 #if CONFIG_AVFILTER
3317  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3318  if (++cur_stream->vfilter_idx >= nb_vfilters)
3319  cur_stream->vfilter_idx = 0;
3320  } else {
3321  cur_stream->vfilter_idx = 0;
3322  toggle_audio_display(cur_stream);
3323  }
3324 #else
3325  toggle_audio_display(cur_stream);
3326 #endif
3327  break;
3328  case SDLK_PAGEUP:
3329  if (cur_stream->ic->nb_chapters <= 1) {
3330  incr = 600.0;
3331  goto do_seek;
3332  }
3333  seek_chapter(cur_stream, 1);
3334  break;
3335  case SDLK_PAGEDOWN:
3336  if (cur_stream->ic->nb_chapters <= 1) {
3337  incr = -600.0;
3338  goto do_seek;
3339  }
3340  seek_chapter(cur_stream, -1);
3341  break;
3342  case SDLK_LEFT:
3343  incr = seek_interval ? -seek_interval : -10.0;
3344  goto do_seek;
3345  case SDLK_RIGHT:
3346  incr = seek_interval ? seek_interval : 10.0;
3347  goto do_seek;
3348  case SDLK_UP:
3349  incr = 60.0;
3350  goto do_seek;
3351  case SDLK_DOWN:
3352  incr = -60.0;
3353  do_seek:
3354  if (seek_by_bytes) {
3355  pos = -1;
3356  if (pos < 0 && cur_stream->video_stream >= 0)
3357  pos = frame_queue_last_pos(&cur_stream->pictq);
3358  if (pos < 0 && cur_stream->audio_stream >= 0)
3359  pos = frame_queue_last_pos(&cur_stream->sampq);
3360  if (pos < 0)
3361  pos = avio_tell(cur_stream->ic->pb);
3362  if (cur_stream->ic->bit_rate)
3363  incr *= cur_stream->ic->bit_rate / 8.0;
3364  else
3365  incr *= 180000.0;
3366  pos += incr;
3367  stream_seek(cur_stream, pos, incr, 1);
3368  } else {
3369  pos = get_master_clock(cur_stream);
3370  if (isnan(pos))
3371  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3372  pos += incr;
3373  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3374  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3375  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3376  }
3377  break;
3378  default:
3379  break;
3380  }
3381  break;
3382  case SDL_MOUSEBUTTONDOWN:
3383  if (exit_on_mousedown) {
3384  do_exit(cur_stream);
3385  break;
3386  }
3387  if (event.button.button == SDL_BUTTON_LEFT) {
3388  static int64_t last_mouse_left_click = 0;
3389  if (av_gettime_relative() - last_mouse_left_click <= 500000) {
3390  toggle_full_screen(cur_stream);
3391  cur_stream->force_refresh = 1;
3392  last_mouse_left_click = 0;
3393  } else {
3394  last_mouse_left_click = av_gettime_relative();
3395  }
3396  }
3397  case SDL_MOUSEMOTION:
3398  if (cursor_hidden) {
3399  SDL_ShowCursor(1);
3400  cursor_hidden = 0;
3401  }
3402  cursor_last_shown = av_gettime_relative();
3403  if (event.type == SDL_MOUSEBUTTONDOWN) {
3404  if (event.button.button != SDL_BUTTON_RIGHT)
3405  break;
3406  x = event.button.x;
3407  } else {
3408  if (!(event.motion.state & SDL_BUTTON_RMASK))
3409  break;
3410  x = event.motion.x;
3411  }
3412  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3413  uint64_t size = avio_size(cur_stream->ic->pb);
3414  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3415  } else {
3416  int64_t ts;
3417  int ns, hh, mm, ss;
3418  int tns, thh, tmm, tss;
3419  tns = cur_stream->ic->duration / 1000000LL;
3420  thh = tns / 3600;
3421  tmm = (tns % 3600) / 60;
3422  tss = (tns % 60);
3423  frac = x / cur_stream->width;
3424  ns = frac * tns;
3425  hh = ns / 3600;
3426  mm = (ns % 3600) / 60;
3427  ss = (ns % 60);
3428  av_log(NULL, AV_LOG_INFO,
3429  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3430  hh, mm, ss, thh, tmm, tss);
3431  ts = frac * cur_stream->ic->duration;
3432  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3433  ts += cur_stream->ic->start_time;
3434  stream_seek(cur_stream, ts, 0, 0);
3435  }
3436  break;
3437  case SDL_WINDOWEVENT:
3438  switch (event.window.event) {
3439  case SDL_WINDOWEVENT_SIZE_CHANGED:
3440  screen_width = cur_stream->width = event.window.data1;
3441  screen_height = cur_stream->height = event.window.data2;
3442  if (cur_stream->vis_texture) {
3443  SDL_DestroyTexture(cur_stream->vis_texture);
3444  cur_stream->vis_texture = NULL;
3445  }
3446  case SDL_WINDOWEVENT_EXPOSED:
3447  cur_stream->force_refresh = 1;
3448  }
3449  break;
3450  case SDL_QUIT:
3451  case FF_QUIT_EVENT:
3452  do_exit(cur_stream);
3453  break;
3454  default:
3455  break;
3456  }
3457  }
3458 }
3459 
3460 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3461 {
3462  av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3463  return opt_default(NULL, "video_size", arg);
3464 }
3465 
3466 static int opt_width(void *optctx, const char *opt, const char *arg)
3467 {
3468  screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3469  return 0;
3470 }
3471 
3472 static int opt_height(void *optctx, const char *opt, const char *arg)
3473 {
3474  screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3475  return 0;
3476 }
3477 
3478 static int opt_format(void *optctx, const char *opt, const char *arg)
3479 {
3480  file_iformat = av_find_input_format(arg);
3481  if (!file_iformat) {
3482  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3483  return AVERROR(EINVAL);
3484  }
3485  return 0;
3486 }
3487 
3488 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3489 {
3490  av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3491  return opt_default(NULL, "pixel_format", arg);
3492 }
3493 
3494 static int opt_sync(void *optctx, const char *opt, const char *arg)
3495 {
3496  if (!strcmp(arg, "audio"))
3497  av_sync_type = AV_SYNC_AUDIO_MASTER;
3498  else if (!strcmp(arg, "video"))
3499  av_sync_type = AV_SYNC_VIDEO_MASTER;
3500  else if (!strcmp(arg, "ext"))
3501  av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3502  else {
3503  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3504  exit(1);
3505  }
3506  return 0;
3507 }
3508 
3509 static int opt_seek(void *optctx, const char *opt, const char *arg)
3510 {
3511  start_time = parse_time_or_die(opt, arg, 1);
3512  return 0;
3513 }
3514 
3515 static int opt_duration(void *optctx, const char *opt, const char *arg)
3516 {
3517  duration = parse_time_or_die(opt, arg, 1);
3518  return 0;
3519 }
3520 
3521 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3522 {
3523  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3524  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3525  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3526  parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3527  return 0;
3528 }
3529 
3530 static void opt_input_file(void *optctx, const char *filename)
3531 {
3532  if (input_filename) {
3533  av_log(NULL, AV_LOG_FATAL,
3534  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3535  filename, input_filename);
3536  exit(1);
3537  }
3538  if (!strcmp(filename, "-"))
3539  filename = "pipe:";
3540  input_filename = filename;
3541 }
3542 
3543 static int opt_codec(void *optctx, const char *opt, const char *arg)
3544 {
3545  const char *spec = strchr(opt, ':');
3546  if (!spec) {
3547  av_log(NULL, AV_LOG_ERROR,
3548  "No media specifier was specified in '%s' in option '%s'\n",
3549  arg, opt);
3550  return AVERROR(EINVAL);
3551  }
3552  spec++;
3553  switch (spec[0]) {
3554  case 'a' : audio_codec_name = arg; break;
3555  case 's' : subtitle_codec_name = arg; break;
3556  case 'v' : video_codec_name = arg; break;
3557  default:
3558  av_log(NULL, AV_LOG_ERROR,
3559  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3560  return AVERROR(EINVAL);
3561  }
3562  return 0;
3563 }
3564 
3565 static int dummy;
3566 
3567 static const OptionDef options[] = {
3568  CMDUTILS_COMMON_OPTIONS
3569  { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3570  { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3571  { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3572  { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3573  { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3574  { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3575  { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3576  { "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3577  { "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3578  { "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3579  { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3580  { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3581  { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3582  { "seek_interval", OPT_FLOAT | HAS_ARG, { &seek_interval }, "set seek interval for left/right keys, in seconds", "seconds" },
3583  { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3584  { "noborder", OPT_BOOL, { &borderless }, "borderless window" },
3585  { "alwaysontop", OPT_BOOL, { &alwaysontop }, "window always on top" },
3586  { "volume", OPT_INT | HAS_ARG, { &startup_volume}, "set startup volume 0=min 100=max", "volume" },
3587  { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3588  { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3589  { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3590  { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3591  { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3592  { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3593  { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3594  { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3595  { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3596  { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3597  { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3598  { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3599  { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3600  { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3601  { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3602  { "left", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_left }, "set the x position for the left of the window", "x pos" },
3603  { "top", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_top }, "set the y position for the top of the window", "y pos" },
3604 #if CONFIG_AVFILTER
3605  { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3606  { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3607 #endif
3608  { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3609  { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3610  { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3611  { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3612  { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3613  { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3614  { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3615  { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3616  { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
3617  { "find_stream_info", OPT_BOOL | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
3618  "read and decode the streams to fill missing information with heuristics" },
3619  { "filter_threads", HAS_ARG | OPT_INT | OPT_EXPERT, { &filter_nbthreads }, "number of filter threads per graph" },
3620  { NULL, },
3621 };
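
The options[] table above is purely declarative: each entry binds an option name and a flag mask either to a variable address (e.g. { &autoexit }) or to a handler callback (e.g. { .func_arg = opt_sync }); the actual dispatch is done by parse_options() in cmdutils.c. The standalone sketch below (hypothetical demo_* names, not part of ffplay.c or cmdutils.c) illustrates how such a table is typically walked:

/* Illustrative sketch only: a miniature OptionDef-style table and the loop
 * that dispatches each "-name [arg]" either to a bool target or a callback. */
#include <stdio.h>
#include <string.h>

static int demo_autoexit;                      /* stands in for an OPT_BOOL target */

static int demo_opt_sync(const char *arg)      /* stands in for a func_arg handler */
{
    printf("sync mode set to %s\n", arg);
    return 0;
}

struct demo_option {
    const char *name;
    int needs_arg;                             /* like HAS_ARG                     */
    int *bool_target;                          /* like { &autoexit }               */
    int (*func_arg)(const char *);             /* like { .func_arg = opt_sync }    */
};

static const struct demo_option demo_options[] = {
    { "autoexit", 0, &demo_autoexit, NULL          },
    { "sync",     1, NULL,           demo_opt_sync },
    { NULL },
};

int main(void)
{
    const char *argv[] = { "-autoexit", "-sync", "audio" };
    int argc = (int)(sizeof(argv) / sizeof(argv[0]));

    for (int i = 0; i < argc; i++) {
        const char *name = argv[i] + 1;        /* skip the leading '-' */
        for (const struct demo_option *o = demo_options; o->name; o++) {
            if (strcmp(o->name, name))
                continue;
            if (o->func_arg)
                o->func_arg(o->needs_arg ? argv[++i] : NULL);
            else if (o->bool_target)
                *o->bool_target = 1;
            break;
        }
    }
    printf("autoexit = %d\n", demo_autoexit);
    return 0;
}

On the ffplay command line the same entries surface as, for example, "ffplay -autoexit -sync audio input.mkv".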
3622 
3623 static void show_usage(void)
3624 {
3625  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3626  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3627  av_log(NULL, AV_LOG_INFO, "\n");
3628 }
3629 
3630 void show_help_default(const char *opt, const char *arg)
3631 {
3632  av_log_set_callback(log_callback_help);
3633  show_usage();
3634  show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3635  show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3636  printf("\n");
3637  show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3638  show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3639 #if !CONFIG_AVFILTER
3640  show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3641 #else
3642  show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3643 #endif
3644  printf("\nWhile playing:\n"
3645  "q, ESC quit\n"
3646  "f toggle full screen\n"
3647  "p, SPC pause\n"
3648  "m toggle mute\n"
3649  "9, 0 decrease and increase volume respectively\n"
3650  "/, * decrease and increase volume respectively\n"
3651  "a cycle audio channel in the current program\n"
3652  "v cycle video channel\n"
3653  "t cycle subtitle channel in the current program\n"
3654  "c cycle program\n"
3655  "w cycle video filters or show modes\n"
3656  "s activate frame-step mode\n"
3657  "left/right seek backward/forward 10 seconds or to custom interval if -seek_interval is set\n"
3658  "down/up seek backward/forward 1 minute\n"
3659  "page down/page up seek backward/forward 10 minutes\n"
3660  "right mouse click seek to percentage in file corresponding to fraction of width\n"
3661  "left double-click toggle full screen\n"
3662  );
3663 }
3664 
3665 /* Called from the main */
3666 int main(int argc, char **argv)
3667 {
3668  int flags;
3669  VideoState *is;
3670 
3671  init_dynload();
3672 
3673  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3674  parse_loglevel(argc, argv, options);
3675 
3676  /* register all codecs, demux and protocols */
3677 #if CONFIG_AVDEVICE
3678  avdevice_register_all();
3679 #endif
3680  avformat_network_init();
3681 
3682  init_opts();
3683 
3684  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3685  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3686 
3687  show_banner(argc, argv, options);
3688 
3689  parse_options(NULL, argc, argv, options, opt_input_file);
3690 
3691  if (!input_filename) {
3692  show_usage();
3693  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3694  av_log(NULL, AV_LOG_FATAL,
3695  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3696  exit(1);
3697  }
3698 
3699  if (display_disable) {
3700  video_disable = 1;
3701  }
3702  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3703  if (audio_disable)
3704  flags &= ~SDL_INIT_AUDIO;
3705  else {
3706  /* Try to work around an occasional ALSA buffer underflow issue when the
3707  * period size is NPOT due to ALSA resampling by forcing the buffer size. */
3708  if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
3709  SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1);
3710  }
3711  if (display_disable)
3712  flags &= ~SDL_INIT_VIDEO;
3713  if (SDL_Init (flags)) {
3714  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3715  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3716  exit(1);
3717  }
3718 
3719  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3720  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3721 
3722  av_init_packet(&flush_pkt);
3723  flush_pkt.data = (uint8_t *)&flush_pkt;
3724 
3725  if (!display_disable) {
3726  int flags = SDL_WINDOW_HIDDEN;
3727  if (alwaysontop)
3728 #if SDL_VERSION_ATLEAST(2,0,5)
3729  flags |= SDL_WINDOW_ALWAYS_ON_TOP;
3730 #else
3731  av_log(NULL, AV_LOG_WARNING, "Your SDL version doesn't support SDL_WINDOW_ALWAYS_ON_TOP. Feature will be inactive.\n");
3732 #endif
3733  if (borderless)
3734  flags |= SDL_WINDOW_BORDERLESS;
3735  else
3736  flags |= SDL_WINDOW_RESIZABLE;
3737  window = SDL_CreateWindow(program_name, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, default_width, default_height, flags);
3738  SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
3739  if (window) {
3740  renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
3741  if (!renderer) {
3742  av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
3743  renderer = SDL_CreateRenderer(window, -1, 0);
3744  }
3745  if (renderer) {
3746  if (!SDL_GetRendererInfo(renderer, &renderer_info))
3747  av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", renderer_info.name);
3748  }
3749  }
3750  if (!window || !renderer || !renderer_info.num_texture_formats) {
3751  av_log(NULL, AV_LOG_FATAL, "Failed to create window or renderer: %s", SDL_GetError());
3752  do_exit(NULL);
3753  }
3754  }
3755 
3756  is = stream_open(input_filename, file_iformat);
3757  if (!is) {
3758  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3759  do_exit(NULL);
3760  }
3761 
3762  event_loop(is);
3763 
3764  /* never returns */
3765 
3766  return 0;
3767 }
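
main() above masks the SDL_Init() flags according to the display- and audio-disable settings, then asks SDL for a hardware-accelerated, vsync-capable renderer and quietly falls back to whatever renderer SDL can provide. A self-contained sketch of that fallback pattern, assuming only SDL2 and not part of ffplay.c:

/* Minimal SDL2 example: prefer an accelerated + vsynced renderer,
 * fall back to any available renderer, and report which one was chosen. */
#include <SDL.h>
#include <stdio.h>

int main(void)
{
    if (SDL_Init(SDL_INIT_VIDEO)) {
        fprintf(stderr, "SDL_Init failed: %s\n", SDL_GetError());
        return 1;
    }
    SDL_Window *window = SDL_CreateWindow("demo", SDL_WINDOWPOS_UNDEFINED,
                                          SDL_WINDOWPOS_UNDEFINED, 640, 480,
                                          SDL_WINDOW_HIDDEN);
    if (!window) {
        fprintf(stderr, "SDL_CreateWindow failed: %s\n", SDL_GetError());
        SDL_Quit();
        return 1;
    }
    /* First choice: hardware acceleration + vsync, as requested in main() above. */
    SDL_Renderer *renderer = SDL_CreateRenderer(window, -1,
                                                SDL_RENDERER_ACCELERATED |
                                                SDL_RENDERER_PRESENTVSYNC);
    if (!renderer) {
        fprintf(stderr, "accelerated renderer unavailable: %s\n", SDL_GetError());
        renderer = SDL_CreateRenderer(window, -1, 0);  /* fall back to any renderer */
    }
    if (renderer) {
        SDL_RendererInfo info;
        if (!SDL_GetRendererInfo(renderer, &info))     /* returns 0 on success */
            printf("using %s renderer\n", info.name);
        SDL_DestroyRenderer(renderer);
    }
    SDL_DestroyWindow(window);
    SDL_Quit();
    return 0;
}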
OPT_FLOAT
#define OPT_FLOAT
Definition: cmdutils.h:168
AVSubtitle
Definition: avcodec.h:3933
rect::w
int w
Definition: f_ebur128.c:91
sws_getCachedContext
struct SwsContext * sws_getCachedContext(struct SwsContext *context, int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Check if context can be reused, otherwise reallocate a new one.
Definition: utils.c:2371
do_exit
static void do_exit(VideoState *is)
Definition: ffplay.c:1298
VideoState::seek_rel
int64_t seek_rel
Definition: ffplay.c:214
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:599
AVCodec
AVCodec.
Definition: avcodec.h:3481
opt_frame_size
static int opt_frame_size(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3460
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:204
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
VideoState::video_st
AVStream * video_st
Definition: ffplay.c:284
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
Frame::width
int width
Definition: ffplay.c:161
AVFMT_NO_BYTE_SEEK
#define AVFMT_NO_BYTE_SEEK
Format does not allow seeking by bytes.
Definition: avformat.h:475
AudioParams::fmt
enum AVSampleFormat fmt
Definition: ffplay.c:138
av_sync_type
static int av_sync_type
Definition: ffplay.c:330
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
renderer_info
static SDL_RendererInfo renderer_info
Definition: ffplay.c:369
configure_filtergraph
int configure_filtergraph(FilterGraph *fg)
Definition: ffmpeg_filter.c:1005
frame_queue_nb_remaining
static int frame_queue_nb_remaining(FrameQueue *f)
Definition: ffplay.c:803
frame_queue_next
static void frame_queue_next(FrameQueue *f)
Definition: ffplay.c:787
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: avcodec.h:3953
Decoder::finished
int finished
Definition: ffplay.c:193
AVFilterGraph::nb_threads
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:869
AVCodecContext::channel_layout
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:2276
av_compare_ts
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
Definition: mathematics.c:147
AVProgram::nb_stream_indexes
unsigned int nb_stream_indexes
Definition: avformat.h:1269
frame_queue_last_pos
static int64_t frame_queue_last_pos(FrameQueue *f)
Definition: ffplay.c:809
out
FILE * out
Definition: movenc.c:54
AVCodecContext::sample_rate
int sample_rate
samples per second
Definition: avcodec.h:2225
FFSWAP
#define FFSWAP(type, a, b)
Definition: common.h:99
video_thread
static int video_thread(void *arg)
Definition: ffplay.c:2120
VideoState::av_sync_type
int av_sync_type
Definition: ffplay.c:233
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
set_default_window_size
static void set_default_window_size(int width, int height, AVRational sar)
Definition: ffplay.c:1324
AV_PIX_FMT_BGR32
#define AV_PIX_FMT_BGR32
Definition: pixfmt.h:362
AV_NOSYNC_THRESHOLD
#define AV_NOSYNC_THRESHOLD
Definition: ffplay.c:86
AVFormatContext::nb_chapters
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1571
AVCodecParameters
This struct describes the properties of an encoded stream.
Definition: avcodec.h:3949
n
int n
Definition: avisynth_c.h:760
AV_LOG_QUIET
#define AV_LOG_QUIET
Print no output.
Definition: log.h:158
FrameQueue::keep_last
int keep_last
Definition: ffplay.c:175
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:686
VideoState::audio_hw_buf_size
int audio_hw_buf_size
Definition: ffplay.c:243
decoder_decode_frame
static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub)
Definition: ffplay.c:590
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:55
AV_DISPOSITION_ATTACHED_PIC
#define AV_DISPOSITION_ATTACHED_PIC
The stream is stored in the file as an attached picture/"cover art" (e.g.
Definition: avformat.h:838
FrameQueue::cond
SDL_cond * cond
Definition: ffplay.c:178
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:119
AVStream::discard
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:925
av_find_program_from_stream
AVProgram * av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
Find the programs which belong to a given stream.
Definition: utils.c:4190
display_disable
static int display_disable
Definition: ffplay.c:325
screen_width
static int screen_width
Definition: ffplay.c:315
DEBUG
#define DEBUG
Definition: vf_framerate.c:29
sws_dict
AVDictionary * sws_dict
Definition: cmdutils.c:71
swr_set_compensation
int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance)
Activate resampling compensation ("soft" compensation).
Definition: swresample.c:897
SAMPLE_ARRAY_SIZE
#define SAMPLE_ARRAY_SIZE
Definition: ffplay.c:104
rect
Definition: f_ebur128.c:91
update_volume
static void update_volume(VideoState *is, int sign, double step)
Definition: ffplay.c:1510
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
VideoState::auddec
Decoder auddec
Definition: ffplay.c:227
ch
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi - 0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0f/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(INT64_C(1)<< 63))) #define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={ FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64), };static void cpy1(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, len);} static void cpy2(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 2 *len);} static void cpy4(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 4 *len);} static void cpy8(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 8 *len);} AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags) { AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){ in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);} ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map) { switch(av_get_bytes_per_sample(in_fmt)){ case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;} } if(HAVE_X86ASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);return ctx;} void swri_audio_convert_free(AudioConvert **ctx) { av_freep(ctx);} int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len) { int ch;int off=0;const int os=(out->planar ? 1 :out->ch_count) *out->bps;unsigned misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask) { int planes=in->planar ? in->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;} if(ctx->out_simd_align_mask) { int planes=out->planar ? out->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;} if(ctx->simd_f &&!ctx->ch_map &&!misaligned){ off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){ if(out->planar==in->planar){ int planes=out->planar ? out->ch_count :1;for(ch=0;ch< planes;ch++){ ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
screen_left
static int screen_left
Definition: ffplay.c:317
av_opt_set_int_list
#define av_opt_set_int_list(obj, name, val, term, flags)
Set a binary option to an integer list.
Definition: opt.h:707
AudioParams::frame_size
int frame_size
Definition: ffplay.c:139
AVSubtitleRect
Definition: avcodec.h:3898
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:113
Decoder::next_pts
int64_t next_pts
Definition: ffplay.c:198
decoder_start
static int decoder_start(Decoder *d, int(*fn)(void *), const char *thread_name, void *arg)
Definition: ffplay.c:2109
rect::y
int y
Definition: f_ebur128.c:91
FrameQueue::size
int size
Definition: ffplay.c:173
avformat_get_class
const AVClass * avformat_get_class(void)
Get the AVClass for AVFormatContext.
Definition: options.c:168
av_unused
#define av_unused
Definition: attributes.h:125
Frame::sar
AVRational sar
Definition: ffplay.c:164
AudioParams::channel_layout
int64_t channel_layout
Definition: ffplay.c:137
out_size
int out_size
Definition: movenc.c:55
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
upload_texture
static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext **img_convert_ctx)
Definition: ffplay.c:907
end
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
VideoState::vis_texture
SDL_Texture * vis_texture
Definition: ffplay.c:272
queue_picture
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
Definition: ffplay.c:1732
av_get_channel_layout_string
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
Definition: channel_layout.c:211
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
AudioParams
Definition: ffplay.c:134
VideoState::subtitle_st
AVStream * subtitle_st
Definition: ffplay.c:277
AV_SYNC_VIDEO_MASTER
@ AV_SYNC_VIDEO_MASTER
Definition: ffplay.c:184
pixdesc.h
AVFormatContext::streams
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1410
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
VideoState::frame_last_filter_delay
double frame_last_filter_delay
Definition: ffplay.c:282
AVFrame::width
int width
Definition: frame.h:353
VideoState::xleft
int xleft
Definition: ffplay.c:292
Frame::pts
double pts
Definition: ffplay.c:158
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:522
pixels
int pixels
Definition: avisynth_c.h:390
OPT_INPUT
#define OPT_INPUT
Definition: cmdutils.h:178
avcodec_decode_subtitle2
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:1070
frame_queue_init
static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
Definition: ffplay.c:690
subtitle_codec_name
static const char * subtitle_codec_name
Definition: ffplay.c:345
AVPacket::data
uint8_t * data
Definition: avcodec.h:1477
EXTERNAL_CLOCK_MIN_FRAMES
#define EXTERNAL_CLOCK_MIN_FRAMES
Definition: ffplay.c:68
HAS_ARG
#define HAS_ARG
Definition: cmdutils.h:161
b
#define b
Definition: input.c:41
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:150
Clock
Definition: ffplay.c:143
data
const char data[16]
Definition: mxf.c:91
frame_queue_destory
static void frame_queue_destory(FrameQueue *f)
Definition: ffplay.c:711
SAMPLE_QUEUE_SIZE
#define SAMPLE_QUEUE_SIZE
Definition: ffplay.c:131
program_name
const char program_name[]
program name, defined by the program for show_version().
Definition: ffplay.c:63
Decoder::queue
PacketQueue * queue
Definition: ffplay.c:190
format_opts
AVDictionary * format_opts
Definition: cmdutils.c:73
AVSEEK_FLAG_BYTE
#define AVSEEK_FLAG_BYTE
seeking based on position in bytes
Definition: avformat.h:2496
AVIOContext::error
int error
contains the error code or 0 if no error happened
Definition: avio.h:245
AV_DICT_IGNORE_SUFFIX
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key,...
Definition: dict.h:70
packet_queue_put_nullpacket
static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
Definition: ffplay.c:470
AV_PIX_FMT_RGB32_1
#define AV_PIX_FMT_RGB32_1
Definition: pixfmt.h:361
VideoState::audio_diff_avg_coef
double audio_diff_avg_coef
Definition: ffplay.c:238
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
CURSOR_HIDE_DELAY
#define CURSOR_HIDE_DELAY
Definition: ffplay.c:106
SDL_VOLUME_STEP
#define SDL_VOLUME_STEP
Definition: ffplay.c:77
show_help_children
void show_help_children(const AVClass *class, int flags)
Show help for all options with given flags in class and all its children.
Definition: cmdutils.c:206
channels
channels
Definition: aptx.c:30
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
AV_PIX_FMT_BGRA
@ AV_PIX_FMT_BGRA
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:95
autorotate
static int autorotate
Definition: ffplay.c:355
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: avcodec.h:1495
TextureFormatEntry::texture_fmt
int texture_fmt
Definition: ffplay.c:374
video_disable
static int video_disable
Definition: ffplay.c:320
Frame::uploaded
int uploaded
Definition: ffplay.c:165
mathematics.h
sws_scale
int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
Definition: swscale.c:759
AVDictionary
Definition: dict.c:30
compute_target_delay
static double compute_target_delay(double delay, VideoState *is)
Definition: ffplay.c:1525
decoder_init
static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond)
Definition: ffplay.c:581
Frame
Definition: ffplay.c:154
stream_close
static void stream_close(VideoState *is)
Definition: ffplay.c:1261
av_read_frame
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: utils.c:1785
SDL_AUDIO_MAX_CALLBACKS_PER_SEC
#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC
Definition: ffplay.c:74
AVFMT_NOBINSEARCH
#define AVFMT_NOBINSEARCH
Format does not allow to fall back on binary search via read_timestamp.
Definition: avformat.h:473
VideoState::paused
int paused
Definition: ffplay.c:208
init_clock
static void init_clock(Clock *c, int *queue_serial)
Definition: ffplay.c:1406
codec_type
enum AVMediaType codec_type
Definition: rtp.c:37
AV_OPT_FLAG_FILTERING_PARAM
#define AV_OPT_FLAG_FILTERING_PARAM
a generic parameter which can be set by the user for filtering
Definition: opt.h:291
opt_seek
static int opt_seek(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3509
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:336
av_strlcatf
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:101
VideoState::iformat
AVInputFormat * iformat
Definition: ffplay.c:205
get_master_clock
static double get_master_clock(VideoState *is)
Definition: ffplay.c:1439
VideoState::width
int width
Definition: ffplay.c:292
sample_rate
sample_rate
Definition: ffmpeg_filter.c:191
dummy
static int dummy
Definition: ffplay.c:3565
update_video_pts
static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial)
Definition: ffplay.c:1567
FF_QUIT_EVENT
#define FF_QUIT_EVENT
Definition: ffplay.c:365
PacketQueue
Definition: ffplay.c:118
AV_SYNC_EXTERNAL_CLOCK
@ AV_SYNC_EXTERNAL_CLOCK
Definition: ffplay.c:185
subtitle_thread
static int subtitle_thread(void *arg)
Definition: ffplay.c:2227
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
VideoState::last_subtitle_stream
int last_subtitle_stream
Definition: ffplay.c:304
VideoState::SHOW_MODE_NONE
@ SHOW_MODE_NONE
Definition: ffplay.c:262
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:120
OptionDef
Definition: cmdutils.h:158
audio_decode_frame
static int audio_decode_frame(VideoState *is)
Decode one audio frame and return its uncompressed size.
Definition: ffplay.c:2330
subtitle_disable
static int subtitle_disable
Definition: ffplay.c:321
VideoState::pictq
FrameQueue pictq
Definition: ffplay.c:223
genpts
static int genpts
Definition: ffplay.c:334
VideoState::swr_ctx
struct SwrContext * swr_ctx
Definition: ffplay.c:257
opt_sync
static int opt_sync(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3494
step_to_next_frame
static void step_to_next_frame(VideoState *is)
Definition: ffplay.c:1517
VideoState::sampq
FrameQueue sampq
Definition: ffplay.c:225
TextureFormatEntry::format
enum AVPixelFormat format
Definition: ffplay.c:373
FrameQueue::rindex
int rindex
Definition: ffplay.c:171
video_display
static void video_display(VideoState *is)
Definition: ffplay.c:1360
AVCodec::max_lowres
uint8_t max_lowres
maximum value for lowres supported by the decoder
Definition: avcodec.h:3506
avformat_close_input
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: utils.c:4452
parse_number_or_die
double parse_number_or_die(const char *context, const char *numstr, int type, double min, double max)
Parse a string and return its corresponding value as a double.
Definition: cmdutils.c:144
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:502
AVFormatContext::interrupt_callback
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1620
SDL_AUDIO_MIN_BUFFER_SIZE
#define SDL_AUDIO_MIN_BUFFER_SIZE
Definition: ffplay.c:72
print_error
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:1081
startup_volume
static int startup_volume
Definition: ffplay.c:328
window
static SDL_Window * window
Definition: ffplay.c:367
avfilter_graph_create_filter
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
Definition: avfiltergraph.c:142
AVCodecParameters::channels
int channels
Audio only.
Definition: avcodec.h:4063
toggle_full_screen
static void toggle_full_screen(VideoState *is)
Definition: ffplay.c:3198
Clock::queue_serial
int * queue_serial
Definition: ffplay.c:150
AVFormatContext::iformat
ff_const59 struct AVInputFormat * iformat
The input container format.
Definition: avformat.h:1354
VideoState::extclk
Clock extclk
Definition: ffplay.c:221
VideoState::seek_flags
int seek_flags
Definition: ffplay.c:212
alwaysontop
static int alwaysontop
Definition: ffplay.c:327
VideoState::audio_st
AVStream * audio_st
Definition: ffplay.c:241
packet_queue_init
static int packet_queue_init(PacketQueue *q)
Definition: ffplay.c:481
AUDIO_DIFF_AVG_NB
#define AUDIO_DIFF_AVG_NB
Definition: ffplay.c:97
fail
#define fail()
Definition: checkasm.h:120
avfilter_graph_alloc
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:83
opt_duration
static int opt_duration(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3515
FrameQueue
Definition: ffplay.c:169
packet_queue_put
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:456
samplefmt.h
AVSubtitleRect::x
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3899
VideoState::video_stream
int video_stream
Definition: ffplay.c:283
autoexit
static int autoexit
Definition: ffplay.c:337
avio_tell
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:557
AVERROR_OPTION_NOT_FOUND
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:61
AVChapter
Definition: avformat.h:1299
video_image_display
static void video_image_display(VideoState *is)
Definition: ffplay.c:974
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
opt_show_mode
static int opt_show_mode(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3521
Decoder::empty_queue_cond
SDL_cond * empty_queue_cond
Definition: ffplay.c:195
pts
static int64_t pts
Definition: transcode_aac.c:647
set_clock_speed
static void set_clock_speed(Clock *c, double speed)
Definition: ffplay.c:1400
VideoState::audio_diff_threshold
double audio_diff_threshold
Definition: ffplay.c:239
sws_get_class
const AVClass * sws_get_class(void)
Get the AVClass for swsContext.
Definition: options.c:95
av_opt_set
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:449
VideoState::audio_diff_cum
double audio_diff_cum
Definition: ffplay.c:237
VideoState::last_video_stream
int last_video_stream
Definition: ffplay.c:304
OPT_STRING
#define OPT_STRING
Definition: cmdutils.h:164
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:261
fast
static int fast
Definition: ffplay.c:333
fn
#define fn(a)
Definition: colorspacedsp_template.c:42
loop
static int loop
Definition: ffplay.c:340
AVRational::num
int num
Numerator.
Definition: rational.h:59
avformat_network_init
int avformat_network_init(void)
Do global initialization of network libraries.
Definition: utils.c:5021
VideoState::rdft_bits
int rdft_bits
Definition: ffplay.c:268
opt_height
static int opt_height(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3472
avsubtitle_free
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:1098
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:189
sdl_texture_format_map
static const struct TextureFormatEntry sdl_texture_format_map[]
AVFormatContext::bit_rate
int64_t bit_rate
Total stream bitrate in bit/s, 0 if not available.
Definition: avformat.h:1464
is_full_screen
static int is_full_screen
Definition: ffplay.c:360
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:203
avassert.h
set_sdl_yuv_conversion_mode
static void set_sdl_yuv_conversion_mode(AVFrame *frame)
Definition: ffplay.c:958
lrint
#define lrint
Definition: tablegen.h:53
Frame::flip_v
int flip_v
Definition: ffplay.c:166
av_guess_sample_aspect_ratio
AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
Guess the sample aspect ratio of a frame, based on both the stream and the frame aspect ratio.
Definition: utils.c:5084
AV_LOG_TRACE
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
Definition: log.h:202
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
AVFormatContext::metadata
AVDictionary * metadata
Metadata that applies to the whole file.
Definition: avformat.h:1582
AVInputFormat
Definition: avformat.h:640
audio_thread
static int audio_thread(void *arg)
Definition: ffplay.c:2022
set_clock
static void set_clock(Clock *c, double pts, int serial)
Definition: ffplay.c:1394
av_dump_format
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate,...
Definition: dump.c:571
VideoState
Definition: ffplay.c:203
frame_queue_peek_next
static Frame * frame_queue_peek_next(FrameQueue *f)
Definition: ffplay.c:735
sdl_audio_callback
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
Definition: ffplay.c:2443
sync_clock_to_slave
static void sync_clock_to_slave(Clock *c, Clock *slave)
Definition: ffplay.c:1414
swr_init
av_cold int swr_init(struct SwrContext *s)
Initialize context after user parameters have been set.
Definition: swresample.c:152
opt_input_file
static void opt_input_file(void *optctx, const char *filename)
Definition: ffplay.c:3530
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
frame_queue_signal
static void frame_queue_signal(FrameQueue *f)
Definition: ffplay.c:723
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
filter_codec_opts
AVDictionary * filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id, AVFormatContext *s, AVStream *st, AVCodec *codec)
Filter out options for given codec.
Definition: cmdutils.c:2079
OPT_INT
#define OPT_INT
Definition: cmdutils.h:167
VideoState::img_convert_ctx
struct SwsContext * img_convert_ctx
Definition: ffplay.c:287
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:156
width
#define width
VideoState::ShowMode
ShowMode
Definition: ffplay.c:261
Decoder::avctx
AVCodecContext * avctx
Definition: ffplay.c:191
s
#define s(width, name)
Definition: cbs_vp9.c:257
AV_OPT_FLAG_ENCODING_PARAM
#define AV_OPT_FLAG_ENCODING_PARAM
a generic parameter which can be set by the user for muxing or encoding
Definition: opt.h:276
show_help_default
void show_help_default(const char *opt, const char *arg)
Per-fftool specific help handler.
Definition: ffplay.c:3630
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
default_height
static int default_height
Definition: ffplay.c:314
AVFormatContext::flags
int flags
Flags modifying the (de)muxer behaviour.
Definition: avformat.h:1473
format
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
AVCodecParameters::sample_aspect_ratio
AVRational sample_aspect_ratio
Video only.
Definition: avcodec.h:4033
AVFrame::channels
int channels
number of audio channels, only used for audio.
Definition: frame.h:601
AVInputFormat::name
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:645
AV_PIX_FMT_0BGR32
#define AV_PIX_FMT_0BGR32
Definition: pixfmt.h:365
AVCOL_SPC_SMPTE170M
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
Definition: pixfmt.h:503
AVDictionaryEntry::key
char * key
Definition: dict.h:82
Clock::last_updated
double last_updated
Definition: ffplay.c:146
PacketQueue::duration
int64_t duration
Definition: ffplay.c:122
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
AVSubtitleRect::y
int y
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3900
AVCodecParameters::width
int width
Video only.
Definition: avcodec.h:4023
calculate_display_rect
static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, int pic_width, int pic_height, AVRational pic_sar)
Definition: ffplay.c:862
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
screen_height
static int screen_height
Definition: ffplay.c:316
EXTERNAL_CLOCK_SPEED_STEP
#define EXTERNAL_CLOCK_SPEED_STEP
Definition: ffplay.c:94
AV_CH_LAYOUT_STEREO_DOWNMIX
#define AV_CH_LAYOUT_STEREO_DOWNMIX
Definition: channel_layout.h:112
Decoder::pkt_serial
int pkt_serial
Definition: ffplay.c:192
opt_frame_pix_fmt
static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3488
avcodec_receive_frame
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
Definition: decode.c:740
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
AVMEDIA_TYPE_NB
@ AVMEDIA_TYPE_NB
Definition: avutil.h:206
outputs
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
av_read_play
int av_read_play(AVFormatContext *s)
Start playing a network-based stream (e.g.
Definition: utils.c:4275
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
opt_codec
static int opt_codec(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3543
Clock::pts_drift
double pts_drift
Definition: ffplay.c:145
swr_convert
int attribute_align_arg swr_convert(struct SwrContext *s, uint8_t *out_arg[SWR_CH_MAX], int out_count, const uint8_t *in_arg[SWR_CH_MAX], int in_count)
Definition: swresample.c:714
VideoState::videoq
PacketQueue videoq
Definition: ffplay.c:285
ctx
AVFormatContext * ctx
Definition: movenc.c:48
limits.h
REFRESH_RATE
#define REFRESH_RATE
Definition: ffplay.c:100
FrameQueue::rindex_shown
int rindex_shown
Definition: ffplay.c:176
nb_streams
static int nb_streams
Definition: ffprobe.c:280
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
VideoState::force_refresh
int force_refresh
Definition: ffplay.c:207
get_clock
static double get_clock(Clock *c)
Definition: ffplay.c:1374
screen_top
static int screen_top
Definition: ffplay.c:318
VideoState::audio_diff_avg_count
int audio_diff_avg_count
Definition: ffplay.c:240
EXTERNAL_CLOCK_SPEED_MIN
#define EXTERNAL_CLOCK_SPEED_MIN
Definition: ffplay.c:92
avcodec_get_name
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:1183
PacketQueue::last_pkt
MyAVPacketList * last_pkt
Definition: ffplay.c:119
sws_flags
static unsigned sws_flags
Definition: ffplay.c:110
parse_options
void parse_options(void *optctx, int argc, char **argv, const OptionDef *options, void(*parse_arg_function)(void *, const char *))
Definition: cmdutils.c:383
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
av_get_sample_fmt_name
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:49
renderer
static SDL_Renderer * renderer
Definition: ffplay.c:368
av_usleep
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
SwrContext
The libswresample context.
Definition: swresample_internal.h:95
vp_duration
static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp)
Definition: ffplay.c:1555
swr_alloc_set_opts
struct SwrContext * swr_alloc_set_opts(struct SwrContext *s, int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate, int64_t in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate, int log_offset, void *log_ctx)
Allocate SwrContext if needed and set/reset common parameters.
Definition: swresample.c:59
NAN
#define NAN
Definition: mathematics.h:64
f
#define f(width, name)
Definition: cbs_vp9.c:255
AV_PIX_FMT_BGR32_1
#define AV_PIX_FMT_BGR32_1
Definition: pixfmt.h:363
VideoState::step
int step
Definition: ffplay.c:293
av_rdft_calc
void av_rdft_calc(RDFTContext *s, FFTSample *data)
synchronize_audio
static int synchronize_audio(VideoState *is, int nb_samples)
Definition: ffplay.c:2282
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:1575
arg
const char * arg
Definition: jacosubdec.c:66
if
if(ret)
Definition: filter_design.txt:179
window_title
static const char * window_title
Definition: ffplay.c:312
Clock::speed
double speed
Definition: ffplay.c:147
VideoState::SHOW_MODE_VIDEO
@ SHOW_MODE_VIDEO
Definition: ffplay.c:262
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: avcodec.h:811
AVFormatContext
Format I/O context.
Definition: avformat.h:1342
avfilter_get_by_name
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:485
init_dynload
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:120
opts
AVDictionary * opts
Definition: movenc.c:50
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:1017
AVSubtitleRect::w
int w
width of pict, undefined when pict is not set
Definition: avcodec.h:3901
seek_chapter
static void seek_chapter(VideoState *is, int incr)
Definition: ffplay.c:3233
get_master_sync_type
static int get_master_sync_type(VideoState *is)
Definition: ffplay.c:1422
avcodec_get_class
const AVClass * avcodec_get_class(void)
Get the AVClass for AVCodecContext.
Definition: options.c:294
avfilter_graph_config
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
Definition: avfiltergraph.c:1267
AVStream::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avformat.h:899
NULL
#define NULL
Definition: coverity.c:32
stream_open
static VideoState * stream_open(const char *filename, AVInputFormat *iformat)
Definition: ffplay.c:3061
FrameQueue::max_size
int max_size
Definition: ffplay.c:174
OPT_EXPERT
#define OPT_EXPERT
Definition: cmdutils.h:163
AV_PIX_FMT_YUYV422
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:67
Decoder
Definition: ffplay.c:188
AudioParams::freq
int freq
Definition: ffplay.c:135
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:171
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
isnan
#define isnan(x)
Definition: libm.h:340
AVFilterGraph::filters
AVFilterContext ** filters
Definition: avfilter.h:842
stream_cycle_channel
static void stream_cycle_channel(VideoState *is, int codec_type)
Definition: ffplay.c:3119
VideoState::frame_drops_late
int frame_drops_late
Definition: ffplay.c:259
AV_CODEC_FLAG2_FAST
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:923
VideoState::rdft
RDFTContext * rdft
Definition: ffplay.c:267
AVFormatContext::pb
AVIOContext * pb
I/O context.
Definition: avformat.h:1384
av_log_set_flags
void av_log_set_flags(int arg)
Definition: log.c:390
parseutils.h
frame_queue_unref_item
static void frame_queue_unref_item(Frame *vp)
Definition: ffplay.c:684
FrameQueue::queue
Frame queue[FRAME_QUEUE_SIZE]
Definition: ffplay.c:170
VideoState::last_i_start
int last_i_start
Definition: ffplay.c:266
init_opts
void init_opts(void)
Initialize the cmdutils option system, in particular allocate the *_opts contexts.
Definition: cmdutils.c:85
Decoder::packet_pending
int packet_pending
Definition: ffplay.c:194
cursor_last_shown
static int64_t cursor_last_shown
Definition: ffplay.c:348
AVProgram::stream_index
unsigned int * stream_index
Definition: avformat.h:1268
frame_queue_peek
static Frame * frame_queue_peek(FrameQueue *f)
Definition: ffplay.c:730
OPT_INT64
#define OPT_INT64
Definition: cmdutils.h:170
avfilter_inout_alloc
AVFilterInOut * avfilter_inout_alloc(void)
Allocate a single AVFilterInOut entry.
Definition: graphparser.c:198
Frame::duration
double duration
Definition: ffplay.c:159
DFT_R2C
@ DFT_R2C
Definition: avfft.h:72
lowres
static int lowres
Definition: ffplay.c:335
AV_DICT_DONT_OVERWRITE
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
Definition: dict.h:76
AV_PIX_FMT_RGB8
@ AV_PIX_FMT_RGB8
packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
Definition: pixfmt.h:86
avcodec_open2
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:565
time.h
Frame::pos
int64_t pos
Definition: ffplay.c:160
FFTSample
float FFTSample
Definition: avfft.h:35
avfft.h
VideoState::frame_last_returned_time
double frame_last_returned_time
Definition: ffplay.c:281
set_clock_at
static void set_clock_at(Clock *c, double pts, int serial, double time)
Definition: ffplay.c:1386
toggle_pause
static void toggle_pause(VideoState *is)
Definition: ffplay.c:1499
TextureFormatEntry
Definition: ffplay.c:372
AVFilterGraph
Definition: avfilter.h:840
stream_component_open
static int stream_component_open(VideoState *is, int stream_index)
Definition: ffplay.c:2560
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
av_buffersink_get_channel_layout
uint64_t av_buffersink_get_channel_layout(const AVFilterContext *ctx)
av_packet_ref
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: avpacket.c:608
fp
#define fp
Definition: regdef.h:44
av_packet_move_ref
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
Definition: avpacket.c:655
AV_PIX_FMT_NE
#define AV_PIX_FMT_NE(be, le)
Definition: pixfmt.h:357
exp
int8_t exp
Definition: eval.c:72
VideoState::seek_req
int seek_req
Definition: ffplay.c:211
VideoState::SHOW_MODE_WAVES
@ SHOW_MODE_WAVES
Definition: ffplay.c:262
VideoState::audio_clock
double audio_clock
Definition: ffplay.c:235
VideoState::read_pause_return
int read_pause_return
Definition: ffplay.c:215
event_loop
static void event_loop(VideoState *cur_stream)
Definition: ffplay.c:3261
swresample.h
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
VideoState::ytop
int ytop
Definition: ffplay.c:292
AVCodecParameters::sample_rate
int sample_rate
Audio only.
Definition: avcodec.h:4067
av_get_channel_layout_nb_channels
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
Definition: channel_layout.c:220
av_opt_set_int
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:568
VideoState::sample_array
int16_t sample_array[SAMPLE_ARRAY_SIZE]
Definition: ffplay.c:264
AVFormatContext::nb_streams
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1398
exit_on_mousedown
static int exit_on_mousedown
Definition: ffplay.c:339
AVFilterGraph::scale_sws_opts
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:845
Decoder::next_pts_tb
AVRational next_pts_tb
Definition: ffplay.c:199
AVCodecContext::lowres
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:2804
VideoState::audioq
PacketQueue audioq
Definition: ffplay.c:242
codec_opts
AVDictionary * codec_opts
Definition: cmdutils.c:73
audio_callback_time
static int64_t audio_callback_time
Definition: ffplay.c:361
eval.h
avformat_find_stream_info
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
Read packets of a media file to get stream information.
Definition: utils.c:3588
Frame::format
int format
Definition: ffplay.c:163
get_valid_channel_layout
static int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
Definition: ffplay.c:419
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:1652
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:378
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
AVMediaType
AVMediaType
Definition: avutil.h:199
av_log_set_callback
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:400
AVPacket::size
int size
Definition: avcodec.h:1478
avformat_match_stream_specifier
int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the stream st contained in s is matched by the stream specifier spec.
Definition: utils.c:5291
VideoState::audio_write_buf_size
int audio_write_buf_size
Definition: ffplay.c:249
av_rdft_init
RDFTContext * av_rdft_init(int nbits, enum RDFTransformType trans)
Set up a real FFT.
copy
static void copy(const float *p1, float *p2, const int length)
Definition: vf_vaguedenoiser.c:185
avformat_alloc_context
AVFormatContext * avformat_alloc_context(void)
Allocate an AVFormatContext.
Definition: options.c:144
AVDISCARD_DEFAULT
@ AVDISCARD_DEFAULT
discard useless packets like 0 size packets in avi
Definition: avcodec.h:806
FrameQueue::mutex
SDL_mutex * mutex
Definition: ffplay.c:177
frame_queue_peek_writable
static Frame * frame_queue_peek_writable(FrameQueue *f)
Definition: ffplay.c:745
OPT_AUDIO
#define OPT_AUDIO
Definition: cmdutils.h:166
Frame::sub
AVSubtitle sub
Definition: ffplay.c:156
VideoState::last_audio_stream
int last_audio_stream
Definition: ffplay.c:304
Decoder::pkt
AVPacket pkt
Definition: ffplay.c:189
AVFrame::sample_rate
int sample_rate
Sample rate of the audio data.
Definition: frame.h:467
avfilter_link
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:135
sp
#define sp
Definition: regdef.h:63
start_time
static int64_t start_time
Definition: ffplay.c:331
FFMAX
#define FFMAX(a, b)
Definition: common.h:94
VideoState::SHOW_MODE_NB
@ SHOW_MODE_NB
Definition: ffplay.c:262
AVCodecContext::sample_fmt
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:2233
Frame::serial
int serial
Definition: ffplay.c:157
AVCodecContext::pkt_timebase
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
Definition: avcodec.h:3119
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:59
hypot
static av_const double hypot(double x, double y)
Definition: libm.h:366
uninit_opts
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents.
Definition: cmdutils.c:90
size
int size
Definition: twinvq_data.h:11134
VideoState::xpos
int xpos
Definition: ffplay.c:270
av_make_q
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
read_thread
static int read_thread(void *arg)
Definition: ffplay.c:2742
AV_PIX_FMT_BGR555
#define AV_PIX_FMT_BGR555
Definition: pixfmt.h:380
avformat_seek_file
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
Seek to timestamp ts.
Definition: utils.c:2548
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
Clock::paused
int paused
Definition: ffplay.c:149
rect::h
int h
Definition: f_ebur128.c:91
VideoState::sub_texture
SDL_Texture * sub_texture
Definition: ffplay.c:273
av_guess_frame_rate
AVRational av_guess_frame_rate(AVFormatContext *ctx, AVStream *stream, AVFrame *frame)
Guess the frame rate, based on both the container and codec information.
Definition: utils.c:5107
swr_free
av_cold void swr_free(SwrContext **ss)
Free the given SwrContext and set the pointer to NULL.
Definition: swresample.c:137
VideoState::vid_texture
SDL_Texture * vid_texture
Definition: ffplay.c:274
setup_find_stream_info_opts
AVDictionary ** setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts)
Setup AVCodecContext options for avformat_find_stream_info().
Definition: cmdutils.c:2136
printf
printf("static const uint8_t my_array[100] = {\n")
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:368
VideoState::sample_array_index
int sample_array_index
Definition: ffplay.c:265
wanted_stream_spec
static const char * wanted_stream_spec[AVMEDIA_TYPE_NB]
Definition: ffplay.c:322
infinite_buffer
static int infinite_buffer
Definition: ffplay.c:342
val
const char const char void * val
Definition: avisynth_c.h:863
VideoState::max_frame_duration
double max_frame_duration
Definition: ffplay.c:286
avdevice.h
AV_OPT_SEARCH_CHILDREN
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:556
avcodec_find_decoder
AVCodec * avcodec_find_decoder(enum AVCodecID id)
Find a registered decoder with a matching codec ID.
Definition: allcodecs.c:890
packet_queue_destroy
static void packet_queue_destroy(PacketQueue *q)
Definition: ffplay.c:516
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: avcodec.h:1476
height
#define height
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:360
VideoState::frame_drops_early
int frame_drops_early
Definition: ffplay.c:258
a
SUINT a
SUINT is the C unsigned type that holds a signed int; it is used where a plain unsigned could mislead the reader about the signedness of an operand.
Definition: undefined.txt:41
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
AVFrame::channel_layout
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:472
toggle_mute
static void toggle_mute(VideoState *is)
Definition: ffplay.c:1505
av_format_inject_global_side_data
void av_format_inject_global_side_data(AVFormatContext *s)
This function will cause global side data to be injected in the next packet of each stream as well as...
Definition: utils.c:154
decoder_abort
static void decoder_abort(Decoder *d, FrameQueue *fq)
Definition: ffplay.c:818
video_refresh
static void video_refresh(void *opaque, double *remaining_time)
Definition: ffplay.c:1574
ns
#define ns(max_value, name, subs,...)
Definition: cbs_av1.c:686
seek_interval
static float seek_interval
Definition: ffplay.c:324
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:203
rect::x
int x
Definition: f_ebur128.c:91
VideoState::seek_pos
int64_t seek_pos
Definition: ffplay.c:213
frame_queue_push
static void frame_queue_push(FrameQueue *f)
Definition: ffplay.c:777
audio_dev
static SDL_AudioDeviceID audio_dev
Definition: ffplay.c:370
sigterm_handler
static void sigterm_handler(int sig)
Definition: ffplay.c:1319
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
sample_rates
sample_rates
Definition: ffmpeg_filter.c:191
packet_queue_abort
static void packet_queue_abort(PacketQueue *q)
Definition: ffplay.c:523
video_codec_name
static const char * video_codec_name
Definition: ffplay.c:346
AVCodecContext::channels
int channels
number of audio channels
Definition: avcodec.h:2226
buffersink.h
AVCodec::id
enum AVCodecID id
Definition: avcodec.h:3495
show_banner
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: cmdutils.c:1180
packet_queue_flush
static void packet_queue_flush(PacketQueue *q)
Definition: ffplay.c:498
packet_queue_get
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
Definition: ffplay.c:543
av_get_picture_type_char
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:88
MAX_QUEUE_SIZE
#define MAX_QUEUE_SIZE
Definition: ffplay.c:66
AVCOL_SPC_SMPTE240M
@ AVCOL_SPC_SMPTE240M
functionally identical to above
Definition: pixfmt.h:504
MIN_FRAMES
#define MIN_FRAMES
Definition: ffplay.c:67
VideoState::queue_attachments_req
int queue_attachments_req
Definition: ffplay.c:210
in
Definition: audio_convert.c:326
avcodec_send_packet
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:677
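An illustrative send/receive decode loop (not ffplay's decoder_decode_frame()): one packet is submitted and every frame it yields is drained; dec_ctx, pkt and frame are assumed to be an opened decoder context, an input packet and a pre-allocated AVFrame.

#include <libavcodec/avcodec.h>

static int decode_packet(AVCodecContext *dec_ctx, const AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(dec_ctx, pkt);
    if (ret < 0)
        return ret;                         /* could not submit the packet */
    for (;;) {
        ret = avcodec_receive_frame(dec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;                       /* needs more input / fully drained */
        if (ret < 0)
            return ret;                     /* real decoding error */
        /* ... consume the frame here ... */
        av_frame_unref(frame);
    }
}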
av_find_input_format
ff_const59 AVInputFormat * av_find_input_format(const char *short_name)
Find AVInputFormat based on the short name of the input format.
Definition: format.c:118
FrameQueue::windex
int windex
Definition: ffplay.c:172
VideoState::filename
char * filename
Definition: ffplay.c:291
VideoState::muted
int muted
Definition: ffplay.c:251
Decoder::start_pts
int64_t start_pts
Definition: ffplay.c:196
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:361
Clock::pts
double pts
Definition: ffplay.c:144
VIDEO_PICTURE_QUEUE_SIZE
#define VIDEO_PICTURE_QUEUE_SIZE
Definition: ffplay.c:129
PacketQueue::serial
int serial
Definition: ffplay.c:124
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1470
VideoState::show_mode
enum VideoState::ShowMode show_mode
VideoState::audio_src
struct AudioParams audio_src
Definition: ffplay.c:252
program_birth_year
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffplay.c:64
avformat_open_input
int avformat_open_input(AVFormatContext **ps, const char *url, ff_const59 AVInputFormat *fmt, AVDictionary **options)
Open an input stream and read the header.
Definition: utils.c:540
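A minimal sketch of the open/inspect/close sequence built around avformat_open_input(); the filename parameter is illustrative and error handling is reduced to returning the FFmpeg error code.

#include <libavformat/avformat.h>

static int probe_file(const char *filename)
{
    AVFormatContext *ic = NULL;
    int ret = avformat_open_input(&ic, filename, NULL, NULL);
    if (ret < 0)
        return ret;                              /* could not open or read the header */
    ret = avformat_find_stream_info(ic, NULL);
    if (ret >= 0)
        av_dump_format(ic, 0, filename, 0);      /* print the streams to stderr */
    avformat_close_input(&ic);                   /* also frees ic */
    return ret < 0 ? ret : 0;
}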
VideoState::audio_buf1
uint8_t * audio_buf1
Definition: ffplay.c:245
av_get_bytes_per_sample
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:106
avfilter_graph_parse_ptr
int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters, AVFilterInOut **inputs, AVFilterInOut **outputs, void *log_ctx)
Add a graph described by a string to a graph.
Definition: graphparser.c:538
swr_opts
AVDictionary * swr_opts
Definition: cmdutils.c:72
compute_mod
static int compute_mod(int a, int b)
Definition: ffplay.c:1057
Decoder::start_pts_tb
AVRational start_pts_tb
Definition: ffplay.c:197
VideoState::rdft_data
FFTSample * rdft_data
Definition: ffplay.c:269
AVCodecParameters::height
int height
Definition: avcodec.h:4024
avcodec_parameters_to_context
int avcodec_parameters_to_context(AVCodecContext *codec, const AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
Definition: utils.c:2155
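A sketch, under the same assumptions as above (ic already opened), of turning a stream's AVCodecParameters into an opened decoder context, roughly the shape of what ffplay's stream_component_open() does; the helper name is made up.

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>

static AVCodecContext *open_stream_decoder(AVFormatContext *ic, int stream_index)
{
    AVCodecParameters *par   = ic->streams[stream_index]->codecpar;
    AVCodec           *codec = avcodec_find_decoder(par->codec_id);
    AVCodecContext    *avctx = avcodec_alloc_context3(codec);

    if (!codec || !avctx)
        goto fail;
    if (avcodec_parameters_to_context(avctx, par) < 0 ||
        avcodec_open2(avctx, codec, NULL) < 0)
        goto fail;
    return avctx;
fail:
    avcodec_free_context(&avctx);   /* safe even if avctx is NULL */
    return NULL;
}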
AV_TIME_BASE
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:254
AVFrame::extended_data
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:342
file_iformat
static AVInputFormat * file_iformat
Definition: ffplay.c:310
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
video_open
static int video_open(VideoState *is)
Definition: ffplay.c:1336
RDFTContext
Definition: rdft.h:28
get_sdl_pix_fmt_and_blendmode
static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
Definition: ffplay.c:889
show_status
static int show_status
Definition: ffplay.c:329
opt_format
static int opt_format(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3478
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
AV_PIX_FMT_RGB555
#define AV_PIX_FMT_RGB555
Definition: pixfmt.h:375
AV_OPT_FLAG_DECODING_PARAM
#define AV_OPT_FLAG_DECODING_PARAM
a generic parameter which can be set by the user for demuxing or decoding
Definition: opt.h:277
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:582
borderless
static int borderless
Definition: ffplay.c:326
uint8_t
uint8_t
Definition: audio_convert.c:194
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
update_sample_display
static void update_sample_display(VideoState *is, short *samples, int samples_size)
Definition: ffplay.c:2262
MyAVPacketList
Definition: ffplay.c:112
AV_SAMPLE_FMT_S16
@ AV_SAMPLE_FMT_S16
signed 16 bits
Definition: samplefmt.h:61
tb
#define tb
Definition: regdef.h:68
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:236
AVProgram
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1264
av_read_pause
int av_read_pause(AVFormatContext *s)
Pause a network-based stream (e.g.
Definition: utils.c:4284
len
int len
Definition: vorbis_enc_data.h:452
Frame::frame
AVFrame * frame
Definition: ffplay.c:155
AV_PIX_FMT_BGR565
#define AV_PIX_FMT_BGR565
Definition: pixfmt.h:379
av_samples_get_buffer_size
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
Definition: samplefmt.c:119
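A tiny worked example of av_samples_get_buffer_size(); the parameters (stereo, 1024 samples, S16, align = 1 meaning no padding) are illustrative.

#include <libavutil/samplefmt.h>

static int stereo_s16_size(void)
{
    /* packed S16: 2 channels * 1024 samples * 2 bytes = 4096 bytes */
    return av_samples_get_buffer_size(NULL, 2, 1024, AV_SAMPLE_FMT_S16, 1);
}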
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:60
PacketQueue::nb_packets
int nb_packets
Definition: ffplay.c:120
av_rescale
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:129
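A short sketch of av_rescale() converting a stream timestamp into microseconds; in real code av_rescale_q(pts, tb, AV_TIME_BASE_Q) is the usual shortcut, this form just spells out the a*b/c computation.

#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>

static int64_t pts_to_us(int64_t pts, AVRational tb)
{
    /* pts * (num * AV_TIME_BASE) / den, computed in 64 bits with rounding to nearest */
    return av_rescale(pts, tb.num * (int64_t)AV_TIME_BASE, tb.den);
}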
PacketQueue::first_pkt
MyAVPacketList * first_pkt
Definition: ffplay.c:119
FRAME_QUEUE_SIZE
#define FRAME_QUEUE_SIZE
Definition: ffplay.c:132
frame_queue_peek_readable
static Frame * frame_queue_peek_readable(FrameQueue *f)
Definition: ffplay.c:761
AVIOInterruptCB::opaque
void * opaque
Definition: avio.h:60
AV_PIX_FMT_RGB565
#define AV_PIX_FMT_RGB565
Definition: pixfmt.h:374
parse_loglevel
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:506
av_cmp_q
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
Clock::serial
int serial
Definition: ffplay.c:148
VideoState::height
int height
Definition: ffplay.c:292
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:77
AVStream::disposition
int disposition
AV_DISPOSITION_* bit field.
Definition: avformat.h:923
AVFMT_FLAG_GENPTS
#define AVFMT_FLAG_GENPTS
Generate missing pts even if it requires parsing future frames.
Definition: avformat.h:1474
VideoState::subpq
FrameQueue subpq
Definition: ffplay.c:224
seek_by_bytes
static int seek_by_bytes
Definition: ffplay.c:323
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:870
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
stream_seek
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
Definition: ffplay.c:1472
EXTERNAL_CLOCK_MAX_FRAMES
#define EXTERNAL_CLOCK_MAX_FRAMES
Definition: ffplay.c:69
AVSubtitleRect::h
int h
height of pict, undefined when pict is not set
Definition: avcodec.h:3902
avcodec_flush_buffers
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal decoder state / flush internal buffers.
Definition: decode.c:2028
frame
From the filter design notes: buffered frames must be flushed immediately if a new input produces new frames, and when a frame is wanted on an output the filter should push a queued frame or request frames on its inputs until at least one frame has been pushed.
Definition: filter_design.txt:264
AV_PIX_FMT_0RGB32
#define AV_PIX_FMT_0RGB32
Definition: pixfmt.h:364
video_stream
static AVStream * video_stream
Definition: demuxing_decoding.c:41
audio_open
static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
Definition: ffplay.c:2486
filter_nbthreads
static int filter_nbthreads
Definition: ffplay.c:357
log_callback_help
void log_callback_help(void *ptr, int level, const char *fmt, va_list vl)
Trivial log callback.
Definition: cmdutils.c:99
cursor_hidden
static int cursor_hidden
Definition: ffplay.c:349
VideoState::SHOW_MODE_RDFT
@ SHOW_MODE_RDFT
Definition: ffplay.c:262
find_stream_info
static int find_stream_info
Definition: ffplay.c:356
packet_queue_put_private
static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:427
VideoState::audio_buf_index
int audio_buf_index
Definition: ffplay.c:248
avformat.h
iformat
static AVInputFormat * iformat
Definition: ffprobe.c:257
dict.h
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:383
VideoState::last_paused
int last_paused
Definition: ffplay.c:209
AV_LOG_SKIP_REPEATED
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:345
AV_PIX_FMT_UYVY422
@ AV_PIX_FMT_UYVY422
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
Definition: pixfmt.h:81
AV_DICT_MATCH_CASE
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:69
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen_template.c:38
rdftspeed
double rdftspeed
Definition: ffplay.c:347
MyAVPacketList::serial
int serial
Definition: ffplay.c:115
opt_width
static int opt_width(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3466
main
int main(int argc, char **argv)
Definition: ffplay.c:3666
avformat_network_deinit
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:5033
show_usage
static void show_usage(void)
Definition: ffplay.c:3623
avcodec_find_decoder_by_name
AVCodec * avcodec_find_decoder_by_name(const char *name)
Find a registered decoder with the specified name.
Definition: allcodecs.c:918
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:76
AVCodecContext
main external API structure.
Definition: avcodec.h:1565
AVFrame::height
int height
Definition: frame.h:353
CMDUTILS_COMMON_OPTIONS
#define CMDUTILS_COMMON_OPTIONS
Definition: cmdutils.h:212
PacketQueue::mutex
SDL_mutex * mutex
Definition: ffplay.c:125
packet_queue_start
static void packet_queue_start(PacketQueue *q)
Definition: ffplay.c:534
VideoState::vidclk
Clock vidclk
Definition: ffplay.c:220
audio_codec_name
static const char * audio_codec_name
Definition: ffplay.c:344
get_rotation
double get_rotation(AVStream *st)
Definition: cmdutils.c:2175
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
AV_SYNC_FRAMEDUP_THRESHOLD
#define AV_SYNC_FRAMEDUP_THRESHOLD
Definition: ffplay.c:84
show_mode
static enum ShowMode show_mode
Definition: ffplay.c:343
pkt
static AVPacket pkt
Definition: demuxing_decoding.c:54
PacketQueue::cond
SDL_cond * cond
Definition: ffplay.c:126
sws_freeContext
void sws_freeContext(struct SwsContext *swsContext)
Free the swscaler context swsContext.
Definition: utils.c:2311
PacketQueue::size
int size
Definition: ffplay.c:121
options
static const OptionDef options[]
Definition: ffplay.c:3567
AVInputFormat::flags
int flags
Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, AVFMT_NOTIMESTAMPS,...
Definition: avformat.h:659
AudioParams::channels
int channels
Definition: ffplay.c:136
AVRational::den
int den
Denominator.
Definition: rational.h:60
mode
mode
Definition: ebur128.h:83
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
config.h
VideoState::subtitle_stream
int subtitle_stream
Definition: ffplay.c:276
avfilter.h
VideoState::abort_request
int abort_request
Definition: ffplay.c:206
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:827
flush_pkt
static AVPacket flush_pkt
Definition: ffplay.c:363
VideoState::audio_buf1_size
unsigned int audio_buf1_size
Definition: ffplay.c:247
VideoState::eof
int eof
Definition: ffplay.c:289
AVFMT_NOGENSEARCH
#define AVFMT_NOGENSEARCH
Format does not allow falling back to generic search.
Definition: avformat.h:474
AV_SYNC_THRESHOLD_MAX
#define AV_SYNC_THRESHOLD_MAX
Definition: ffplay.c:82
av_buffersink_get_channels
int av_buffersink_get_channels(const AVFilterContext *ctx)
decoder_destroy
static void decoder_destroy(Decoder *d)
Definition: ffplay.c:679
av_get_packed_sample_fmt
enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt)
Get the packed alternative form of the given sample format.
Definition: samplefmt.c:75
VideoState::read_tid
SDL_Thread * read_tid
Definition: ffplay.c:204
VideoState::audio_volume
int audio_volume
Definition: ffplay.c:250
VideoState::subdec
Decoder subdec
Definition: ffplay.c:229
AVIOContext::eof_reached
int eof_reached
true if reading stopped due to an error or EOF
Definition: avio.h:239
stream_has_enough_packets
static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue)
Definition: ffplay.c:2718
samples
In filter terminology, the word “frame” indicates either a video frame or a group of audio samples.
Definition: filter_design.txt:8
AVFormatContext::duration
int64_t duration
Duration of the stream, in AV_TIME_BASE fractional seconds.
Definition: avformat.h:1457
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
AVPacket::stream_index
int stream_index
Definition: avcodec.h:1479
GROW_ARRAY
#define GROW_ARRAY(array, nb_elems)
Definition: cmdutils.h:619
SUBPICTURE_QUEUE_SIZE
#define SUBPICTURE_QUEUE_SIZE
Definition: ffplay.c:130
AVFilterContext
An instance of a filter.
Definition: avfilter.h:338
input_filename
static const char * input_filename
Definition: ffplay.c:311
stream_toggle_pause
static void stream_toggle_pause(VideoState *is)
Definition: ffplay.c:1486
VideoState::continue_read_thread
SDL_cond * continue_read_thread
Definition: ffplay.c:306
AV_SYNC_AUDIO_MASTER
@ AV_SYNC_AUDIO_MASTER
Definition: ffplay.c:183
av_dict_set_int
int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags)
Convenience wrapper for av_dict_set that converts the value to a string and stores it.
Definition: dict.c:147
toggle_audio_display
static void toggle_audio_display(VideoState *is)
Definition: ffplay.c:3204
AVCodecContext::codec_type
enum AVMediaType codec_type
Definition: avcodec.h:1573
opt_default
int opt_default(void *optctx, const char *opt, const char *arg)
Fallback for options that are not explicitly handled, these will be parsed through AVOptions.
Definition: cmdutils.c:545
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:251
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AVFMT_TS_DISCONT
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:469
new_height
AVS_VideoFrame int int int int new_height
Definition: avisynth_c.h:913
get_video_frame
static int get_video_frame(VideoState *is, AVFrame *frame)
Definition: ffplay.c:1763
av_get_default_channel_layout
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
Definition: channel_layout.c:225
default_width
static int default_width
Definition: ffplay.c:313
AVIOInterruptCB::callback
int(* callback)(void *)
Definition: avio.h:59
VideoState::realtime
int realtime
Definition: ffplay.c:217
VideoState::sub_convert_ctx
struct SwsContext * sub_convert_ctx
Definition: ffplay.c:288
diff
static av_always_inline int diff(const uint32_t a, const uint32_t b)
Definition: vf_palettegen.c:136
OPT_VIDEO
#define OPT_VIDEO
Definition: cmdutils.h:165
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVDictionaryEntry
Definition: dict.h:81
MyAVPacketList::pkt
AVPacket pkt
Definition: ffplay.c:113
AVPacket
This structure stores compressed data.
Definition: avcodec.h:1454
audio_disable
static int audio_disable
Definition: ffplay.c:319
refresh_loop_wait_event
static void refresh_loop_wait_event(VideoState *is, SDL_Event *event)
Definition: ffplay.c:3216
channel_layouts
static const uint16_t channel_layouts[7]
Definition: dca_lbr.c:113
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
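A hedged sketch of the usual AVDictionary lifecycle around av_dict_set() and av_dict_set_int(); the option names and values are illustrative, not something ffplay itself sets.

#include <libavutil/dict.h>

static void build_demux_opts(AVDictionary **opts)
{
    av_dict_set(opts, "rtsp_transport", "tcp", 0);          /* string value */
    av_dict_set_int(opts, "analyzeduration", 5000000, 0);   /* int64 value  */
    /* pass opts to avformat_open_input(); entries it did not consume remain,
     * and the caller releases everything with av_dict_free(opts). */
}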
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:500
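A sketch of the grow-only buffer pattern av_fast_malloc() supports (ffplay uses the same idea for its resampling buffer); buf, buf_size and ensure_capacity() are illustrative names.

#include <stdint.h>
#include <libavutil/error.h>
#include <libavutil/mem.h>

static uint8_t     *buf;
static unsigned int buf_size;

static int ensure_capacity(size_t min_size)
{
    /* reallocates only when min_size exceeds the remembered capacity */
    av_fast_malloc(&buf, &buf_size, min_size);
    return buf ? 0 : AVERROR(ENOMEM);
}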
stream_component_close
static void stream_component_close(VideoState *is, int stream_index)
Definition: ffplay.c:1204
VideoState::subtitleq
PacketQueue subtitleq
Definition: ffplay.c:278
cmdutils.h
cmp_audio_fmts
static int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1, enum AVSampleFormat fmt2, int64_t channel_count2)
Definition: ffplay.c:408
OPT_BOOL
#define OPT_BOOL
Definition: cmdutils.h:162
Decoder::decoder_tid
SDL_Thread * decoder_tid
Definition: ffplay.c:200
parse_time_or_die
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds.
Definition: cmdutils.c:165
framedrop
static int framedrop
Definition: ffplay.c:341
VideoState::audio_stream
int audio_stream
Definition: ffplay.c:231
imgutils.h
VideoState::audio_buf_size
unsigned int audio_buf_size
Definition: ffplay.c:246
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:565
av_rdft_end
void av_rdft_end(RDFTContext *s)
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
MyAVPacketList::next
struct MyAVPacketList * next
Definition: ffplay.c:114
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AVFormatContext::start_time
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds.
Definition: avformat.h:1447
PacketQueue::abort_request
int abort_request
Definition: ffplay.c:123
VideoState::ic
AVFormatContext * ic
Definition: ffplay.c:216
VideoState::viddec
Decoder viddec
Definition: ffplay.c:228
h
h
Definition: vp9dsp_template.c:2038
AVDictionaryEntry::value
char * value
Definition: dict.h:83
AVStream::start_time
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base.
Definition: avformat.h:909
VideoState::audio_buf
uint8_t * audio_buf
Definition: ffplay.c:244
AVFilterGraph::nb_filters
unsigned nb_filters
Definition: avfilter.h:843
avstring.h
frame_queue_peek_last
static Frame * frame_queue_peek_last(FrameQueue *f)
Definition: ffplay.c:740
VideoState::last_vis_time
double last_vis_time
Definition: ffplay.c:271
show_help_options
void show_help_options(const OptionDef *options, const char *msg, int req_flags, int rej_flags, int alt_flags)
Print help for all options matching specified flags.
Definition: cmdutils.c:177
decoder_reorder_pts
static int decoder_reorder_pts
Definition: ffplay.c:336
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
Definition: pixfmt.h:498
VideoState::audio_tgt
struct AudioParams audio_tgt
Definition: ffplay.c:256
audio_stream
static AVStream * audio_stream
Definition: demuxing_decoding.c:41
int
int
Definition: ffmpeg_filter.c:191
SwsContext
Definition: swscale_internal.h:280
VideoState::audclk
Clock audclk
Definition: ffplay.c:219
avfilter_get_class
const AVClass * avfilter_get_class(void)
Definition: avfilter.c:1635
AVFilterInOut
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:1003
FrameQueue::pktq
PacketQueue * pktq
Definition: ffplay.c:179
short
From the filter-writing guide: keep option names simple and lowercase, and keep option descriptions short.
Definition: writing_filters.txt:89
snprintf
#define snprintf
Definition: snprintf.h:34
video_audio_display
static void video_audio_display(VideoState *s)
Definition: ffplay.c:1062
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
AV_SYNC_THRESHOLD_MIN
#define AV_SYNC_THRESHOLD_MIN
Definition: ffplay.c:80
buffersrc.h
AudioParams::bytes_per_sec
int bytes_per_sec
Definition: ffplay.c:140
check_external_clock_speed
static void check_external_clock_speed(VideoState *is)
Definition: ffplay.c:1457
AVSubtitle::start_display_time
uint32_t start_display_time
Definition: avcodec.h:3935
SAMPLE_CORRECTION_PERCENT_MAX
#define SAMPLE_CORRECTION_PERCENT_MAX
Definition: ffplay.c:89
EXTERNAL_CLOCK_SPEED_MAX
#define EXTERNAL_CLOCK_SPEED_MAX
Definition: ffplay.c:93
SWS_BICUBIC
#define SWS_BICUBIC
Definition: swscale.h:60
duration
static int64_t duration
Definition: ffplay.c:332
swscale.h
is_realtime
static int is_realtime(AVFormatContext *s)
Definition: ffplay.c:2725
av_x_if_null
static void * av_x_if_null(const void *p, const void *x)
Return the default pointer x if p is NULL.
Definition: avutil.h:308
Frame::height
int height
Definition: ffplay.c:162
decode_interrupt_cb
static int decode_interrupt_cb(void *ctx)
Definition: ffplay.c:2712
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2438
VideoState::frame_timer
double frame_timer
Definition: ffplay.c:280
VideoState::audio_clock_serial
int audio_clock_serial
Definition: ffplay.c:236
av_init_packet
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:33
av_find_best_stream
int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type, int wanted_stream_nb, int related_stream, AVCodec **decoder_ret, int flags)
Find the "best" stream in the file.
Definition: utils.c:4207
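A minimal sketch of av_find_best_stream() selecting a video stream and its decoder; ic is assumed to be an opened AVFormatContext on which avformat_find_stream_info() has already run.

#include <libavformat/avformat.h>

static int pick_video_stream(AVFormatContext *ic, AVCodec **decoder)
{
    int st = av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, -1, -1, decoder, 0);
    /* st is an index into ic->streams[] on success, or a negative AVERROR
     * such as AVERROR_STREAM_NOT_FOUND / AVERROR_DECODER_NOT_FOUND */
    return st;
}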
avdevice_register_all
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:65
nb_channels
int nb_channels
Definition: channel_layout.c:76
avio_feof
int avio_feof(AVIOContext *s)
Similar to feof() but also returns nonzero on read errors.
Definition: aviobuf.c:358
realloc_texture
static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
Definition: ffplay.c:838
AV_PIX_FMT_RGB444
#define AV_PIX_FMT_RGB444
Definition: pixfmt.h:376
exit_on_keydown
static int exit_on_keydown
Definition: ffplay.c:338