FFmpeg
ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include "config_components.h"
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include <stdint.h>
32 
33 #include "libavutil/avstring.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/mem.h"
37 #include "libavutil/pixdesc.h"
38 #include "libavutil/dict.h"
39 #include "libavutil/fifo.h"
40 #include "libavutil/parseutils.h"
41 #include "libavutil/samplefmt.h"
42 #include "libavutil/time.h"
43 #include "libavutil/bprint.h"
44 #include "libavformat/avformat.h"
45 #include "libavdevice/avdevice.h"
46 #include "libswscale/swscale.h"
47 #include "libavutil/opt.h"
48 #include "libavutil/tx.h"
50 
51 #include "libavfilter/avfilter.h"
52 #include "libavfilter/buffersink.h"
53 #include "libavfilter/buffersrc.h"
54 
55 #include <SDL.h>
56 #include <SDL_thread.h>
57 
58 #include "cmdutils.h"
59 #include "ffplay_renderer.h"
60 #include "opt_common.h"
61 
62 const char program_name[] = "ffplay";
63 const int program_birth_year = 2003;
64 
65 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
66 #define MIN_FRAMES 25
67 #define EXTERNAL_CLOCK_MIN_FRAMES 2
68 #define EXTERNAL_CLOCK_MAX_FRAMES 10
69 
70 /* Minimum SDL audio buffer size, in samples. */
71 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
72 /* Calculate the actual buffer size, keeping in mind not to cause too frequent audio callbacks */
73 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
74 
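/*
 * Illustrative sketch (an assumption, not part of the original listing;
 * example_audio_buffer_samples() is a hypothetical helper): how these two
 * constants are typically combined into an SDL callback buffer size. The
 * buffer holds at least SDL_AUDIO_MIN_BUFFER_SIZE samples, but is grown so
 * that no more than roughly SDL_AUDIO_MAX_CALLBACKS_PER_SEC callbacks fire
 * per second, rounded up to a power of two. For 48000 Hz:
 * 48000 / 30 = 1600 -> 2048 samples, i.e. about 23 callbacks per second.
 */
static int example_audio_buffer_samples(int freq)
{
    int samples = freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC;
    int pow2 = 1;
    while (pow2 < samples) /* round up to a power of two */
        pow2 <<= 1;
    return FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, pow2);
}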
75 /* Step size for volume control in dB */
76 #define SDL_VOLUME_STEP (0.75)
77 
78 /* no AV sync correction is done if below the minimum AV sync threshold */
79 #define AV_SYNC_THRESHOLD_MIN 0.04
80 /* AV sync correction is done if above the maximum AV sync threshold */
81 #define AV_SYNC_THRESHOLD_MAX 0.1
82 /* If a frame duration is longer than this, it will not be duplicated to compensate AV sync */
83 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
84 /* no AV correction is done if too big error */
85 #define AV_NOSYNC_THRESHOLD 10.0
86 
87 /* maximum audio speed change to get correct sync */
88 #define SAMPLE_CORRECTION_PERCENT_MAX 10
89 
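/*
 * Illustrative sketch (an assumption; the clamping itself happens in the
 * audio synchronization code, which is not shown in this excerpt, and
 * example_clip_wanted_samples() is a hypothetical helper): the +/-10%
 * bound caps how far one audio frame may be stretched or shrunk while
 * chasing the master clock. For a 1024-sample frame the corrected size
 * stays within [921, 1126] samples.
 */
static int example_clip_wanted_samples(int nb_samples, int wanted_nb_samples)
{
    int min_nb = nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100;
    int max_nb = nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100;
    return av_clip(wanted_nb_samples, min_nb, max_nb);
}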
90 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
91 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
92 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
93 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
94 
95 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
96 #define AUDIO_DIFF_AVG_NB 20
97 
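/*
 * Illustrative sketch (an assumption; the actual averaging lives in the
 * audio sync code, not shown in this excerpt, and
 * example_update_audio_diff_avg() is a hypothetical helper): the A-V
 * difference is smoothed with a geometric series whose coefficient is
 * picked so that the oldest of the last AUDIO_DIFF_AVG_NB samples still
 * carries about 1% weight: coef = exp(log(0.01) / 20) ~= 0.794.
 */
static double example_update_audio_diff_avg(double *cum, double diff)
{
    double coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
    *cum = diff + coef * *cum;       /* running geometric sum */
    return *cum * (1.0 - coef);      /* normalized average difference */
}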
98 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
99 #define REFRESH_RATE 0.01
100 
101 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
102 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
103 #define SAMPLE_ARRAY_SIZE (8 * 65536)
104 
105 #define CURSOR_HIDE_DELAY 1000000
106 
107 #define USE_ONEPASS_SUBTITLE_RENDER 1
108 
109 typedef struct MyAVPacketList {
110  AVPacket *pkt;
111  int serial;
112 } MyAVPacketList;
113 
114 typedef struct PacketQueue {
115  AVFifo *pkt_list;
116  int nb_packets;
117  int size;
118  int64_t duration;
119  int abort_request;
120  int serial;
121  SDL_mutex *mutex;
122  SDL_cond *cond;
123 } PacketQueue;
124 
125 #define VIDEO_PICTURE_QUEUE_SIZE 3
126 #define SUBPICTURE_QUEUE_SIZE 16
127 #define SAMPLE_QUEUE_SIZE 9
128 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
129 
130 typedef struct AudioParams {
131  int freq;
136 } AudioParams;
137 
138 typedef struct Clock {
139  double pts; /* clock base */
140  double pts_drift; /* clock base minus time at which we updated the clock */
141  double last_updated;
142  double speed;
143  int serial; /* clock is based on a packet with this serial */
144  int paused;
145  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
146 } Clock;
147 
148 typedef struct FrameData {
149  int64_t pkt_pos;
150 } FrameData;
151 
152 /* Common struct for handling all types of decoded data and allocated render buffers. */
153 typedef struct Frame {
154  AVFrame *frame;
155  AVSubtitle sub;
156  int serial;
157  double pts; /* presentation timestamp for the frame */
158  double duration; /* estimated duration of the frame */
159  int64_t pos; /* byte position of the frame in the input file */
160  int width;
161  int height;
162  int format;
163  AVRational sar;
164  int uploaded;
165  int flip_v;
166 } Frame;
167 
168 typedef struct FrameQueue {
169  Frame queue[FRAME_QUEUE_SIZE];
170  int rindex;
171  int windex;
172  int size;
173  int max_size;
174  int keep_last;
175  int rindex_shown;
176  SDL_mutex *mutex;
177  SDL_cond *cond;
178  PacketQueue *pktq;
179 } FrameQueue;
180 
181 enum {
182  AV_SYNC_AUDIO_MASTER, /* default choice */
183  AV_SYNC_VIDEO_MASTER, /* synchronize to the video clock */
184  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
185 };
186 
187 typedef struct Decoder {
188  AVPacket *pkt;
189  PacketQueue *queue;
190  AVCodecContext *avctx;
191  int pkt_serial;
192  int finished;
193  int packet_pending;
194  SDL_cond *empty_queue_cond;
195  int64_t start_pts;
196  AVRational start_pts_tb;
197  int64_t next_pts;
198  AVRational next_pts_tb;
199  SDL_Thread *decoder_tid;
200 } Decoder;
201 
202 typedef struct VideoState {
203  SDL_Thread *read_tid;
207  int paused;
210  int seek_req;
216  int realtime;
217 
221 
225 
229 
231 
233 
234  double audio_clock;
236  double audio_diff_cum; /* used for AV difference average computation */
243  uint8_t *audio_buf;
244  uint8_t *audio_buf1;
245  unsigned int audio_buf_size; /* in bytes */
246  unsigned int audio_buf1_size;
247  int audio_buf_index; /* in bytes */
250  int muted;
257 
258  enum ShowMode {
260  } show_mode;
267  float *real_data;
269  int xpos;
272  SDL_Texture *vis_texture;
273  SDL_Texture *sub_texture;
274  SDL_Texture *vid_texture;
275 
279 
280  double frame_timer;
286  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
288  int eof;
289 
290  char *filename;
292  int step;
293 
295  AVFilterContext *in_video_filter; // the first filter in the video chain
296  AVFilterContext *out_video_filter; // the last filter in the video chain
297  AVFilterContext *in_audio_filter; // the first filter in the audio chain
298  AVFilterContext *out_audio_filter; // the last filter in the audio chain
299  AVFilterGraph *agraph; // audio filter graph
300 
302 
304 } VideoState;
305 
306 /* options specified by the user */
308 static const char *input_filename;
309 static const char *window_title;
310 static int default_width = 640;
311 static int default_height = 480;
312 static int screen_width = 0;
313 static int screen_height = 0;
314 static int screen_left = SDL_WINDOWPOS_CENTERED;
315 static int screen_top = SDL_WINDOWPOS_CENTERED;
316 static int audio_disable;
317 static int video_disable;
318 static int subtitle_disable;
319 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
320 static int seek_by_bytes = -1;
321 static float seek_interval = 10;
322 static int display_disable;
323 static int borderless;
324 static int alwaysontop;
325 static int startup_volume = 100;
326 static int show_status = -1;
330 static int fast = 0;
331 static int genpts = 0;
332 static int lowres = 0;
333 static int decoder_reorder_pts = -1;
334 static int autoexit;
335 static int exit_on_keydown;
336 static int exit_on_mousedown;
337 static int loop = 1;
338 static int framedrop = -1;
339 static int infinite_buffer = -1;
340 static enum ShowMode show_mode = SHOW_MODE_NONE;
341 static const char *audio_codec_name;
342 static const char *subtitle_codec_name;
343 static const char *video_codec_name;
344 double rdftspeed = 0.02;
346 static int cursor_hidden = 0;
347 static const char **vfilters_list = NULL;
348 static int nb_vfilters = 0;
349 static char *afilters = NULL;
350 static int autorotate = 1;
351 static int find_stream_info = 1;
352 static int filter_nbthreads = 0;
353 static int enable_vulkan = 0;
354 static char *vulkan_params = NULL;
355 static char *video_background = NULL;
356 static const char *hwaccel = NULL;
357 
358 /* current context */
359 static int is_full_screen;
361 
362 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
363 
364 static SDL_Window *window;
365 static SDL_Renderer *renderer;
366 static SDL_RendererInfo renderer_info = {0};
367 static SDL_AudioDeviceID audio_dev;
368 
370 
371 static const struct TextureFormatEntry {
375  { AV_PIX_FMT_RGB8, SDL_PIXELFORMAT_RGB332 },
376  { AV_PIX_FMT_RGB444, SDL_PIXELFORMAT_RGB444 },
377  { AV_PIX_FMT_RGB555, SDL_PIXELFORMAT_RGB555 },
378  { AV_PIX_FMT_BGR555, SDL_PIXELFORMAT_BGR555 },
379  { AV_PIX_FMT_RGB565, SDL_PIXELFORMAT_RGB565 },
380  { AV_PIX_FMT_BGR565, SDL_PIXELFORMAT_BGR565 },
381  { AV_PIX_FMT_RGB24, SDL_PIXELFORMAT_RGB24 },
382  { AV_PIX_FMT_BGR24, SDL_PIXELFORMAT_BGR24 },
383  { AV_PIX_FMT_0RGB32, SDL_PIXELFORMAT_RGB888 },
384  { AV_PIX_FMT_0BGR32, SDL_PIXELFORMAT_BGR888 },
385  { AV_PIX_FMT_NE(RGB0, 0BGR), SDL_PIXELFORMAT_RGBX8888 },
386  { AV_PIX_FMT_NE(BGR0, 0RGB), SDL_PIXELFORMAT_BGRX8888 },
387  { AV_PIX_FMT_RGB32, SDL_PIXELFORMAT_ARGB8888 },
388  { AV_PIX_FMT_RGB32_1, SDL_PIXELFORMAT_RGBA8888 },
389  { AV_PIX_FMT_BGR32, SDL_PIXELFORMAT_ABGR8888 },
390  { AV_PIX_FMT_BGR32_1, SDL_PIXELFORMAT_BGRA8888 },
391  { AV_PIX_FMT_YUV420P, SDL_PIXELFORMAT_IYUV },
392  { AV_PIX_FMT_YUYV422, SDL_PIXELFORMAT_YUY2 },
393  { AV_PIX_FMT_UYVY422, SDL_PIXELFORMAT_UYVY },
394 };
395 
396 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
397 {
399  if (ret < 0)
400  return ret;
401 
403  if (!vfilters_list[nb_vfilters - 1])
404  return AVERROR(ENOMEM);
405 
406  return 0;
407 }
408 
409 static inline
410 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
411  enum AVSampleFormat fmt2, int64_t channel_count2)
412 {
413  /* If channel count == 1, planar and non-planar formats are the same */
414  if (channel_count1 == 1 && channel_count2 == 1)
415  return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
416  else
417  return channel_count1 != channel_count2 || fmt1 != fmt2;
418 }
419 
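/*
 * Illustrative example (not part of the original listing;
 * example_cmp_audio_fmts() is a hypothetical helper): with one channel,
 * packed and planar variants of the same sample format are laid out
 * identically in memory, so no reconfiguration is needed; with two or
 * more channels they differ.
 */
static void example_cmp_audio_fmts(void)
{
    int mono   = cmp_audio_fmts(AV_SAMPLE_FMT_S16, 1, AV_SAMPLE_FMT_S16P, 1); /* 0: same layout */
    int stereo = cmp_audio_fmts(AV_SAMPLE_FMT_S16, 2, AV_SAMPLE_FMT_S16P, 2); /* non-zero: differs */
    (void)mono; (void)stereo;
}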
420 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
421 {
422  MyAVPacketList pkt1;
423  int ret;
424 
425  if (q->abort_request)
426  return -1;
427 
428 
429  pkt1.pkt = pkt;
430  pkt1.serial = q->serial;
431 
432  ret = av_fifo_write(q->pkt_list, &pkt1, 1);
433  if (ret < 0)
434  return ret;
435  q->nb_packets++;
436  q->size += pkt1.pkt->size + sizeof(pkt1);
437  q->duration += pkt1.pkt->duration;
438  /* XXX: should duplicate packet data in DV case */
439  SDL_CondSignal(q->cond);
440  return 0;
441 }
442 
444 {
445  AVPacket *pkt1;
446  int ret;
447 
448  pkt1 = av_packet_alloc();
449  if (!pkt1) {
451  return -1;
452  }
453  av_packet_move_ref(pkt1, pkt);
454 
455  SDL_LockMutex(q->mutex);
456  ret = packet_queue_put_private(q, pkt1);
457  SDL_UnlockMutex(q->mutex);
458 
459  if (ret < 0)
460  av_packet_free(&pkt1);
461 
462  return ret;
463 }
464 
465 static int packet_queue_put_nullpacket(PacketQueue *q, AVPacket *pkt, int stream_index)
466 {
467  pkt->stream_index = stream_index;
468  return packet_queue_put(q, pkt);
469 }
470 
471 /* packet queue handling */
473 {
474  memset(q, 0, sizeof(PacketQueue));
476  if (!q->pkt_list)
477  return AVERROR(ENOMEM);
478  q->mutex = SDL_CreateMutex();
479  if (!q->mutex) {
480  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
481  return AVERROR(ENOMEM);
482  }
483  q->cond = SDL_CreateCond();
484  if (!q->cond) {
485  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
486  return AVERROR(ENOMEM);
487  }
488  q->abort_request = 1;
489  return 0;
490 }
491 
493 {
494  MyAVPacketList pkt1;
495 
496  SDL_LockMutex(q->mutex);
497  while (av_fifo_read(q->pkt_list, &pkt1, 1) >= 0)
498  av_packet_free(&pkt1.pkt);
499  q->nb_packets = 0;
500  q->size = 0;
501  q->duration = 0;
502  q->serial++;
503  SDL_UnlockMutex(q->mutex);
504 }
505 
507 {
510  SDL_DestroyMutex(q->mutex);
511  SDL_DestroyCond(q->cond);
512 }
513 
515 {
516  SDL_LockMutex(q->mutex);
517 
518  q->abort_request = 1;
519 
520  SDL_CondSignal(q->cond);
521 
522  SDL_UnlockMutex(q->mutex);
523 }
524 
526 {
527  SDL_LockMutex(q->mutex);
528  q->abort_request = 0;
529  q->serial++;
530  SDL_UnlockMutex(q->mutex);
531 }
532 
533 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
534 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
535 {
536  MyAVPacketList pkt1;
537  int ret;
538 
539  SDL_LockMutex(q->mutex);
540 
541  for (;;) {
542  if (q->abort_request) {
543  ret = -1;
544  break;
545  }
546 
547  if (av_fifo_read(q->pkt_list, &pkt1, 1) >= 0) {
548  q->nb_packets--;
549  q->size -= pkt1.pkt->size + sizeof(pkt1);
550  q->duration -= pkt1.pkt->duration;
551  av_packet_move_ref(pkt, pkt1.pkt);
552  if (serial)
553  *serial = pkt1.serial;
554  av_packet_free(&pkt1.pkt);
555  ret = 1;
556  break;
557  } else if (!block) {
558  ret = 0;
559  break;
560  } else {
561  SDL_CondWait(q->cond, q->mutex);
562  }
563  }
564  SDL_UnlockMutex(q->mutex);
565  return ret;
566 }
567 
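/*
 * Illustrative sketch (an assumption; example_consume_one() is a
 * hypothetical helper): the typical consumer pattern for the queue above.
 * Block until a packet arrives (or the queue is aborted) and remember its
 * serial, so packets queued before the last flush/seek can be recognized
 * and dropped.
 */
static int example_consume_one(PacketQueue *q, AVPacket *pkt)
{
    int serial;
    int ret = packet_queue_get(q, pkt, 1, &serial);
    if (ret < 0)
        return ret;                /* queue was aborted */
    if (serial != q->serial) {     /* stale packet from before a flush */
        av_packet_unref(pkt);
        return 0;
    }
    return 1;                      /* fresh packet, ready for the decoder */
}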
568 static int decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
569  memset(d, 0, sizeof(Decoder));
570  d->pkt = av_packet_alloc();
571  if (!d->pkt)
572  return AVERROR(ENOMEM);
573  d->avctx = avctx;
574  d->queue = queue;
575  d->empty_queue_cond = empty_queue_cond;
577  d->pkt_serial = -1;
578  return 0;
579 }
580 
581 static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
582  int ret = AVERROR(EAGAIN);
583 
584  for (;;) {
585  if (d->queue->serial == d->pkt_serial) {
586  do {
587  if (d->queue->abort_request)
588  return -1;
589 
590  switch (d->avctx->codec_type) {
591  case AVMEDIA_TYPE_VIDEO:
593  if (ret >= 0) {
594  if (decoder_reorder_pts == -1) {
595  frame->pts = frame->best_effort_timestamp;
596  } else if (!decoder_reorder_pts) {
597  frame->pts = frame->pkt_dts;
598  }
599  }
600  break;
601  case AVMEDIA_TYPE_AUDIO:
603  if (ret >= 0) {
604  AVRational tb = (AVRational){1, frame->sample_rate};
605  if (frame->pts != AV_NOPTS_VALUE)
606  frame->pts = av_rescale_q(frame->pts, d->avctx->pkt_timebase, tb);
607  else if (d->next_pts != AV_NOPTS_VALUE)
608  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
609  if (frame->pts != AV_NOPTS_VALUE) {
610  d->next_pts = frame->pts + frame->nb_samples;
611  d->next_pts_tb = tb;
612  }
613  }
614  break;
615  }
616  if (ret == AVERROR_EOF) {
617  d->finished = d->pkt_serial;
619  return 0;
620  }
621  if (ret >= 0)
622  return 1;
623  } while (ret != AVERROR(EAGAIN));
624  }
625 
626  do {
627  if (d->queue->nb_packets == 0)
628  SDL_CondSignal(d->empty_queue_cond);
629  if (d->packet_pending) {
630  d->packet_pending = 0;
631  } else {
632  int old_serial = d->pkt_serial;
633  if (packet_queue_get(d->queue, d->pkt, 1, &d->pkt_serial) < 0)
634  return -1;
635  if (old_serial != d->pkt_serial) {
637  d->finished = 0;
638  d->next_pts = d->start_pts;
639  d->next_pts_tb = d->start_pts_tb;
640  }
641  }
642  if (d->queue->serial == d->pkt_serial)
643  break;
644  av_packet_unref(d->pkt);
645  } while (1);
646 
648  int got_frame = 0;
649  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, d->pkt);
650  if (ret < 0) {
651  ret = AVERROR(EAGAIN);
652  } else {
653  if (got_frame && !d->pkt->data) {
654  d->packet_pending = 1;
655  }
656  ret = got_frame ? 0 : (d->pkt->data ? AVERROR(EAGAIN) : AVERROR_EOF);
657  }
658  av_packet_unref(d->pkt);
659  } else {
660  if (d->pkt->buf && !d->pkt->opaque_ref) {
661  FrameData *fd;
662 
663  d->pkt->opaque_ref = av_buffer_allocz(sizeof(*fd));
664  if (!d->pkt->opaque_ref)
665  return AVERROR(ENOMEM);
666  fd = (FrameData*)d->pkt->opaque_ref->data;
667  fd->pkt_pos = d->pkt->pos;
668  }
669 
670  if (avcodec_send_packet(d->avctx, d->pkt) == AVERROR(EAGAIN)) {
671  av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
672  d->packet_pending = 1;
673  } else {
674  av_packet_unref(d->pkt);
675  }
676  }
677  }
678 }
679 
680 static void decoder_destroy(Decoder *d) {
681  av_packet_free(&d->pkt);
683 }
684 
686 {
687  av_frame_unref(vp->frame);
688  avsubtitle_free(&vp->sub);
689 }
690 
691 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
692 {
693  int i;
694  memset(f, 0, sizeof(FrameQueue));
695  if (!(f->mutex = SDL_CreateMutex())) {
696  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
697  return AVERROR(ENOMEM);
698  }
699  if (!(f->cond = SDL_CreateCond())) {
700  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
701  return AVERROR(ENOMEM);
702  }
703  f->pktq = pktq;
704  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
705  f->keep_last = !!keep_last;
706  for (i = 0; i < f->max_size; i++)
707  if (!(f->queue[i].frame = av_frame_alloc()))
708  return AVERROR(ENOMEM);
709  return 0;
710 }
711 
713 {
714  int i;
715  for (i = 0; i < f->max_size; i++) {
716  Frame *vp = &f->queue[i];
718  av_frame_free(&vp->frame);
719  }
720  SDL_DestroyMutex(f->mutex);
721  SDL_DestroyCond(f->cond);
722 }
723 
725 {
726  SDL_LockMutex(f->mutex);
727  SDL_CondSignal(f->cond);
728  SDL_UnlockMutex(f->mutex);
729 }
730 
731 static Frame *frame_queue_peek(FrameQueue *f)
732 {
733  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
734 }
735 
736 static Frame *frame_queue_peek_next(FrameQueue *f)
737 {
738  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
739 }
740 
741 static Frame *frame_queue_peek_last(FrameQueue *f)
742 {
743  return &f->queue[f->rindex];
744 }
745 
747 {
748  /* wait until we have space to put a new frame */
749  SDL_LockMutex(f->mutex);
750  while (f->size >= f->max_size &&
751  !f->pktq->abort_request) {
752  SDL_CondWait(f->cond, f->mutex);
753  }
754  SDL_UnlockMutex(f->mutex);
755 
756  if (f->pktq->abort_request)
757  return NULL;
758 
759  return &f->queue[f->windex];
760 }
761 
763 {
764  /* wait until we have a readable new frame */
765  SDL_LockMutex(f->mutex);
766  while (f->size - f->rindex_shown <= 0 &&
767  !f->pktq->abort_request) {
768  SDL_CondWait(f->cond, f->mutex);
769  }
770  SDL_UnlockMutex(f->mutex);
771 
772  if (f->pktq->abort_request)
773  return NULL;
774 
775  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
776 }
777 
779 {
780  if (++f->windex == f->max_size)
781  f->windex = 0;
782  SDL_LockMutex(f->mutex);
783  f->size++;
784  SDL_CondSignal(f->cond);
785  SDL_UnlockMutex(f->mutex);
786 }
787 
789 {
790  if (f->keep_last && !f->rindex_shown) {
791  f->rindex_shown = 1;
792  return;
793  }
794  frame_queue_unref_item(&f->queue[f->rindex]);
795  if (++f->rindex == f->max_size)
796  f->rindex = 0;
797  SDL_LockMutex(f->mutex);
798  f->size--;
799  SDL_CondSignal(f->cond);
800  SDL_UnlockMutex(f->mutex);
801 }
802 
803 /* return the number of undisplayed frames in the queue */
804 static int frame_queue_nb_remaining(FrameQueue *f)
805 {
806  return f->size - f->rindex_shown;
807 }
808 
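/*
 * Illustrative note (an assumption; example_frame_queue_indices() is a
 * hypothetical helper): with keep_last set, the frame shown most recently
 * stays at queue[rindex] while rindex_shown is 1, so it can be repainted
 * (e.g. while paused), and the next undisplayed frame sits one slot
 * further. frame_queue_nb_remaining() therefore excludes the kept frame.
 */
static void example_frame_queue_indices(FrameQueue *f)
{
    Frame *last = &f->queue[f->rindex];                                   /* what frame_queue_peek_last() returns */
    Frame *next = &f->queue[(f->rindex + f->rindex_shown) % f->max_size]; /* what frame_queue_peek() returns */
    (void)last; (void)next;
}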
809 /* return last shown position */
811 {
812  Frame *fp = &f->queue[f->rindex];
813  if (f->rindex_shown && fp->serial == f->pktq->serial)
814  return fp->pos;
815  else
816  return -1;
817 }
818 
819 static void decoder_abort(Decoder *d, FrameQueue *fq)
820 {
822  frame_queue_signal(fq);
823  SDL_WaitThread(d->decoder_tid, NULL);
824  d->decoder_tid = NULL;
826 }
827 
828 static inline void fill_rectangle(int x, int y, int w, int h)
829 {
830  SDL_Rect rect;
831  rect.x = x;
832  rect.y = y;
833  rect.w = w;
834  rect.h = h;
835  if (w && h)
836  SDL_RenderFillRect(renderer, &rect);
837 }
838 
839 static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
840 {
841  Uint32 format;
842  int access, w, h;
843  if (!*texture || SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
844  void *pixels;
845  int pitch;
846  if (*texture)
847  SDL_DestroyTexture(*texture);
848  if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
849  return -1;
850  if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
851  return -1;
852  if (init_texture) {
853  if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
854  return -1;
855  memset(pixels, 0, pitch * new_height);
856  SDL_UnlockTexture(*texture);
857  }
858  av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n", new_width, new_height, SDL_GetPixelFormatName(new_format));
859  }
860  return 0;
861 }
862 
863 static void calculate_display_rect(SDL_Rect *rect,
864  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
865  int pic_width, int pic_height, AVRational pic_sar)
866 {
867  AVRational aspect_ratio = pic_sar;
868  int64_t width, height, x, y;
869 
870  if (av_cmp_q(aspect_ratio, av_make_q(0, 1)) <= 0)
871  aspect_ratio = av_make_q(1, 1);
872 
873  aspect_ratio = av_mul_q(aspect_ratio, av_make_q(pic_width, pic_height));
874 
875  /* XXX: we suppose the screen has a 1.0 pixel ratio */
876  height = scr_height;
877  width = av_rescale(height, aspect_ratio.num, aspect_ratio.den) & ~1;
878  if (width > scr_width) {
879  width = scr_width;
880  height = av_rescale(width, aspect_ratio.den, aspect_ratio.num) & ~1;
881  }
882  x = (scr_width - width) / 2;
883  y = (scr_height - height) / 2;
884  rect->x = scr_xleft + x;
885  rect->y = scr_ytop + y;
886  rect->w = FFMAX((int)width, 1);
887  rect->h = FFMAX((int)height, 1);
888 }
889 
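/*
 * Worked example (illustrative, not from the original listing): a
 * 1920x1080 frame with square pixels inside a 1280x1024 area. The display
 * aspect ratio is 16:9, so a height of 1024 would need a width of ~1820
 * (> 1280); the width is therefore capped at 1280 and the height becomes
 * 1280 * 9 / 16 = 720, centered vertically at y = 152.
 */
static void example_display_rect(void)
{
    SDL_Rect r;
    calculate_display_rect(&r, 0, 0, 1280, 1024, 1920, 1080, av_make_q(1, 1));
    /* r == { .x = 0, .y = 152, .w = 1280, .h = 720 } */
}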
890 static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
891 {
892  int i;
893  *sdl_blendmode = SDL_BLENDMODE_NONE;
894  *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN;
895  if (format == AV_PIX_FMT_RGB32 ||
899  *sdl_blendmode = SDL_BLENDMODE_BLEND;
900  for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map); i++) {
902  *sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt;
903  return;
904  }
905  }
906 }
907 
908 static int upload_texture(SDL_Texture **tex, AVFrame *frame)
909 {
910  int ret = 0;
911  Uint32 sdl_pix_fmt;
912  SDL_BlendMode sdl_blendmode;
913  get_sdl_pix_fmt_and_blendmode(frame->format, &sdl_pix_fmt, &sdl_blendmode);
914  if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
915  return -1;
916  switch (sdl_pix_fmt) {
917  case SDL_PIXELFORMAT_IYUV:
918  if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
919  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0],
920  frame->data[1], frame->linesize[1],
921  frame->data[2], frame->linesize[2]);
922  } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
923  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0],
924  frame->data[1] + frame->linesize[1] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[1],
925  frame->data[2] + frame->linesize[2] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[2]);
926  } else {
927  av_log(NULL, AV_LOG_ERROR, "Mixed negative and positive linesizes are not supported.\n");
928  return -1;
929  }
930  break;
931  default:
932  if (frame->linesize[0] < 0) {
933  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
934  } else {
935  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0], frame->linesize[0]);
936  }
937  break;
938  }
939  return ret;
940 }
941 
946 };
947 
951 };
952 
954 {
955 #if SDL_VERSION_ATLEAST(2,0,8)
956  SDL_YUV_CONVERSION_MODE mode = SDL_YUV_CONVERSION_AUTOMATIC;
957  if (frame && (frame->format == AV_PIX_FMT_YUV420P || frame->format == AV_PIX_FMT_YUYV422 || frame->format == AV_PIX_FMT_UYVY422)) {
958  if (frame->color_range == AVCOL_RANGE_JPEG)
959  mode = SDL_YUV_CONVERSION_JPEG;
960  else if (frame->colorspace == AVCOL_SPC_BT709)
961  mode = SDL_YUV_CONVERSION_BT709;
962  else if (frame->colorspace == AVCOL_SPC_BT470BG || frame->colorspace == AVCOL_SPC_SMPTE170M)
963  mode = SDL_YUV_CONVERSION_BT601;
964  }
965  SDL_SetYUVConversionMode(mode); /* FIXME: no support for linear transfer */
966 #endif
967 }
968 
970 {
971  const int tile_size = VIDEO_BACKGROUND_TILE_SIZE;
972  SDL_Rect *rect = &is->render_params.target_rect;
973  SDL_BlendMode blendMode;
974 
975  if (!SDL_GetTextureBlendMode(is->vid_texture, &blendMode) && blendMode == SDL_BLENDMODE_BLEND) {
976  switch (is->render_params.video_background_type) {
978  SDL_SetRenderDrawColor(renderer, 237, 237, 237, 255);
979  fill_rectangle(rect->x, rect->y, rect->w, rect->h);
980  SDL_SetRenderDrawColor(renderer, 222, 222, 222, 255);
981  for (int x = 0; x < rect->w; x += tile_size * 2)
982  fill_rectangle(rect->x + x, rect->y, FFMIN(tile_size, rect->w - x), rect->h);
983  for (int y = 0; y < rect->h; y += tile_size * 2)
984  fill_rectangle(rect->x, rect->y + y, rect->w, FFMIN(tile_size, rect->h - y));
985  SDL_SetRenderDrawColor(renderer, 237, 237, 237, 255);
986  for (int y = 0; y < rect->h; y += tile_size * 2) {
987  int h = FFMIN(tile_size, rect->h - y);
988  for (int x = 0; x < rect->w; x += tile_size * 2)
989  fill_rectangle(x + rect->x, y + rect->y, FFMIN(tile_size, rect->w - x), h);
990  }
991  break;
992  case VIDEO_BACKGROUND_COLOR: {
993  const uint8_t *c = is->render_params.video_background_color;
994  SDL_SetRenderDrawColor(renderer, c[0], c[1], c[2], c[3]);
995  fill_rectangle(rect->x, rect->y, rect->w, rect->h);
996  break;
997  }
999  SDL_SetTextureBlendMode(is->vid_texture, SDL_BLENDMODE_NONE);
1000  break;
1001  }
1002  }
1003 }
1004 
1006 {
1007  Frame *vp;
1008  Frame *sp = NULL;
1009  SDL_Rect *rect = &is->render_params.target_rect;
1010 
1011  vp = frame_queue_peek_last(&is->pictq);
1012  calculate_display_rect(rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
1013  if (vk_renderer) {
1014  vk_renderer_display(vk_renderer, vp->frame, &is->render_params);
1015  return;
1016  }
1017 
1018  if (is->subtitle_st) {
1019  if (frame_queue_nb_remaining(&is->subpq) > 0) {
1020  sp = frame_queue_peek(&is->subpq);
1021 
1022  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
1023  if (!sp->uploaded) {
1024  uint8_t* pixels[4];
1025  int pitch[4];
1026  int i;
1027  if (!sp->width || !sp->height) {
1028  sp->width = vp->width;
1029  sp->height = vp->height;
1030  }
1031  if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
1032  return;
1033 
1034  for (i = 0; i < sp->sub.num_rects; i++) {
1035  AVSubtitleRect *sub_rect = sp->sub.rects[i];
1036 
1037  sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
1038  sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
1039  sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x);
1040  sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
1041 
1042  is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
1043  sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8,
1044  sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA,
1045  0, NULL, NULL, NULL);
1046  if (!is->sub_convert_ctx) {
1047  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1048  return;
1049  }
1050  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) {
1051  sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
1052  0, sub_rect->h, pixels, pitch);
1053  SDL_UnlockTexture(is->sub_texture);
1054  }
1055  }
1056  sp->uploaded = 1;
1057  }
1058  } else
1059  sp = NULL;
1060  }
1061  }
1062 
1064 
1065  if (!vp->uploaded) {
1066  if (upload_texture(&is->vid_texture, vp->frame) < 0) {
1068  return;
1069  }
1070  vp->uploaded = 1;
1071  vp->flip_v = vp->frame->linesize[0] < 0;
1072  }
1073 
1075  SDL_RenderCopyEx(renderer, is->vid_texture, NULL, rect, 0, NULL, vp->flip_v ? SDL_FLIP_VERTICAL : 0);
1077  if (sp) {
1078 #if USE_ONEPASS_SUBTITLE_RENDER
1079  SDL_RenderCopy(renderer, is->sub_texture, NULL, rect);
1080 #else
1081  int i;
1082  double xratio = (double)rect->w / (double)sp->width;
1083  double yratio = (double)rect->h / (double)sp->height;
1084  for (i = 0; i < sp->sub.num_rects; i++) {
1085  SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
1086  SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
1087  .y = rect.y + sub_rect->y * yratio,
1088  .w = sub_rect->w * xratio,
1089  .h = sub_rect->h * yratio};
1090  SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
1091  }
1092 #endif
1093  }
1094 }
1095 
1096 static inline int compute_mod(int a, int b)
1097 {
1098  return a < 0 ? a%b + b : a%b;
1099 }
1100 
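/*
 * Illustrative examples (not part of the original listing): compute_mod()
 * is a floor modulo, so negative indices wrap back into [0, b). It is used
 * to step backwards through the circular sample_array.
 */
static void example_compute_mod(void)
{
    int a = compute_mod(-3, SAMPLE_ARRAY_SIZE);  /* SAMPLE_ARRAY_SIZE - 3 */
    int b = compute_mod(-7, 5);                  /* 3, since -7 % 5 == -2 in C */
    (void)a; (void)b;
}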
1102 {
1103  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1104  int ch, channels, h, h2;
1105  int64_t time_diff;
1106  int rdft_bits, nb_freq;
1107 
1108  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1109  ;
1110  nb_freq = 1 << (rdft_bits - 1);
1111 
1112  /* compute display index : center on currently output samples */
1113  channels = s->audio_tgt.ch_layout.nb_channels;
1114  nb_display_channels = channels;
1115  if (!s->paused) {
1116  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1117  n = 2 * channels;
1118  delay = s->audio_write_buf_size;
1119  delay /= n;
1120 
1121  /* to be more precise, we take into account the time spent since
1122  the last buffer computation */
1123  if (audio_callback_time) {
1124  time_diff = av_gettime_relative() - audio_callback_time;
1125  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1126  }
1127 
1128  delay += 2 * data_used;
1129  if (delay < data_used)
1130  delay = data_used;
1131 
1132  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1133  if (s->show_mode == SHOW_MODE_WAVES) {
1134  h = INT_MIN;
1135  for (i = 0; i < 1000; i += channels) {
1136  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1137  int a = s->sample_array[idx];
1138  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1139  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1140  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1141  int score = a - d;
1142  if (h < score && (b ^ c) < 0) {
1143  h = score;
1144  i_start = idx;
1145  }
1146  }
1147  }
1148 
1149  s->last_i_start = i_start;
1150  } else {
1151  i_start = s->last_i_start;
1152  }
1153 
1154  if (s->show_mode == SHOW_MODE_WAVES) {
1155  SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
1156 
1157  /* total height for one channel */
1158  h = s->height / nb_display_channels;
1159  /* graph height / 2 */
1160  h2 = (h * 9) / 20;
1161  for (ch = 0; ch < nb_display_channels; ch++) {
1162  i = i_start + ch;
1163  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1164  for (x = 0; x < s->width; x++) {
1165  y = (s->sample_array[i] * h2) >> 15;
1166  if (y < 0) {
1167  y = -y;
1168  ys = y1 - y;
1169  } else {
1170  ys = y1;
1171  }
1172  fill_rectangle(s->xleft + x, ys, 1, y);
1173  i += channels;
1174  if (i >= SAMPLE_ARRAY_SIZE)
1175  i -= SAMPLE_ARRAY_SIZE;
1176  }
1177  }
1178 
1179  SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
1180 
1181  for (ch = 1; ch < nb_display_channels; ch++) {
1182  y = s->ytop + ch * h;
1183  fill_rectangle(s->xleft, y, s->width, 1);
1184  }
1185  } else {
1186  int err = 0;
1187  if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
1188  return;
1189 
1190  if (s->xpos >= s->width)
1191  s->xpos = 0;
1192  nb_display_channels= FFMIN(nb_display_channels, 2);
1193  if (rdft_bits != s->rdft_bits) {
1194  const float rdft_scale = 1.0;
1195  av_tx_uninit(&s->rdft);
1196  av_freep(&s->real_data);
1197  av_freep(&s->rdft_data);
1198  s->rdft_bits = rdft_bits;
1199  s->real_data = av_malloc_array(nb_freq, 4 *sizeof(*s->real_data));
1200  s->rdft_data = av_malloc_array(nb_freq + 1, 2 *sizeof(*s->rdft_data));
1201  err = av_tx_init(&s->rdft, &s->rdft_fn, AV_TX_FLOAT_RDFT,
1202  0, 1 << rdft_bits, &rdft_scale, 0);
1203  }
1204  if (err < 0 || !s->rdft_data) {
1205  av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1206  s->show_mode = SHOW_MODE_WAVES;
1207  } else {
1208  float *data_in[2];
1209  AVComplexFloat *data[2];
1210  SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
1211  uint32_t *pixels;
1212  int pitch;
1213  for (ch = 0; ch < nb_display_channels; ch++) {
1214  data_in[ch] = s->real_data + 2 * nb_freq * ch;
1215  data[ch] = s->rdft_data + nb_freq * ch;
1216  i = i_start + ch;
1217  for (x = 0; x < 2 * nb_freq; x++) {
1218  double w = (x-nb_freq) * (1.0 / nb_freq);
1219  data_in[ch][x] = s->sample_array[i] * (1.0 - w * w);
1220  i += channels;
1221  if (i >= SAMPLE_ARRAY_SIZE)
1222  i -= SAMPLE_ARRAY_SIZE;
1223  }
1224  s->rdft_fn(s->rdft, data[ch], data_in[ch], sizeof(float));
1225  data[ch][0].im = data[ch][nb_freq].re;
1226  data[ch][nb_freq].re = 0;
1227  }
1228  /* Least efficient way to do this, we should of course
1229  * directly access it but it is more than fast enough. */
1230  if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
1231  pitch >>= 2;
1232  pixels += pitch * s->height;
1233  for (y = 0; y < s->height; y++) {
1234  double w = 1 / sqrt(nb_freq);
1235  int a = sqrt(w * sqrt(data[0][y].re * data[0][y].re + data[0][y].im * data[0][y].im));
1236  int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][y].re, data[1][y].im))
1237  : a;
1238  a = FFMIN(a, 255);
1239  b = FFMIN(b, 255);
1240  pixels -= pitch;
1241  *pixels = (a << 16) + (b << 8) + ((a+b) >> 1);
1242  }
1243  SDL_UnlockTexture(s->vis_texture);
1244  }
1245  SDL_RenderCopy(renderer, s->vis_texture, NULL, NULL);
1246  }
1247  if (!s->paused)
1248  s->xpos++;
1249  }
1250 }
1251 
1252 static void stream_component_close(VideoState *is, int stream_index)
1253 {
1254  AVFormatContext *ic = is->ic;
1255  AVCodecParameters *codecpar;
1256 
1257  if (stream_index < 0 || stream_index >= ic->nb_streams)
1258  return;
1259  codecpar = ic->streams[stream_index]->codecpar;
1260 
1261  switch (codecpar->codec_type) {
1262  case AVMEDIA_TYPE_AUDIO:
1263  decoder_abort(&is->auddec, &is->sampq);
1264  SDL_CloseAudioDevice(audio_dev);
1265  decoder_destroy(&is->auddec);
1266  swr_free(&is->swr_ctx);
1267  av_freep(&is->audio_buf1);
1268  is->audio_buf1_size = 0;
1269  is->audio_buf = NULL;
1270 
1271  if (is->rdft) {
1272  av_tx_uninit(&is->rdft);
1273  av_freep(&is->real_data);
1274  av_freep(&is->rdft_data);
1275  is->rdft = NULL;
1276  is->rdft_bits = 0;
1277  }
1278  break;
1279  case AVMEDIA_TYPE_VIDEO:
1280  decoder_abort(&is->viddec, &is->pictq);
1281  decoder_destroy(&is->viddec);
1282  break;
1283  case AVMEDIA_TYPE_SUBTITLE:
1284  decoder_abort(&is->subdec, &is->subpq);
1285  decoder_destroy(&is->subdec);
1286  break;
1287  default:
1288  break;
1289  }
1290 
1291  ic->streams[stream_index]->discard = AVDISCARD_ALL;
1292  switch (codecpar->codec_type) {
1293  case AVMEDIA_TYPE_AUDIO:
1294  is->audio_st = NULL;
1295  is->audio_stream = -1;
1296  break;
1297  case AVMEDIA_TYPE_VIDEO:
1298  is->video_st = NULL;
1299  is->video_stream = -1;
1300  break;
1301  case AVMEDIA_TYPE_SUBTITLE:
1302  is->subtitle_st = NULL;
1303  is->subtitle_stream = -1;
1304  break;
1305  default:
1306  break;
1307  }
1308 }
1309 
1311 {
1312  /* XXX: use a special url_shutdown call to abort parse cleanly */
1313  is->abort_request = 1;
1314  SDL_WaitThread(is->read_tid, NULL);
1315 
1316  /* close each stream */
1317  if (is->audio_stream >= 0)
1318  stream_component_close(is, is->audio_stream);
1319  if (is->video_stream >= 0)
1320  stream_component_close(is, is->video_stream);
1321  if (is->subtitle_stream >= 0)
1322  stream_component_close(is, is->subtitle_stream);
1323 
1324  avformat_close_input(&is->ic);
1325 
1326  packet_queue_destroy(&is->videoq);
1327  packet_queue_destroy(&is->audioq);
1328  packet_queue_destroy(&is->subtitleq);
1329 
1330  /* free all pictures */
1331  frame_queue_destroy(&is->pictq);
1332  frame_queue_destroy(&is->sampq);
1333  frame_queue_destroy(&is->subpq);
1334  SDL_DestroyCond(is->continue_read_thread);
1335  sws_freeContext(is->sub_convert_ctx);
1336  av_free(is->filename);
1337  if (is->vis_texture)
1338  SDL_DestroyTexture(is->vis_texture);
1339  if (is->vid_texture)
1340  SDL_DestroyTexture(is->vid_texture);
1341  if (is->sub_texture)
1342  SDL_DestroyTexture(is->sub_texture);
1343  av_free(is);
1344 }
1345 
1346 static void do_exit(VideoState *is)
1347 {
1348  if (is) {
1349  stream_close(is);
1350  }
1351  if (renderer)
1352  SDL_DestroyRenderer(renderer);
1353  if (vk_renderer)
1355  if (window)
1356  SDL_DestroyWindow(window);
1357  uninit_opts();
1358  for (int i = 0; i < nb_vfilters; i++)
1366  if (show_status)
1367  printf("\n");
1368  SDL_Quit();
1369  av_log(NULL, AV_LOG_QUIET, "%s", "");
1370  exit(0);
1371 }
1372 
1373 static void sigterm_handler(int sig)
1374 {
1375  exit(123);
1376 }
1377 
1379 {
1380  SDL_Rect rect;
1381  int max_width = screen_width ? screen_width : INT_MAX;
1382  int max_height = screen_height ? screen_height : INT_MAX;
1383  if (max_width == INT_MAX && max_height == INT_MAX)
1384  max_height = height;
1385  calculate_display_rect(&rect, 0, 0, max_width, max_height, width, height, sar);
1386  default_width = rect.w;
1387  default_height = rect.h;
1388 }
1389 
1391 {
1392  int w,h;
1393 
1396 
1397  if (!window_title)
1399  SDL_SetWindowTitle(window, window_title);
1400 
1401  SDL_SetWindowSize(window, w, h);
1402  SDL_SetWindowPosition(window, screen_left, screen_top);
1403  if (is_full_screen)
1404  SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP);
1405  SDL_ShowWindow(window);
1406 
1407  is->width = w;
1408  is->height = h;
1409 
1410  return 0;
1411 }
1412 
1413 /* display the current picture, if any */
1415 {
1416  if (!is->width)
1417  video_open(is);
1418 
1419  SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1420  SDL_RenderClear(renderer);
1421  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1423  else if (is->video_st)
1425  SDL_RenderPresent(renderer);
1426 }
1427 
1428 static double get_clock(Clock *c)
1429 {
1430  if (*c->queue_serial != c->serial)
1431  return NAN;
1432  if (c->paused) {
1433  return c->pts;
1434  } else {
1435  double time = av_gettime_relative() / 1000000.0;
1436  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1437  }
1438 }
1439 
1440 static void set_clock_at(Clock *c, double pts, int serial, double time)
1441 {
1442  c->pts = pts;
1443  c->last_updated = time;
1444  c->pts_drift = c->pts - time;
1445  c->serial = serial;
1446 }
1447 
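/*
 * Worked example (illustrative): set_clock_at() stores
 *     pts_drift = pts - last_updated,
 * so for speed == 1.0 get_clock() returns
 *     pts_drift + time = pts + (time - last_updated),
 * i.e. the stored pts advanced by the wall-clock time elapsed since the
 * last update. The extra term -(time - last_updated) * (1.0 - speed)
 * scales that elapsed time, so the clock effectively advances by
 * (time - last_updated) * speed; a clock running at speed 1.01 gains
 * 10 ms per second of playback.
 */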
1448 static void set_clock(Clock *c, double pts, int serial)
1449 {
1450  double time = av_gettime_relative() / 1000000.0;
1451  set_clock_at(c, pts, serial, time);
1452 }
1453 
1454 static void set_clock_speed(Clock *c, double speed)
1455 {
1456  set_clock(c, get_clock(c), c->serial);
1457  c->speed = speed;
1458 }
1459 
1460 static void init_clock(Clock *c, int *queue_serial)
1461 {
1462  c->speed = 1.0;
1463  c->paused = 0;
1464  c->queue_serial = queue_serial;
1465  set_clock(c, NAN, -1);
1466 }
1467 
1468 static void sync_clock_to_slave(Clock *c, Clock *slave)
1469 {
1470  double clock = get_clock(c);
1471  double slave_clock = get_clock(slave);
1472  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1473  set_clock(c, slave_clock, slave->serial);
1474 }
1475 
1477  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1478  if (is->video_st)
1479  return AV_SYNC_VIDEO_MASTER;
1480  else
1481  return AV_SYNC_AUDIO_MASTER;
1482  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1483  if (is->audio_st)
1484  return AV_SYNC_AUDIO_MASTER;
1485  else
1486  return AV_SYNC_EXTERNAL_CLOCK;
1487  } else {
1488  return AV_SYNC_EXTERNAL_CLOCK;
1489  }
1490 }
1491 
1492 /* get the current master clock value */
1494 {
1495  double val;
1496 
1497  switch (get_master_sync_type(is)) {
1498  case AV_SYNC_VIDEO_MASTER:
1499  val = get_clock(&is->vidclk);
1500  break;
1501  case AV_SYNC_AUDIO_MASTER:
1502  val = get_clock(&is->audclk);
1503  break;
1504  default:
1505  val = get_clock(&is->extclk);
1506  break;
1507  }
1508  return val;
1509 }
1510 
1512  if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES ||
1513  is->audio_stream >= 0 && is->audioq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES) {
1515  } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) &&
1516  (is->audio_stream < 0 || is->audioq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES)) {
1518  } else {
1519  double speed = is->extclk.speed;
1520  if (speed != 1.0)
1521  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1522  }
1523 }
1524 
1525 /* seek in the stream */
1526 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int by_bytes)
1527 {
1528  if (!is->seek_req) {
1529  is->seek_pos = pos;
1530  is->seek_rel = rel;
1531  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1532  if (by_bytes)
1533  is->seek_flags |= AVSEEK_FLAG_BYTE;
1534  is->seek_req = 1;
1535  SDL_CondSignal(is->continue_read_thread);
1536  }
1537 }
1538 
1539 /* pause or resume the video */
1541 {
1542  if (is->paused) {
1543  is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1544  if (is->read_pause_return != AVERROR(ENOSYS)) {
1545  is->vidclk.paused = 0;
1546  }
1547  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1548  }
1549  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1550  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1551 }
1552 
1554 {
1556  is->step = 0;
1557 }
1558 
1560 {
1561  is->muted = !is->muted;
1562 }
1563 
1564 static void update_volume(VideoState *is, int sign, double step)
1565 {
1566  double volume_level = is->audio_volume ? (20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)) : -1000.0;
1567  int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (volume_level + sign * step) / 20.0));
1568  is->audio_volume = av_clip(is->audio_volume == new_volume ? (is->audio_volume + sign) : new_volume, 0, SDL_MIX_MAXVOLUME);
1569 }
1570 
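/*
 * Worked example (illustrative): SDL volume is linear in
 * [0, SDL_MIX_MAXVOLUME]. At full volume (128) the level is
 * 20 * log10(128 / 128) = 0 dB; one SDL_VOLUME_STEP (0.75 dB) down gives
 * 128 * 10^(-0.75 / 20) ~= 117.4 -> 117. The av_clip() together with the
 * "audio_volume + sign" fallback guarantees that each key press moves the
 * integer volume by at least one unit, even near the bottom of the scale
 * where a 0.75 dB step maps to less than one SDL unit.
 */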
1572 {
1573  /* if the stream is paused unpause it, then step */
1574  if (is->paused)
1576  is->step = 1;
1577 }
1578 
1579 static double compute_target_delay(double delay, VideoState *is)
1580 {
1581  double sync_threshold, diff = 0;
1582 
1583  /* update delay to follow master synchronisation source */
1585  /* if video is slave, we try to correct big delays by
1586  duplicating or deleting a frame */
1587  diff = get_clock(&is->vidclk) - get_master_clock(is);
1588 
1589  /* skip or repeat frame. We take into account the
1590  delay to compute the threshold. I still don't know
1591  if it is the best guess */
1592  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1593  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1594  if (diff <= -sync_threshold)
1595  delay = FFMAX(0, delay + diff);
1596  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1597  delay = delay + diff;
1598  else if (diff >= sync_threshold)
1599  delay = 2 * delay;
1600  }
1601  }
1602 
1603  av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
1604  delay, -diff);
1605 
1606  return delay;
1607 }
1608 
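/*
 * Worked example (illustrative): with a nominal frame delay of 40 ms the
 * sync threshold clamps to 0.04 s. If the video clock lags the master by
 * 100 ms (diff = -0.1), delay becomes FFMAX(0, 0.04 - 0.1) = 0 and the
 * next frame is shown immediately to catch up. If the video clock is
 * 100 ms ahead (diff = +0.1), 0.04 s is below AV_SYNC_FRAMEDUP_THRESHOLD,
 * so the delay is doubled to 80 ms and the current frame stays on screen
 * roughly twice as long.
 */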
1609 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1610  if (vp->serial == nextvp->serial) {
1611  double duration = nextvp->pts - vp->pts;
1612  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1613  return vp->duration;
1614  else
1615  return duration;
1616  } else {
1617  return 0.0;
1618  }
1619 }
1620 
1621 static void update_video_pts(VideoState *is, double pts, int serial)
1622 {
1623  /* update current video pts */
1624  set_clock(&is->vidclk, pts, serial);
1625  sync_clock_to_slave(&is->extclk, &is->vidclk);
1626 }
1627 
1628 /* called to display each frame */
1629 static void video_refresh(void *opaque, double *remaining_time)
1630 {
1631  VideoState *is = opaque;
1632  double time;
1633 
1634  Frame *sp, *sp2;
1635 
1636  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1638 
1639  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1640  time = av_gettime_relative() / 1000000.0;
1641  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1642  video_display(is);
1643  is->last_vis_time = time;
1644  }
1645  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1646  }
1647 
1648  if (is->video_st) {
1649 retry:
1650  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1651  // nothing to do, no picture to display in the queue
1652  } else {
1653  double last_duration, duration, delay;
1654  Frame *vp, *lastvp;
1655 
1656  /* dequeue the picture */
1657  lastvp = frame_queue_peek_last(&is->pictq);
1658  vp = frame_queue_peek(&is->pictq);
1659 
1660  if (vp->serial != is->videoq.serial) {
1661  frame_queue_next(&is->pictq);
1662  goto retry;
1663  }
1664 
1665  if (lastvp->serial != vp->serial)
1666  is->frame_timer = av_gettime_relative() / 1000000.0;
1667 
1668  if (is->paused)
1669  goto display;
1670 
1671  /* compute nominal last_duration */
1672  last_duration = vp_duration(is, lastvp, vp);
1673  delay = compute_target_delay(last_duration, is);
1674 
1675  time= av_gettime_relative()/1000000.0;
1676  if (time < is->frame_timer + delay) {
1677  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1678  goto display;
1679  }
1680 
1681  is->frame_timer += delay;
1682  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1683  is->frame_timer = time;
1684 
1685  SDL_LockMutex(is->pictq.mutex);
1686  if (!isnan(vp->pts))
1687  update_video_pts(is, vp->pts, vp->serial);
1688  SDL_UnlockMutex(is->pictq.mutex);
1689 
1690  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1691  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1692  duration = vp_duration(is, vp, nextvp);
1693  if(!is->step && (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1694  is->frame_drops_late++;
1695  frame_queue_next(&is->pictq);
1696  goto retry;
1697  }
1698  }
1699 
1700  if (is->subtitle_st) {
1701  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1702  sp = frame_queue_peek(&is->subpq);
1703 
1704  if (frame_queue_nb_remaining(&is->subpq) > 1)
1705  sp2 = frame_queue_peek_next(&is->subpq);
1706  else
1707  sp2 = NULL;
1708 
1709  if (sp->serial != is->subtitleq.serial
1710  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1711  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1712  {
1713  if (sp->uploaded) {
1714  int i;
1715  for (i = 0; i < sp->sub.num_rects; i++) {
1716  AVSubtitleRect *sub_rect = sp->sub.rects[i];
1717  uint8_t *pixels;
1718  int pitch, j;
1719 
1720  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
1721  for (j = 0; j < sub_rect->h; j++, pixels += pitch)
1722  memset(pixels, 0, sub_rect->w << 2);
1723  SDL_UnlockTexture(is->sub_texture);
1724  }
1725  }
1726  }
1727  frame_queue_next(&is->subpq);
1728  } else {
1729  break;
1730  }
1731  }
1732  }
1733 
1734  frame_queue_next(&is->pictq);
1735  is->force_refresh = 1;
1736 
1737  if (is->step && !is->paused)
1739  }
1740 display:
1741  /* display picture */
1742  if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
1743  video_display(is);
1744  }
1745  is->force_refresh = 0;
1746  if (show_status) {
1747  AVBPrint buf;
1748  static int64_t last_time;
1749  int64_t cur_time;
1750  int aqsize, vqsize, sqsize;
1751  double av_diff;
1752 
1753  cur_time = av_gettime_relative();
1754  if (!last_time || (cur_time - last_time) >= 30000) {
1755  aqsize = 0;
1756  vqsize = 0;
1757  sqsize = 0;
1758  if (is->audio_st)
1759  aqsize = is->audioq.size;
1760  if (is->video_st)
1761  vqsize = is->videoq.size;
1762  if (is->subtitle_st)
1763  sqsize = is->subtitleq.size;
1764  av_diff = 0;
1765  if (is->audio_st && is->video_st)
1766  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1767  else if (is->video_st)
1768  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1769  else if (is->audio_st)
1770  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1771 
1773  av_bprintf(&buf,
1774  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB \r",
1776  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1777  av_diff,
1778  is->frame_drops_early + is->frame_drops_late,
1779  aqsize / 1024,
1780  vqsize / 1024,
1781  sqsize);
1782 
1783  if (show_status == 1 && AV_LOG_INFO > av_log_get_level())
1784  fprintf(stderr, "%s", buf.str);
1785  else
1786  av_log(NULL, AV_LOG_INFO, "%s", buf.str);
1787 
1788  fflush(stderr);
1789  av_bprint_finalize(&buf, NULL);
1790 
1791  last_time = cur_time;
1792  }
1793  }
1794 }
1795 
1796 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1797 {
1798  Frame *vp;
1799 
1800 #if defined(DEBUG_SYNC)
1801  printf("frame_type=%c pts=%0.3f\n",
1802  av_get_picture_type_char(src_frame->pict_type), pts);
1803 #endif
1804 
1805  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1806  return -1;
1807 
1808  vp->sar = src_frame->sample_aspect_ratio;
1809  vp->uploaded = 0;
1810 
1811  vp->width = src_frame->width;
1812  vp->height = src_frame->height;
1813  vp->format = src_frame->format;
1814 
1815  vp->pts = pts;
1816  vp->duration = duration;
1817  vp->pos = pos;
1818  vp->serial = serial;
1819 
1820  set_default_window_size(vp->width, vp->height, vp->sar);
1821 
1822  av_frame_move_ref(vp->frame, src_frame);
1823  frame_queue_push(&is->pictq);
1824  return 0;
1825 }
1826 
1828 {
1829  int got_picture;
1830 
1831  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1832  return -1;
1833 
1834  if (got_picture) {
1835  double dpts = NAN;
1836 
1837  if (frame->pts != AV_NOPTS_VALUE)
1838  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1839 
1840  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1841 
1843  if (frame->pts != AV_NOPTS_VALUE) {
1844  double diff = dpts - get_master_clock(is);
1845  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1846  diff - is->frame_last_filter_delay < 0 &&
1847  is->viddec.pkt_serial == is->vidclk.serial &&
1848  is->videoq.nb_packets) {
1849  is->frame_drops_early++;
1851  got_picture = 0;
1852  }
1853  }
1854  }
1855  }
1856 
1857  return got_picture;
1858 }
1859 
1860 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1861  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1862 {
1863  int ret, i;
1864  int nb_filters = graph->nb_filters;
1866 
1867  if (filtergraph) {
1870  if (!outputs || !inputs) {
1871  ret = AVERROR(ENOMEM);
1872  goto fail;
1873  }
1874 
1875  outputs->name = av_strdup("in");
1876  outputs->filter_ctx = source_ctx;
1877  outputs->pad_idx = 0;
1878  outputs->next = NULL;
1879 
1880  inputs->name = av_strdup("out");
1881  inputs->filter_ctx = sink_ctx;
1882  inputs->pad_idx = 0;
1883  inputs->next = NULL;
1884 
1885  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1886  goto fail;
1887  } else {
1888  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1889  goto fail;
1890  }
1891 
1892  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1893  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1894  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1895 
1896  ret = avfilter_graph_config(graph, NULL);
1897 fail:
1900  return ret;
1901 }
1902 
1903 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1904 {
1906  char sws_flags_str[512] = "";
1907  int ret;
1908  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1909  AVCodecParameters *codecpar = is->video_st->codecpar;
1910  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1911  const AVDictionaryEntry *e = NULL;
1912  int nb_pix_fmts = 0;
1913  int i, j;
1915 
1916  if (!par)
1917  return AVERROR(ENOMEM);
1918 
1919  for (i = 0; i < renderer_info.num_texture_formats; i++) {
1920  for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map); j++) {
1921  if (renderer_info.texture_formats[i] == sdl_texture_format_map[j].texture_fmt) {
1922  pix_fmts[nb_pix_fmts++] = sdl_texture_format_map[j].format;
1923  break;
1924  }
1925  }
1926  }
1927 
1928  while ((e = av_dict_iterate(sws_dict, e))) {
1929  if (!strcmp(e->key, "sws_flags")) {
1930  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1931  } else
1932  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value);
1933  }
1934  if (strlen(sws_flags_str))
1935  sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1936 
1937  graph->scale_sws_opts = av_strdup(sws_flags_str);
1938 
1939 
1940  filt_src = avfilter_graph_alloc_filter(graph, avfilter_get_by_name("buffer"),
1941  "ffplay_buffer");
1942  if (!filt_src) {
1943  ret = AVERROR(ENOMEM);
1944  goto fail;
1945  }
1946 
1947  par->format = frame->format;
1948  par->time_base = is->video_st->time_base;
1949  par->width = frame->width;
1950  par->height = frame->height;
1951  par->sample_aspect_ratio = codecpar->sample_aspect_ratio;
1952  par->color_space = frame->colorspace;
1953  par->color_range = frame->color_range;
1954  par->alpha_mode = frame->alpha_mode;
1955  par->frame_rate = fr;
1956  par->hw_frames_ctx = frame->hw_frames_ctx;
1957  ret = av_buffersrc_parameters_set(filt_src, par);
1958  if (ret < 0)
1959  goto fail;
1960 
1961  ret = avfilter_init_dict(filt_src, NULL);
1962  if (ret < 0)
1963  goto fail;
1964 
1965  filt_out = avfilter_graph_alloc_filter(graph, avfilter_get_by_name("buffersink"),
1966  "ffplay_buffersink");
1967  if (!filt_out) {
1968  ret = AVERROR(ENOMEM);
1969  goto fail;
1970  }
1971 
1972  if ((ret = av_opt_set_array(filt_out, "pixel_formats", AV_OPT_SEARCH_CHILDREN,
1973  0, nb_pix_fmts, AV_OPT_TYPE_PIXEL_FMT, pix_fmts)) < 0)
1974  goto fail;
1975  if (!vk_renderer &&
1976  (ret = av_opt_set_array(filt_out, "colorspaces", AV_OPT_SEARCH_CHILDREN,
1979  goto fail;
1980 
1981  if ((ret = av_opt_set_array(filt_out, "alphamodes", AV_OPT_SEARCH_CHILDREN,
1984  goto fail;
1985 
1986  ret = avfilter_init_dict(filt_out, NULL);
1987  if (ret < 0)
1988  goto fail;
1989 
1990  last_filter = filt_out;
1991 
1992 /* Note: this macro adds a filter before the lastly added filter, so the
1993  * processing order of the filters is in reverse */
1994 #define INSERT_FILT(name, arg) do { \
1995  AVFilterContext *filt_ctx; \
1996  \
1997  ret = avfilter_graph_create_filter(&filt_ctx, \
1998  avfilter_get_by_name(name), \
1999  "ffplay_" name, arg, NULL, graph); \
2000  if (ret < 0) \
2001  goto fail; \
2002  \
2003  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
2004  if (ret < 0) \
2005  goto fail; \
2006  \
2007  last_filter = filt_ctx; \
2008 } while (0)
2009 
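/*
 * Illustrative note (an assumption about the resulting graph layout): each
 * INSERT_FILT() links the new filter in front of last_filter, which starts
 * as the buffersink, so the call order is the reverse of the processing
 * order. In the 180-degree case below, INSERT_FILT("hflip") followed by
 * INSERT_FILT("vflip") produces
 *     buffer -> vflip -> hflip -> buffersink
 * once configure_filtergraph() attaches the source: the filter inserted
 * last is the first one the frames pass through.
 */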
2010  if (autorotate) {
2011  double theta = 0.0;
2012  int32_t *displaymatrix = NULL;
2014  if (sd)
2015  displaymatrix = (int32_t *)sd->data;
2016  if (!displaymatrix) {
2017  const AVPacketSideData *psd = av_packet_side_data_get(is->video_st->codecpar->coded_side_data,
2018  is->video_st->codecpar->nb_coded_side_data,
2020  if (psd)
2021  displaymatrix = (int32_t *)psd->data;
2022  }
2023  theta = get_rotation(displaymatrix);
2024 
2025  if (fabs(theta - 90) < 1.0) {
2026  INSERT_FILT("transpose", displaymatrix[3] > 0 ? "cclock_flip" : "clock");
2027  } else if (fabs(theta - 180) < 1.0) {
2028  if (displaymatrix[0] < 0)
2029  INSERT_FILT("hflip", NULL);
2030  if (displaymatrix[4] < 0)
2031  INSERT_FILT("vflip", NULL);
2032  } else if (fabs(theta - 270) < 1.0) {
2033  INSERT_FILT("transpose", displaymatrix[3] < 0 ? "clock_flip" : "cclock");
2034  } else if (fabs(theta) > 1.0) {
2035  char rotate_buf[64];
2036  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
2037  INSERT_FILT("rotate", rotate_buf);
2038  } else {
2039  if (displaymatrix && displaymatrix[4] < 0)
2040  INSERT_FILT("vflip", NULL);
2041  }
2042  }
2043 
2044  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
2045  goto fail;
2046 
2047  is->in_video_filter = filt_src;
2048  is->out_video_filter = filt_out;
2049 
2050 fail:
2051  av_freep(&par);
2052  return ret;
2053 }
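
/*
 * [Editorial sketch, not part of ffplay.c] configure_video_filters() above maps the stream's
 * display matrix onto transpose/hflip/vflip/rotate filters. A minimal, self-contained
 * illustration of the first step using only public libavutil API: read the 3x3 display matrix
 * side data from a decoded frame and obtain the rotation angle. example_frame_rotation() is a
 * hypothetical helper; av_display_rotation_get() returns counter-clockwise degrees, which
 * ffplay's get_rotation() (from cmdutils) additionally negates, rounds and wraps into [0, 360)
 * before the 90/180/270 comparisons above.
 */
#include "libavutil/display.h"
#include "libavutil/frame.h"

static double example_frame_rotation(const AVFrame *f)
{
    const AVFrameSideData *sd = av_frame_get_side_data(f, AV_FRAME_DATA_DISPLAYMATRIX);

    if (sd && sd->size >= 9 * sizeof(int32_t))
        return av_display_rotation_get((const int32_t *)sd->data);
    return 0.0;   /* no display matrix: no rotation requested */
}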
2054 
2055 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
2056 {
2057  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
2058  char aresample_swr_opts[512] = "";
2059  const AVDictionaryEntry *e = NULL;
2060  AVBPrint bp;
2061  char asrc_args[256];
2062  int ret;
2063 
2064  avfilter_graph_free(&is->agraph);
2065  if (!(is->agraph = avfilter_graph_alloc()))
2066  return AVERROR(ENOMEM);
2067  is->agraph->nb_threads = filter_nbthreads;
2068 
2069  av_bprint_init(&bp, 0, AV_BPRINT_SIZE_AUTOMATIC);
2070
2071  while ((e = av_dict_iterate(swr_opts, e)))
2072  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
2073  if (strlen(aresample_swr_opts))
2074  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
2075  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
2076 
2077  av_channel_layout_describe_bprint(&is->audio_filter_src.ch_layout, &bp);
2078 
2079  ret = snprintf(asrc_args, sizeof(asrc_args),
2080  "sample_rate=%d:sample_fmt=%s:time_base=%d/%d:channel_layout=%s",
2081  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
2082  1, is->audio_filter_src.freq, bp.str);
2083 
2084  ret = avfilter_graph_create_filter(&filt_asrc,
2085  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
2086  asrc_args, NULL, is->agraph);
2087  if (ret < 0)
2088  goto end;
2089 
2090  filt_asink = avfilter_graph_alloc_filter(is->agraph, avfilter_get_by_name("abuffersink"),
2091  "ffplay_abuffersink");
2092  if (!filt_asink) {
2093  ret = AVERROR(ENOMEM);
2094  goto end;
2095  }
2096 
2097  if ((ret = av_opt_set(filt_asink, "sample_formats", "s16", AV_OPT_SEARCH_CHILDREN)) < 0)
2098  goto end;
2099 
2100  if (force_output_format) {
2101  if ((ret = av_opt_set_array(filt_asink, "channel_layouts", AV_OPT_SEARCH_CHILDREN,
2102  0, 1, AV_OPT_TYPE_CHLAYOUT, &is->audio_tgt.ch_layout)) < 0)
2103  goto end;
2104  if ((ret = av_opt_set_array(filt_asink, "samplerates", AV_OPT_SEARCH_CHILDREN,
2105  0, 1, AV_OPT_TYPE_INT, &is->audio_tgt.freq)) < 0)
2106  goto end;
2107  }
2108 
2109  ret = avfilter_init_dict(filt_asink, NULL);
2110  if (ret < 0)
2111  goto end;
2112 
2113  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
2114  goto end;
2115 
2116  is->in_audio_filter = filt_asrc;
2117  is->out_audio_filter = filt_asink;
2118 
2119 end:
2120  if (ret < 0)
2121  avfilter_graph_free(&is->agraph);
2122  av_bprint_finalize(&bp, NULL);
2123 
2124  return ret;
2125 }
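
/*
 * [Editorial sketch, not part of ffplay.c] configure_audio_filters() describes the source
 * audio as an "abuffer" argument string. The same step reduced to a standalone helper using
 * only public libavutil API; make_abuffer_args() and its parameters are illustrative.
 */
#include <stdio.h>
#include "libavutil/bprint.h"
#include "libavutil/channel_layout.h"
#include "libavutil/error.h"
#include "libavutil/samplefmt.h"

static int make_abuffer_args(char *buf, size_t buf_size,
                             int sample_rate, enum AVSampleFormat fmt,
                             const AVChannelLayout *layout)
{
    AVBPrint bp;
    int ret;

    av_bprint_init(&bp, 0, AV_BPRINT_SIZE_AUTOMATIC);
    ret = av_channel_layout_describe_bprint(layout, &bp);   /* e.g. "stereo" */
    if (ret < 0 || !av_bprint_is_complete(&bp)) {
        av_bprint_finalize(&bp, NULL);
        return ret < 0 ? ret : AVERROR(ENOMEM);
    }

    /* same shape as the asrc_args string built above, with a 1/sample_rate time base */
    snprintf(buf, buf_size,
             "sample_rate=%d:sample_fmt=%s:time_base=%d/%d:channel_layout=%s",
             sample_rate, av_get_sample_fmt_name(fmt), 1, sample_rate, bp.str);

    av_bprint_finalize(&bp, NULL);
    return 0;
}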
2126 
2127 static int audio_thread(void *arg)
2128 {
2129  VideoState *is = arg;
2130  AVFrame *frame = av_frame_alloc();
2131  Frame *af;
2132  int last_serial = -1;
2133  int reconfigure;
2134  int got_frame = 0;
2135  AVRational tb;
2136  int ret = 0;
2137 
2138  if (!frame)
2139  return AVERROR(ENOMEM);
2140 
2141  do {
2142  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2143  goto the_end;
2144 
2145  if (got_frame) {
2146  tb = (AVRational){1, frame->sample_rate};
2147 
2148  reconfigure =
2149  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.ch_layout.nb_channels,
2150  frame->format, frame->ch_layout.nb_channels) ||
2151  av_channel_layout_compare(&is->audio_filter_src.ch_layout, &frame->ch_layout) ||
2152  is->audio_filter_src.freq != frame->sample_rate ||
2153  is->auddec.pkt_serial != last_serial;
2154 
2155  if (reconfigure) {
2156  char buf1[1024], buf2[1024];
2157  av_channel_layout_describe(&is->audio_filter_src.ch_layout, buf1, sizeof(buf1));
2158  av_channel_layout_describe(&frame->ch_layout, buf2, sizeof(buf2));
2159  av_log(NULL, AV_LOG_DEBUG,
2160  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2161  is->audio_filter_src.freq, is->audio_filter_src.ch_layout.nb_channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2162  frame->sample_rate, frame->ch_layout.nb_channels, av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2163 
2164  is->audio_filter_src.fmt = frame->format;
2165  ret = av_channel_layout_copy(&is->audio_filter_src.ch_layout, &frame->ch_layout);
2166  if (ret < 0)
2167  goto the_end;
2168  is->audio_filter_src.freq = frame->sample_rate;
2169  last_serial = is->auddec.pkt_serial;
2170 
2171  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2172  goto the_end;
2173  }
2174 
2175  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2176  goto the_end;
2177 
2178  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2179  FrameData *fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL;
2180  tb = av_buffersink_get_time_base(is->out_audio_filter);
2181  if (!(af = frame_queue_peek_writable(&is->sampq)))
2182  goto the_end;
2183 
2184  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2185  af->pos = fd ? fd->pkt_pos : -1;
2186  af->serial = is->auddec.pkt_serial;
2187  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2188 
2189  av_frame_move_ref(af->frame, frame);
2190  frame_queue_push(&is->sampq);
2191 
2192  if (is->audioq.serial != is->auddec.pkt_serial)
2193  break;
2194  }
2195  if (ret == AVERROR_EOF)
2196  is->auddec.finished = is->auddec.pkt_serial;
2197  }
2198  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2199  the_end:
2200  avfilter_graph_free(&is->agraph);
2201  av_frame_free(&frame);
2202  return ret;
2203 }
2204 
2205 static int decoder_start(Decoder *d, int (*fn)(void *), const char *thread_name, void* arg)
2206 {
2207  packet_queue_start(d->queue);
2208  d->decoder_tid = SDL_CreateThread(fn, thread_name, arg);
2209  if (!d->decoder_tid) {
2210  av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
2211  return AVERROR(ENOMEM);
2212  }
2213  return 0;
2214 }
2215 
2216 static int video_thread(void *arg)
2217 {
2218  VideoState *is = arg;
2219  AVFrame *frame = av_frame_alloc();
2220  double pts;
2221  double duration;
2222  int ret;
2223  AVRational tb = is->video_st->time_base;
2224  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2225 
2226  AVFilterGraph *graph = NULL;
2227  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2228  int last_w = 0;
2229  int last_h = 0;
2230  enum AVPixelFormat last_format = -2;
2231  int last_serial = -1;
2232  int last_vfilter_idx = 0;
2233 
2234  if (!frame)
2235  return AVERROR(ENOMEM);
2236 
2237  for (;;) {
2238  ret = get_video_frame(is, frame);
2239  if (ret < 0)
2240  goto the_end;
2241  if (!ret)
2242  continue;
2243 
2244  if ( last_w != frame->width
2245  || last_h != frame->height
2246  || last_format != frame->format
2247  || last_serial != is->viddec.pkt_serial
2248  || last_vfilter_idx != is->vfilter_idx) {
2249  av_log(NULL, AV_LOG_DEBUG,
2250  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2251  last_w, last_h,
2252  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2253  frame->width, frame->height,
2254  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2255  avfilter_graph_free(&graph);
2256  graph = avfilter_graph_alloc();
2257  if (!graph) {
2258  ret = AVERROR(ENOMEM);
2259  goto the_end;
2260  }
2261  graph->nb_threads = filter_nbthreads;
2262  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2263  SDL_Event event;
2264  event.type = FF_QUIT_EVENT;
2265  event.user.data1 = is;
2266  SDL_PushEvent(&event);
2267  goto the_end;
2268  }
2269  filt_in = is->in_video_filter;
2270  filt_out = is->out_video_filter;
2271  last_w = frame->width;
2272  last_h = frame->height;
2273  last_format = frame->format;
2274  last_serial = is->viddec.pkt_serial;
2275  last_vfilter_idx = is->vfilter_idx;
2276  frame_rate = av_buffersink_get_frame_rate(filt_out);
2277  }
2278 
2279  ret = av_buffersrc_add_frame(filt_in, frame);
2280  if (ret < 0)
2281  goto the_end;
2282 
2283  while (ret >= 0) {
2284  FrameData *fd;
2285 
2286  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2287 
2288  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2289  if (ret < 0) {
2290  if (ret == AVERROR_EOF)
2291  is->viddec.finished = is->viddec.pkt_serial;
2292  ret = 0;
2293  break;
2294  }
2295 
2296  fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL;
2297 
2298  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2299  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2300  is->frame_last_filter_delay = 0;
2301  tb = av_buffersink_get_time_base(filt_out);
2302  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2303  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2304  ret = queue_picture(is, frame, pts, duration, fd ? fd->pkt_pos : -1, is->viddec.pkt_serial);
2305  av_frame_unref(frame);
2306  if (is->videoq.serial != is->viddec.pkt_serial)
2307  break;
2308  }
2309 
2310  if (ret < 0)
2311  goto the_end;
2312  }
2313  the_end:
2314  avfilter_graph_free(&graph);
2315  av_frame_free(&frame);
2316  return 0;
2317 }
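
/*
 * [Editorial sketch, not part of ffplay.c] video_thread() above feeds decoded frames into the
 * filtergraph source and drains the sink. The generic push/pull pattern, reduced to its core
 * with public libavfilter API; pump_one_frame() is hypothetical and assumes src/sink come from
 * an already configured graph.
 */
#include "libavfilter/buffersrc.h"
#include "libavfilter/buffersink.h"
#include "libavutil/error.h"
#include "libavutil/frame.h"

static int pump_one_frame(AVFilterContext *src, AVFilterContext *sink,
                          AVFrame *in, AVFrame *out)
{
    /* the graph takes ownership of the frame's references; `in` is reset on success */
    int ret = av_buffersrc_add_frame(src, in);
    if (ret < 0)
        return ret;

    /* pull every frame the graph has ready; EAGAIN means "feed more input" */
    while ((ret = av_buffersink_get_frame(sink, out)) >= 0) {
        /* ... consume `out` (queue or display it) ... */
        av_frame_unref(out);
    }
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        return 0;
    return ret;
}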
2318 
2319 static int subtitle_thread(void *arg)
2320 {
2321  VideoState *is = arg;
2322  Frame *sp;
2323  int got_subtitle;
2324  double pts;
2325 
2326  for (;;) {
2327  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2328  return 0;
2329 
2330  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2331  break;
2332 
2333  pts = 0;
2334 
2335  if (got_subtitle && sp->sub.format == 0) {
2336  if (sp->sub.pts != AV_NOPTS_VALUE)
2337  pts = sp->sub.pts / (double)AV_TIME_BASE;
2338  sp->pts = pts;
2339  sp->serial = is->subdec.pkt_serial;
2340  sp->width = is->subdec.avctx->width;
2341  sp->height = is->subdec.avctx->height;
2342  sp->uploaded = 0;
2343 
2344  /* now we can update the picture count */
2345  frame_queue_push(&is->subpq);
2346  } else if (got_subtitle) {
2347  avsubtitle_free(&sp->sub);
2348  }
2349  }
2350  return 0;
2351 }
2352 
2353 /* copy samples into the ring buffer used by the waveform/spectrum display */
2354 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2355 {
2356  int size, len;
2357 
2358  size = samples_size / sizeof(short);
2359  while (size > 0) {
2360  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2361  if (len > size)
2362  len = size;
2363  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2364  samples += len;
2365  is->sample_array_index += len;
2366  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2367  is->sample_array_index = 0;
2368  size -= len;
2369  }
2370 }
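
/*
 * [Editorial sketch, not part of ffplay.c] update_sample_display() appends samples to a
 * fixed-size ring buffer, wrapping the write index when it reaches the end. The same pattern
 * in a generic form; ring_append() and its parameter names are illustrative only.
 */
#include <string.h>

static void ring_append(short *ring, int ring_size, int *write_idx,
                        const short *src, int n)
{
    while (n > 0) {
        int chunk = ring_size - *write_idx;   /* room until the end of the buffer */
        if (chunk > n)
            chunk = n;
        memcpy(ring + *write_idx, src, chunk * sizeof(*ring));
        src        += chunk;
        *write_idx += chunk;
        if (*write_idx >= ring_size)
            *write_idx = 0;                   /* wrap around */
        n -= chunk;
    }
}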
2371 
2372 /* return the wanted number of samples to get better sync if sync_type is video
2373  * or external master clock */
2374 static int synchronize_audio(VideoState *is, int nb_samples)
2375 {
2376  int wanted_nb_samples = nb_samples;
2377 
2378  /* if not master, then we try to remove or add samples to correct the clock */
2379  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2380  double diff, avg_diff;
2381  int min_nb_samples, max_nb_samples;
2382 
2383  diff = get_clock(&is->audclk) - get_master_clock(is);
2384 
2385  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2386  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2387  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2388  /* not enough measures to have a correct estimate */
2389  is->audio_diff_avg_count++;
2390  } else {
2391  /* estimate the A-V difference */
2392  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2393 
2394  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2395  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2396  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2397  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2398  wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2399  }
2400  av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2401  diff, avg_diff, wanted_nb_samples - nb_samples,
2402  is->audio_clock, is->audio_diff_threshold);
2403  }
2404  } else {
2405  /* the difference is too large: probably initial PTS errors, so
2406  reset the A-V averaging filter */
2407  is->audio_diff_avg_count = 0;
2408  is->audio_diff_cum = 0;
2409  }
2410  }
2411 
2412  return wanted_nb_samples;
2413 }
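
/*
 * [Editorial sketch, not part of ffplay.c] A worked example of the correction applied by
 * synchronize_audio(): with a 48 kHz stream, a measured A-V drift of +15 ms and a 1024-sample
 * frame, the requested frame size grows by diff * freq = 0.015 * 48000 = 720 samples, but the
 * result is clipped to +/- SAMPLE_CORRECTION_PERCENT_MAX (10%) of the original frame. The
 * helper below just restates that clipping step; wanted_samples_for_drift() is hypothetical.
 */
#include "libavutil/common.h"

static int wanted_samples_for_drift(int nb_samples, double diff_seconds, int sample_rate)
{
    int wanted = nb_samples + (int)(diff_seconds * sample_rate);
    int min_nb = nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100;
    int max_nb = nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100;
    return av_clip(wanted, min_nb, max_nb);   /* e.g. 1024 + 720 is clipped to 1126 */
}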
2414 
2415 /**
2416  * Decode one audio frame and return its uncompressed size.
2417  *
2418  * The processed audio frame is decoded, converted if required, and
2419  * stored in is->audio_buf, with size in bytes given by the return
2420  * value.
2421  */
2422 static int audio_decode_frame(VideoState *is)
2423 {
2424  int data_size, resampled_data_size;
2425  av_unused double audio_clock0;
2426  int wanted_nb_samples;
2427  Frame *af;
2428 
2429  if (is->paused)
2430  return -1;
2431 
2432  do {
2433 #if defined(_WIN32)
2434  while (frame_queue_nb_remaining(&is->sampq) == 0) {
2435  if ((av_gettime_relative() - audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
2436  return -1;
2437  av_usleep (1000);
2438  }
2439 #endif
2440  if (!(af = frame_queue_peek_readable(&is->sampq)))
2441  return -1;
2442  frame_queue_next(&is->sampq);
2443  } while (af->serial != is->audioq.serial);
2444 
2445  data_size = av_samples_get_buffer_size(NULL, af->frame->ch_layout.nb_channels,
2446  af->frame->nb_samples,
2447  af->frame->format, 1);
2448 
2449  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2450 
2451  if (af->frame->format != is->audio_src.fmt ||
2452  av_channel_layout_compare(&af->frame->ch_layout, &is->audio_src.ch_layout) ||
2453  af->frame->sample_rate != is->audio_src.freq ||
2454  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2455  int ret;
2456  swr_free(&is->swr_ctx);
2457  ret = swr_alloc_set_opts2(&is->swr_ctx,
2458  &is->audio_tgt.ch_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2459  &af->frame->ch_layout, af->frame->format, af->frame->sample_rate,
2460  0, NULL);
2461  if (ret < 0 || swr_init(is->swr_ctx) < 0) {
2462  av_log(NULL, AV_LOG_ERROR,
2463  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2464  af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), af->frame->ch_layout.nb_channels,
2465  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.ch_layout.nb_channels);
2466  swr_free(&is->swr_ctx);
2467  return -1;
2468  }
2469  if (av_channel_layout_copy(&is->audio_src.ch_layout, &af->frame->ch_layout) < 0)
2470  return -1;
2471  is->audio_src.freq = af->frame->sample_rate;
2472  is->audio_src.fmt = af->frame->format;
2473  }
2474 
2475  if (is->swr_ctx) {
2476  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2477  uint8_t **out = &is->audio_buf1;
2478  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2479  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.ch_layout.nb_channels, out_count, is->audio_tgt.fmt, 0);
2480  int len2;
2481  if (out_size < 0) {
2482  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2483  return -1;
2484  }
2485  if (wanted_nb_samples != af->frame->nb_samples) {
2486  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2487  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2488  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2489  return -1;
2490  }
2491  }
2492  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2493  if (!is->audio_buf1)
2494  return AVERROR(ENOMEM);
2495  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2496  if (len2 < 0) {
2497  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2498  return -1;
2499  }
2500  if (len2 == out_count) {
2501  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2502  if (swr_init(is->swr_ctx) < 0)
2503  swr_free(&is->swr_ctx);
2504  }
2505  is->audio_buf = is->audio_buf1;
2506  resampled_data_size = len2 * is->audio_tgt.ch_layout.nb_channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2507  } else {
2508  is->audio_buf = af->frame->data[0];
2509  resampled_data_size = data_size;
2510  }
2511 
2512  audio_clock0 = is->audio_clock;
2513  /* update the audio clock with the pts */
2514  if (!isnan(af->pts))
2515  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2516  else
2517  is->audio_clock = NAN;
2518  is->audio_clock_serial = af->serial;
2519 #ifdef DEBUG
2520  {
2521  static double last_clock;
2522  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2523  is->audio_clock - last_clock,
2524  is->audio_clock, audio_clock0);
2525  last_clock = is->audio_clock;
2526  }
2527 #endif
2528  return resampled_data_size;
2529 }
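
/*
 * [Editorial sketch, not part of ffplay.c] audio_decode_frame() lazily (re)creates an
 * SwrContext whenever the decoded format stops matching the device format. A minimal
 * standalone setup of the same public libswresample calls used above; the target format
 * (44.1 kHz stereo s16) and the helper name example_resample() are illustrative only, and
 * `out` must point to a buffer large enough for roughly
 * in->nb_samples * 44100 / in->sample_rate samples plus some margin.
 */
#include "libswresample/swresample.h"
#include "libavutil/channel_layout.h"
#include "libavutil/frame.h"
#include "libavutil/samplefmt.h"

static int example_resample(const AVFrame *in, uint8_t **out, int out_count)
{
    SwrContext *swr = NULL;
    AVChannelLayout out_layout = AV_CHANNEL_LAYOUT_STEREO;   /* illustrative target */
    int ret, out_samples;

    ret = swr_alloc_set_opts2(&swr,
                              &out_layout, AV_SAMPLE_FMT_S16, 44100,        /* destination */
                              &in->ch_layout, in->format, in->sample_rate,  /* source */
                              0, NULL);
    if (ret < 0)
        return ret;
    if ((ret = swr_init(swr)) < 0)
        goto end;

    out_samples = swr_convert(swr, out, out_count,
                              (const uint8_t **)in->extended_data, in->nb_samples);
    ret = out_samples < 0 ? out_samples : 0;
end:
    swr_free(&swr);
    return ret;
}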
2530 
2531 /* prepare a new audio buffer */
2532 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2533 {
2534  VideoState *is = opaque;
2535  int audio_size, len1;
2536 
2537  audio_callback_time = av_gettime_relative();
2538
2539  while (len > 0) {
2540  if (is->audio_buf_index >= is->audio_buf_size) {
2541  audio_size = audio_decode_frame(is);
2542  if (audio_size < 0) {
2543  /* if error, just output silence */
2544  is->audio_buf = NULL;
2545  is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2546  } else {
2547  if (is->show_mode != SHOW_MODE_VIDEO)
2548  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2549  is->audio_buf_size = audio_size;
2550  }
2551  is->audio_buf_index = 0;
2552  }
2553  len1 = is->audio_buf_size - is->audio_buf_index;
2554  if (len1 > len)
2555  len1 = len;
2556  if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
2557  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2558  else {
2559  memset(stream, 0, len1);
2560  if (!is->muted && is->audio_buf)
2561  SDL_MixAudioFormat(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, is->audio_volume);
2562  }
2563  len -= len1;
2564  stream += len1;
2565  is->audio_buf_index += len1;
2566  }
2567  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2568  /* Let's assume the audio driver that is used by SDL has two periods. */
2569  if (!isnan(is->audio_clock)) {
2570  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2571  sync_clock_to_slave(&is->extclk, &is->audclk);
2572  }
2573 }
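
/*
 * [Editorial sketch, not part of ffplay.c] The clock update at the end of sdl_audio_callback()
 * subtracts the audio that has been decoded but not yet played: two hardware periods plus the
 * remainder of the current software buffer, converted from bytes to seconds. Restated on its
 * own (the helper name and argument names are illustrative): with an 8192-byte HW buffer,
 * 4096 bytes still pending and 176400 bytes/s (44.1 kHz stereo s16), the played-out position
 * lags the decoded PTS by (2*8192 + 4096) / 176400 ~= 0.116 s.
 */
static double example_audio_latency(int hw_buf_size, int write_buf_size, int bytes_per_sec)
{
    return (double)(2 * hw_buf_size + write_buf_size) / bytes_per_sec;
}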
2574 
2575 static int audio_open(void *opaque, AVChannelLayout *wanted_channel_layout, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2576 {
2577  SDL_AudioSpec wanted_spec, spec;
2578  const char *env;
2579  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2580  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2581  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2582  int wanted_nb_channels = wanted_channel_layout->nb_channels;
2583 
2584  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2585  if (env) {
2586  wanted_nb_channels = atoi(env);
2587  av_channel_layout_uninit(wanted_channel_layout);
2588  av_channel_layout_default(wanted_channel_layout, wanted_nb_channels);
2589  }
2590  if (wanted_channel_layout->order != AV_CHANNEL_ORDER_NATIVE) {
2591  av_channel_layout_uninit(wanted_channel_layout);
2592  av_channel_layout_default(wanted_channel_layout, wanted_nb_channels);
2593  }
2594  wanted_nb_channels = wanted_channel_layout->nb_channels;
2595  wanted_spec.channels = wanted_nb_channels;
2596  wanted_spec.freq = wanted_sample_rate;
2597  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2598  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2599  return -1;
2600  }
2601  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2602  next_sample_rate_idx--;
2603  wanted_spec.format = AUDIO_S16SYS;
2604  wanted_spec.silence = 0;
2605  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
2606  wanted_spec.callback = sdl_audio_callback;
2607  wanted_spec.userdata = opaque;
2608  while (!(audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE))) {
2609  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2610  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2611  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2612  if (!wanted_spec.channels) {
2613  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2614  wanted_spec.channels = wanted_nb_channels;
2615  if (!wanted_spec.freq) {
2616  av_log(NULL, AV_LOG_ERROR,
2617  "No more combinations to try, audio open failed\n");
2618  return -1;
2619  }
2620  }
2621  av_channel_layout_default(wanted_channel_layout, wanted_spec.channels);
2622  }
2623  if (spec.format != AUDIO_S16SYS) {
2624  av_log(NULL, AV_LOG_ERROR,
2625  "SDL advised audio format %d is not supported!\n", spec.format);
2626  return -1;
2627  }
2628  if (spec.channels != wanted_spec.channels) {
2629  av_channel_layout_uninit(wanted_channel_layout);
2630  av_channel_layout_default(wanted_channel_layout, spec.channels);
2631  if (wanted_channel_layout->order != AV_CHANNEL_ORDER_NATIVE) {
2632  av_log(NULL, AV_LOG_ERROR,
2633  "SDL advised channel count %d is not supported!\n", spec.channels);
2634  return -1;
2635  }
2636  }
2637 
2638  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2639  audio_hw_params->freq = spec.freq;
2640  if (av_channel_layout_copy(&audio_hw_params->ch_layout, wanted_channel_layout) < 0)
2641  return -1;
2642  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->ch_layout.nb_channels, 1, audio_hw_params->fmt, 1);
2643  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->ch_layout.nb_channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2644  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2645  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2646  return -1;
2647  }
2648  return spec.size;
2649 }
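
/*
 * [Editorial sketch, not part of ffplay.c] audio_open() sizes the SDL buffer as the largest
 * power of two that still keeps at most SDL_AUDIO_MAX_CALLBACKS_PER_SEC callbacks per second:
 * 2 << av_log2(freq / 30). For freq = 48000 that is 2 << av_log2(1600) = 2 << 10 = 2048
 * samples, never less than SDL_AUDIO_MIN_BUFFER_SIZE (512). A restatement of just that
 * computation; the helper name is illustrative and reuses the macros defined at the top of
 * this file.
 */
#include "libavutil/common.h"

static int example_sdl_buffer_samples(int freq)
{
    return FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE,
                 2 << av_log2(freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
}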
2650 
2651 static int create_hwaccel(AVBufferRef **device_ctx)
2652 {
2653  enum AVHWDeviceType type;
2654  int ret;
2655  AVBufferRef *vk_dev;
2656 
2657  *device_ctx = NULL;
2658 
2659  if (!hwaccel)
2660  return 0;
2661 
2662  type = av_hwdevice_find_type_by_name(hwaccel);
2663  if (type == AV_HWDEVICE_TYPE_NONE)
2664  return AVERROR(ENOTSUP);
2665 
2666  if (!vk_renderer) {
2667  av_log(NULL, AV_LOG_ERROR, "Vulkan renderer is not available\n");
2668  return AVERROR(ENOTSUP);
2669  }
2670 
2671  ret = vk_renderer_get_hw_dev(vk_renderer, &vk_dev);
2672  if (ret < 0)
2673  return ret;
2674 
2675  ret = av_hwdevice_ctx_create_derived(device_ctx, type, vk_dev, 0);
2676  if (!ret)
2677  return 0;
2678 
2679  if (ret != AVERROR(ENOSYS))
2680  return ret;
2681 
2682  av_log(NULL, AV_LOG_WARNING, "Derive %s from vulkan not supported.\n", hwaccel);
2683  ret = av_hwdevice_ctx_create(device_ctx, type, NULL, NULL, 0);
2684  return ret;
2685 }
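
/*
 * [Editorial sketch, not part of ffplay.c] create_hwaccel() first tries to derive the requested
 * device from the Vulkan renderer and only falls back to creating it from scratch. The plain
 * "create from scratch" path with public libavutil API, e.g. for -hwaccel vaapi;
 * open_hw_device_by_name() is a hypothetical helper.
 */
#include <errno.h>
#include "libavutil/error.h"
#include "libavutil/hwcontext.h"

static int open_hw_device_by_name(const char *name, AVBufferRef **out)
{
    enum AVHWDeviceType type = av_hwdevice_find_type_by_name(name);

    *out = NULL;
    if (type == AV_HWDEVICE_TYPE_NONE)
        return AVERROR(ENOTSUP);

    /* NULL device string and options: let the backend pick a default device */
    return av_hwdevice_ctx_create(out, type, NULL, NULL, 0);
}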
2686 
2687 /* open a given stream. Return 0 if OK */
2688 static int stream_component_open(VideoState *is, int stream_index)
2689 {
2690  AVFormatContext *ic = is->ic;
2691  AVCodecContext *avctx;
2692  const AVCodec *codec;
2693  const char *forced_codec_name = NULL;
2694  AVDictionary *opts = NULL;
2695  int sample_rate;
2696  AVChannelLayout ch_layout = { 0 };
2697  int ret = 0;
2698  int stream_lowres = lowres;
2699 
2700  if (stream_index < 0 || stream_index >= ic->nb_streams)
2701  return -1;
2702 
2703  avctx = avcodec_alloc_context3(NULL);
2704  if (!avctx)
2705  return AVERROR(ENOMEM);
2706 
2707  ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
2708  if (ret < 0)
2709  goto fail;
2710  avctx->pkt_timebase = ic->streams[stream_index]->time_base;
2711 
2712  codec = avcodec_find_decoder(avctx->codec_id);
2713 
2714  switch(avctx->codec_type){
2715  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2716  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2717  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2718  }
2719  if (forced_codec_name)
2720  codec = avcodec_find_decoder_by_name(forced_codec_name);
2721  if (!codec) {
2722  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2723  "No codec could be found with name '%s'\n", forced_codec_name);
2724  else av_log(NULL, AV_LOG_WARNING,
2725  "No decoder could be found for codec %s\n", avcodec_get_name(avctx->codec_id));
2726  ret = AVERROR(EINVAL);
2727  goto fail;
2728  }
2729 
2730  avctx->codec_id = codec->id;
2731  if (stream_lowres > codec->max_lowres) {
2732  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2733  codec->max_lowres);
2734  stream_lowres = codec->max_lowres;
2735  }
2736  avctx->lowres = stream_lowres;
2737 
2738  if (fast)
2739  avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2740 
2741  ret = filter_codec_opts(codec_opts, avctx->codec_id, ic,
2742  ic->streams[stream_index], codec, &opts, NULL);
2743  if (ret < 0)
2744  goto fail;
2745 
2746  if (!av_dict_get(opts, "threads", NULL, 0))
2747  av_dict_set(&opts, "threads", "auto", 0);
2748  if (stream_lowres)
2749  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2750 
2751  av_dict_set(&opts, "flags", "+copy_opaque", AV_DICT_MULTIKEY);
2752 
2753  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
2754  ret = create_hwaccel(&avctx->hw_device_ctx);
2755  if (ret < 0)
2756  goto fail;
2757  }
2758 
2759  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2760  goto fail;
2761  }
2762  ret = check_avoptions(opts);
2763  if (ret < 0)
2764  goto fail;
2765 
2766  is->eof = 0;
2767  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2768  switch (avctx->codec_type) {
2769  case AVMEDIA_TYPE_AUDIO:
2770  {
2771  AVFilterContext *sink;
2772 
2773  is->audio_filter_src.freq = avctx->sample_rate;
2774  ret = av_channel_layout_copy(&is->audio_filter_src.ch_layout, &avctx->ch_layout);
2775  if (ret < 0)
2776  goto fail;
2777  is->audio_filter_src.fmt = avctx->sample_fmt;
2778  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2779  goto fail;
2780  sink = is->out_audio_filter;
2781  sample_rate = av_buffersink_get_sample_rate(sink);
2782  ret = av_buffersink_get_ch_layout(sink, &ch_layout);
2783  if (ret < 0)
2784  goto fail;
2785  }
2786 
2787  /* prepare audio output */
2788  if ((ret = audio_open(is, &ch_layout, sample_rate, &is->audio_tgt)) < 0)
2789  goto fail;
2790  is->audio_hw_buf_size = ret;
2791  is->audio_src = is->audio_tgt;
2792  is->audio_buf_size = 0;
2793  is->audio_buf_index = 0;
2794 
2795  /* init averaging filter */
2796  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2797  is->audio_diff_avg_count = 0;
2798  /* since we do not have a precise enough audio FIFO fullness,
2799  we correct audio sync only if larger than this threshold */
2800  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2801 
2802  is->audio_stream = stream_index;
2803  is->audio_st = ic->streams[stream_index];
2804 
2805  if ((ret = decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread)) < 0)
2806  goto fail;
2807  if (is->ic->iformat->flags & AVFMT_NOTIMESTAMPS) {
2808  is->auddec.start_pts = is->audio_st->start_time;
2809  is->auddec.start_pts_tb = is->audio_st->time_base;
2810  }
2811  if ((ret = decoder_start(&is->auddec, audio_thread, "audio_decoder", is)) < 0)
2812  goto out;
2813  SDL_PauseAudioDevice(audio_dev, 0);
2814  break;
2815  case AVMEDIA_TYPE_VIDEO:
2816  is->video_stream = stream_index;
2817  is->video_st = ic->streams[stream_index];
2818 
2819  if ((ret = decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread)) < 0)
2820  goto fail;
2821  if ((ret = decoder_start(&is->viddec, video_thread, "video_decoder", is)) < 0)
2822  goto out;
2823  is->queue_attachments_req = 1;
2824  break;
2825  case AVMEDIA_TYPE_SUBTITLE:
2826  is->subtitle_stream = stream_index;
2827  is->subtitle_st = ic->streams[stream_index];
2828 
2829  if ((ret = decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread)) < 0)
2830  goto fail;
2831  if ((ret = decoder_start(&is->subdec, subtitle_thread, "subtitle_decoder", is)) < 0)
2832  goto out;
2833  break;
2834  default:
2835  break;
2836  }
2837  goto out;
2838 
2839 fail:
2840  avcodec_free_context(&avctx);
2841 out:
2842  av_channel_layout_uninit(&ch_layout);
2843  av_dict_free(&opts);
2844 
2845  return ret;
2846 }
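
/*
 * [Editorial sketch, not part of ffplay.c] The core of stream_component_open() is the standard
 * libavcodec decoder setup: copy the stream parameters into a fresh context, pick a decoder,
 * and open it. Reduced to that skeleton; open_decoder_for_stream() is hypothetical and error
 * paths are collapsed.
 */
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"

static int open_decoder_for_stream(const AVStream *st, AVCodecContext **out)
{
    const AVCodec *codec = avcodec_find_decoder(st->codecpar->codec_id);
    AVCodecContext *avctx;
    int ret;

    if (!codec)
        return AVERROR_DECODER_NOT_FOUND;
    if (!(avctx = avcodec_alloc_context3(codec)))
        return AVERROR(ENOMEM);

    if ((ret = avcodec_parameters_to_context(avctx, st->codecpar)) < 0) {
        avcodec_free_context(&avctx);
        return ret;
    }
    avctx->pkt_timebase = st->time_base;   /* as done above, so pts handling stays consistent */

    if ((ret = avcodec_open2(avctx, codec, NULL)) < 0) {
        avcodec_free_context(&avctx);
        return ret;
    }
    *out = avctx;
    return 0;
}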
2847 
2848 static int decode_interrupt_cb(void *ctx)
2849 {
2850  VideoState *is = ctx;
2851  return is->abort_request;
2852 }
2853 
2854 static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue) {
2855  return stream_id < 0 ||
2856  queue->abort_request ||
2857  (st->disposition & AV_DISPOSITION_ATTACHED_PIC) ||
2858  queue->nb_packets > MIN_FRAMES && (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0);
2859 }
2860 
2861 static int is_realtime(AVFormatContext *s)
2862 {
2863  if( !strcmp(s->iformat->name, "rtp")
2864  || !strcmp(s->iformat->name, "rtsp")
2865  || !strcmp(s->iformat->name, "sdp")
2866  )
2867  return 1;
2868 
2869  if(s->pb && ( !strncmp(s->url, "rtp:", 4)
2870  || !strncmp(s->url, "udp:", 4)
2871  )
2872  )
2873  return 1;
2874  return 0;
2875 }
2876 
2877 /* this thread gets the stream from the disk or the network */
2878 static int read_thread(void *arg)
2879 {
2880  VideoState *is = arg;
2881  AVFormatContext *ic = NULL;
2882  int err, i, ret;
2883  int st_index[AVMEDIA_TYPE_NB];
2884  AVPacket *pkt = NULL;
2885  int64_t stream_start_time;
2886  char metadata_description[96];
2887  int pkt_in_play_range = 0;
2888  const AVDictionaryEntry *t;
2889  SDL_mutex *wait_mutex = SDL_CreateMutex();
2890  int scan_all_pmts_set = 0;
2891  int64_t pkt_ts;
2892 
2893  if (!wait_mutex) {
2894  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
2895  ret = AVERROR(ENOMEM);
2896  goto fail;
2897  }
2898 
2899  memset(st_index, -1, sizeof(st_index));
2900  is->eof = 0;
2901 
2902  pkt = av_packet_alloc();
2903  if (!pkt) {
2904  av_log(NULL, AV_LOG_FATAL, "Could not allocate packet.\n");
2905  ret = AVERROR(ENOMEM);
2906  goto fail;
2907  }
2908  ic = avformat_alloc_context();
2909  if (!ic) {
2910  av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2911  ret = AVERROR(ENOMEM);
2912  goto fail;
2913  }
2914  ic->interrupt_callback.callback = decode_interrupt_cb;
2915  ic->interrupt_callback.opaque = is;
2916  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2917  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2918  scan_all_pmts_set = 1;
2919  }
2920  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2921  if (err < 0) {
2922  print_error(is->filename, err);
2923  ret = -1;
2924  goto fail;
2925  }
2926  if (scan_all_pmts_set)
2927  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2928  remove_avoptions(&format_opts, codec_opts);
2929
2930  ret = check_avoptions(format_opts);
2931  if (ret < 0)
2932  goto fail;
2933  is->ic = ic;
2934 
2935  if (genpts)
2936  ic->flags |= AVFMT_FLAG_GENPTS;
2937 
2938  if (find_stream_info) {
2939  AVDictionary **opts;
2940  int orig_nb_streams = ic->nb_streams;
2941 
2942  err = setup_find_stream_info_opts(ic, codec_opts, &opts);
2943  if (err < 0) {
2944  av_log(NULL, AV_LOG_ERROR,
2945  "Error setting up avformat_find_stream_info() options\n");
2946  ret = err;
2947  goto fail;
2948  }
2949 
2950  err = avformat_find_stream_info(ic, opts);
2951 
2952  for (i = 0; i < orig_nb_streams; i++)
2953  av_dict_free(&opts[i]);
2954  av_freep(&opts);
2955 
2956  if (err < 0) {
2957  av_log(NULL, AV_LOG_WARNING,
2958  "%s: could not find codec parameters\n", is->filename);
2959  ret = -1;
2960  goto fail;
2961  }
2962  }
2963 
2964  if (ic->pb)
2965  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2966 
2967  if (seek_by_bytes < 0)
2968  seek_by_bytes = !(ic->iformat->flags & AVFMT_NO_BYTE_SEEK) &&
2969  !!(ic->iformat->flags & AVFMT_TS_DISCONT) &&
2970  strcmp("ogg", ic->iformat->name);
2971 
2972  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2973 
2974  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2975  window_title = av_asprintf("%s - %s", t->value, input_filename);
2976 
2977  /* if seeking requested, we execute it */
2978  if (start_time != AV_NOPTS_VALUE) {
2979  int64_t timestamp;
2980 
2981  timestamp = start_time;
2982  /* add the stream start time */
2983  if (ic->start_time != AV_NOPTS_VALUE)
2984  timestamp += ic->start_time;
2985  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2986  if (ret < 0) {
2987  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2988  is->filename, (double)timestamp / AV_TIME_BASE);
2989  }
2990  }
2991 
2992  is->realtime = is_realtime(ic);
2993 
2994  if (show_status) {
2995  fprintf(stderr, "\x1b[2K\r");
2996  av_dump_format(ic, 0, is->filename, 0);
2997  }
2998 
2999  for (i = 0; i < ic->nb_streams; i++) {
3000  AVStream *st = ic->streams[i];
3001  enum AVMediaType type = st->codecpar->codec_type;
3002  st->discard = AVDISCARD_ALL;
3003  if (type >= 0 && wanted_stream_spec[type] && st_index[type] == -1)
3004  if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
3005  st_index[type] = i;
3006  // Clear all pre-existing metadata update flags to avoid printing
3007  // initial metadata as update.
3008  st->event_flags &= ~AVSTREAM_EVENT_FLAG_METADATA_UPDATED;
3009
3010  for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
3011  if (wanted_stream_spec[i] && st_index[i] == -1) {
3012  av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
3013  st_index[i] = INT_MAX;
3014  }
3015  }
3016 
3017  if (!video_disable)
3018  st_index[AVMEDIA_TYPE_VIDEO] =
3019  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
3020  st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
3021  if (!audio_disable)
3022  st_index[AVMEDIA_TYPE_AUDIO] =
3023  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
3024  st_index[AVMEDIA_TYPE_AUDIO],
3025  st_index[AVMEDIA_TYPE_VIDEO],
3026  NULL, 0);
3027  if (!video_disable && !subtitle_disable)
3028  st_index[AVMEDIA_TYPE_SUBTITLE] =
3029  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
3030  st_index[AVMEDIA_TYPE_SUBTITLE],
3031  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
3032  st_index[AVMEDIA_TYPE_AUDIO] :
3033  st_index[AVMEDIA_TYPE_VIDEO]),
3034  NULL, 0);
3035 
3036  is->show_mode = show_mode;
3037  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
3038  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
3039  AVCodecParameters *codecpar = st->codecpar;
3040  AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
3041  if (codecpar->width)
3042  set_default_window_size(codecpar->width, codecpar->height, sar);
3043  }
3044 
3045  /* open the streams */
3046  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
3047  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
3048  }
3049 
3050  ret = -1;
3051  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
3052  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
3053  }
3054  if (is->show_mode == SHOW_MODE_NONE)
3055  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
3056 
3057  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
3058  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
3059  }
3060 
3061  if (is->video_stream < 0 && is->audio_stream < 0) {
3062  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
3063  is->filename);
3064  ret = -1;
3065  goto fail;
3066  }
3067 
3068  if (infinite_buffer < 0 && is->realtime)
3069  infinite_buffer = 1;
3070 
3071  for (;;) {
3072  if (is->abort_request)
3073  break;
3074  if (is->paused != is->last_paused) {
3075  is->last_paused = is->paused;
3076  if (is->paused)
3077  is->read_pause_return = av_read_pause(ic);
3078  else
3079  av_read_play(ic);
3080  }
3081 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
3082  if (is->paused &&
3083  (!strcmp(ic->iformat->name, "rtsp") ||
3084  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
3085  /* wait 10 ms to avoid trying to get another packet */
3086  /* XXX: horrible */
3087  SDL_Delay(10);
3088  continue;
3089  }
3090 #endif
3091  if (is->seek_req) {
3092  int64_t seek_target = is->seek_pos;
3093  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
3094  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
3095 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
3096 // of the seek_pos/seek_rel variables
3097 
3098  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
3099  if (ret < 0) {
3100  av_log(NULL, AV_LOG_ERROR,
3101  "%s: error while seeking\n", is->ic->url);
3102  } else {
3103  if (is->audio_stream >= 0)
3104  packet_queue_flush(&is->audioq);
3105  if (is->subtitle_stream >= 0)
3106  packet_queue_flush(&is->subtitleq);
3107  if (is->video_stream >= 0)
3108  packet_queue_flush(&is->videoq);
3109  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
3110  set_clock(&is->extclk, NAN, 0);
3111  } else {
3112  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
3113  }
3114  }
3115  is->seek_req = 0;
3116  is->queue_attachments_req = 1;
3117  is->eof = 0;
3118  if (is->paused)
3119  step_to_next_frame(is);
3120  }
3121  if (is->queue_attachments_req) {
3122  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
3123  if ((ret = av_packet_ref(pkt, &is->video_st->attached_pic)) < 0)
3124  goto fail;
3125  packet_queue_put(&is->videoq, pkt);
3126  packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
3127  }
3128  is->queue_attachments_req = 0;
3129  }
3130 
3131  /* if the queues are full, no need to read more */
3132  if (infinite_buffer<1 &&
3133  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
3134  || (stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq) &&
3135  stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq) &&
3136  stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq)))) {
3137  /* wait 10 ms */
3138  SDL_LockMutex(wait_mutex);
3139  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3140  SDL_UnlockMutex(wait_mutex);
3141  continue;
3142  }
3143  if (!is->paused &&
3144  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
3145  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
3146  if (loop != 1 && (!loop || --loop)) {
3147  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
3148  } else if (autoexit) {
3149  ret = AVERROR_EOF;
3150  goto fail;
3151  }
3152  }
3153  ret = av_read_frame(ic, pkt);
3154  if (ret < 0) {
3155  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
3156  if (is->video_stream >= 0)
3157  packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
3158  if (is->audio_stream >= 0)
3159  packet_queue_put_nullpacket(&is->audioq, pkt, is->audio_stream);
3160  if (is->subtitle_stream >= 0)
3161  packet_queue_put_nullpacket(&is->subtitleq, pkt, is->subtitle_stream);
3162  is->eof = 1;
3163  }
3164  if (ic->pb && ic->pb->error) {
3165  if (autoexit)
3166  goto fail;
3167  else
3168  break;
3169  }
3170  SDL_LockMutex(wait_mutex);
3171  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3172  SDL_UnlockMutex(wait_mutex);
3173  continue;
3174  } else {
3175  is->eof = 0;
3176  }
3177 
3180  fprintf(stderr, "\x1b[2K\r");
3181  snprintf(metadata_description,
3182  sizeof(metadata_description),
3183  "\r New metadata for stream %d",
3184  pkt->stream_index);
3186  metadata_description, " ", AV_LOG_INFO);
3187  }
3189 
3190  /* check if packet is in play range specified by user, then queue, otherwise discard */
3191  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3192  pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
3193  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3194  (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3195  av_q2d(ic->streams[pkt->stream_index]->time_base) -
3196  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3197  <= ((double)duration / 1000000);
3198  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3199  packet_queue_put(&is->audioq, pkt);
3200  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3201  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3202  packet_queue_put(&is->videoq, pkt);
3203  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3204  packet_queue_put(&is->subtitleq, pkt);
3205  } else {
3206  av_packet_unref(pkt);
3207  }
3208  }
3209 
3210  ret = 0;
3211  fail:
3212  if (ic && !is->ic)
3213  avformat_close_input(&ic);
3214 
3215  av_packet_free(&pkt);
3216  if (ret != 0) {
3217  SDL_Event event;
3218 
3219  event.type = FF_QUIT_EVENT;
3220  event.user.data1 = is;
3221  SDL_PushEvent(&event);
3222  }
3223  SDL_DestroyMutex(wait_mutex);
3224  return 0;
3225 }
3226 
3227 static VideoState *stream_open(const char *filename,
3228  const AVInputFormat *iformat)
3229 {
3230  VideoState *is;
3231 
3232  is = av_mallocz(sizeof(VideoState));
3233  if (!is)
3234  return NULL;
3235  is->last_video_stream = is->video_stream = -1;
3236  is->last_audio_stream = is->audio_stream = -1;
3237  is->last_subtitle_stream = is->subtitle_stream = -1;
3238  is->filename = av_strdup(filename);
3239  if (!is->filename)
3240  goto fail;
3241  is->iformat = iformat;
3242  is->ytop = 0;
3243  is->xleft = 0;
3244 
3245  /* start video display */
3246  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3247  goto fail;
3248  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3249  goto fail;
3250  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3251  goto fail;
3252 
3253  if (packet_queue_init(&is->videoq) < 0 ||
3254  packet_queue_init(&is->audioq) < 0 ||
3255  packet_queue_init(&is->subtitleq) < 0)
3256  goto fail;
3257 
3258  if (!(is->continue_read_thread = SDL_CreateCond())) {
3259  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
3260  goto fail;
3261  }
3262 
3263  init_clock(&is->vidclk, &is->videoq.serial);
3264  init_clock(&is->audclk, &is->audioq.serial);
3265  init_clock(&is->extclk, &is->extclk.serial);
3266  is->audio_clock_serial = -1;
3267  if (startup_volume < 0)
3268  av_log(NULL, AV_LOG_WARNING, "-volume=%d < 0, setting to 0\n", startup_volume);
3269  if (startup_volume > 100)
3270  av_log(NULL, AV_LOG_WARNING, "-volume=%d > 100, setting to 100\n", startup_volume);
3271  if (video_background) {
3272  if (!strcmp(video_background, "none")) {
3273  is->render_params.video_background_type = VIDEO_BACKGROUND_NONE;
3274  } else if (strcmp(video_background, "tiles")) {
3275  if (av_parse_color(is->render_params.video_background_color, video_background, -1, NULL) >= 0)
3276  is->render_params.video_background_type = VIDEO_BACKGROUND_COLOR;
3277  else
3278  goto fail;
3279  }
3280  }
3281  startup_volume = av_clip(startup_volume, 0, 100);
3282  startup_volume = av_clip(SDL_MIX_MAXVOLUME * startup_volume / 100, 0, SDL_MIX_MAXVOLUME);
3283  is->audio_volume = startup_volume;
3284  is->muted = 0;
3285  is->av_sync_type = av_sync_type;
3286  is->read_tid = SDL_CreateThread(read_thread, "read_thread", is);
3287  if (!is->read_tid) {
3288  av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
3289 fail:
3290  stream_close(is);
3291  return NULL;
3292  }
3293  return is;
3294 }
3295 
3296 static void stream_cycle_channel(VideoState *is, int codec_type)
3297 {
3298  AVFormatContext *ic = is->ic;
3299  int start_index, stream_index;
3300  int old_index;
3301  AVStream *st;
3302  AVProgram *p = NULL;
3303  int nb_streams = is->ic->nb_streams;
3304 
3305  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3306  start_index = is->last_video_stream;
3307  old_index = is->video_stream;
3308  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3309  start_index = is->last_audio_stream;
3310  old_index = is->audio_stream;
3311  } else {
3312  start_index = is->last_subtitle_stream;
3313  old_index = is->subtitle_stream;
3314  }
3315  stream_index = start_index;
3316 
3317  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3318  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3319  if (p) {
3320  nb_streams = p->nb_stream_indexes;
3321  for (start_index = 0; start_index < nb_streams; start_index++)
3322  if (p->stream_index[start_index] == stream_index)
3323  break;
3324  if (start_index == nb_streams)
3325  start_index = -1;
3326  stream_index = start_index;
3327  }
3328  }
3329 
3330  for (;;) {
3331  if (++stream_index >= nb_streams)
3332  {
3333  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3334  {
3335  stream_index = -1;
3336  is->last_subtitle_stream = -1;
3337  goto the_end;
3338  }
3339  if (start_index == -1)
3340  return;
3341  stream_index = 0;
3342  }
3343  if (stream_index == start_index)
3344  return;
3345  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3346  if (st->codecpar->codec_type == codec_type) {
3347  /* check that parameters are OK */
3348  switch (codec_type) {
3349  case AVMEDIA_TYPE_AUDIO:
3350  if (st->codecpar->sample_rate != 0 &&
3351  st->codecpar->ch_layout.nb_channels != 0)
3352  goto the_end;
3353  break;
3354  case AVMEDIA_TYPE_VIDEO:
3355  case AVMEDIA_TYPE_SUBTITLE:
3356  goto the_end;
3357  default:
3358  break;
3359  }
3360  }
3361  }
3362  the_end:
3363  if (p && stream_index != -1)
3364  stream_index = p->stream_index[stream_index];
3365  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3366  av_get_media_type_string(codec_type),
3367  old_index,
3368  stream_index);
3369 
3370  stream_component_close(is, old_index);
3371  stream_component_open(is, stream_index);
3372 }
3373 
3374 
3375 static void toggle_full_screen(VideoState *is)
3376 {
3377  is_full_screen = !is_full_screen;
3378  SDL_SetWindowFullscreen(window, is_full_screen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0);
3379 }
3380 
3381 static void toggle_audio_display(VideoState *is)
3382 {
3383  int next = is->show_mode;
3384  do {
3385  next = (next + 1) % SHOW_MODE_NB;
3386  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3387  if (is->show_mode != next) {
3388  is->force_refresh = 1;
3389  is->show_mode = next;
3390  }
3391 }
3392 
3393 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3394  double remaining_time = 0.0;
3395  SDL_PumpEvents();
3396  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
3397  if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3398  SDL_ShowCursor(0);
3399  cursor_hidden = 1;
3400  }
3401  if (remaining_time > 0.0)
3402  av_usleep((int64_t)(remaining_time * 1000000.0));
3403  remaining_time = REFRESH_RATE;
3404  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3405  video_refresh(is, &remaining_time);
3406  SDL_PumpEvents();
3407  }
3408 }
3409 
3410 static void seek_chapter(VideoState *is, int incr)
3411 {
3412  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3413  int i;
3414 
3415  if (!is->ic->nb_chapters)
3416  return;
3417 
3418  /* find the current chapter */
3419  for (i = 0; i < is->ic->nb_chapters; i++) {
3420  AVChapter *ch = is->ic->chapters[i];
3421  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3422  i--;
3423  break;
3424  }
3425  }
3426 
3427  i += incr;
3428  i = FFMAX(i, 0);
3429  if (i >= is->ic->nb_chapters)
3430  return;
3431 
3432  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3433  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3434  AV_TIME_BASE_Q), 0, 0);
3435 }
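
/*
 * [Editorial sketch, not part of ffplay.c] seek_chapter() compares the master clock against
 * each chapter start with av_compare_ts(), which compares timestamps expressed in different
 * time bases without going through floating point, and then rescales the chosen chapter start
 * into AV_TIME_BASE units for stream_seek(). A small illustration of those two calls; the
 * values and the helper name are made up.
 */
#include "libavutil/avutil.h"
#include "libavutil/mathematics.h"

static void example_chapter_math(void)
{
    AVRational chapter_tb   = { 1, 1000 };                      /* chapter timestamps in ms */
    int64_t chapter_start   = 90000;                            /* 90 s in chapter_tb */
    int64_t clock_us        = 91 * (int64_t)AV_TIME_BASE;       /* 91 s in AV_TIME_BASE_Q */

    /* returns 1 here: the clock is past the chapter start */
    int cmp = av_compare_ts(clock_us, AV_TIME_BASE_Q, chapter_start, chapter_tb);

    /* 90000 ms rescaled to microseconds: 90000000 */
    int64_t seek_target = av_rescale_q(chapter_start, chapter_tb, AV_TIME_BASE_Q);

    (void)cmp; (void)seek_target;
}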
3436 
3437 /* handle an event sent by the GUI */
3438 static void event_loop(VideoState *cur_stream)
3439 {
3440  SDL_Event event;
3441  double incr, pos, frac;
3442 
3443  for (;;) {
3444  double x;
3445  refresh_loop_wait_event(cur_stream, &event);
3446  switch (event.type) {
3447  case SDL_KEYDOWN:
3448  if (exit_on_keydown || event.key.keysym.sym == SDLK_ESCAPE || event.key.keysym.sym == SDLK_q) {
3449  do_exit(cur_stream);
3450  break;
3451  }
3452  // If we don't yet have a window, skip all key events, because read_thread might still be initializing...
3453  if (!cur_stream->width)
3454  continue;
3455  switch (event.key.keysym.sym) {
3456  case SDLK_f:
3457  toggle_full_screen(cur_stream);
3458  cur_stream->force_refresh = 1;
3459  break;
3460  case SDLK_p:
3461  case SDLK_SPACE:
3462  toggle_pause(cur_stream);
3463  break;
3464  case SDLK_m:
3465  toggle_mute(cur_stream);
3466  break;
3467  case SDLK_KP_MULTIPLY:
3468  case SDLK_0:
3469  update_volume(cur_stream, 1, SDL_VOLUME_STEP);
3470  break;
3471  case SDLK_KP_DIVIDE:
3472  case SDLK_9:
3473  update_volume(cur_stream, -1, SDL_VOLUME_STEP);
3474  break;
3475  case SDLK_s: // S: Step to next frame
3476  step_to_next_frame(cur_stream);
3477  break;
3478  case SDLK_a:
3479  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3480  break;
3481  case SDLK_v:
3482  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3483  break;
3484  case SDLK_c:
3485  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3486  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3487  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3488  break;
3489  case SDLK_t:
3490  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3491  break;
3492  case SDLK_w:
3493  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3494  if (++cur_stream->vfilter_idx >= nb_vfilters)
3495  cur_stream->vfilter_idx = 0;
3496  } else {
3497  cur_stream->vfilter_idx = 0;
3498  toggle_audio_display(cur_stream);
3499  }
3500  break;
3501  case SDLK_PAGEUP:
3502  if (cur_stream->ic->nb_chapters <= 1) {
3503  incr = 600.0;
3504  goto do_seek;
3505  }
3506  seek_chapter(cur_stream, 1);
3507  break;
3508  case SDLK_PAGEDOWN:
3509  if (cur_stream->ic->nb_chapters <= 1) {
3510  incr = -600.0;
3511  goto do_seek;
3512  }
3513  seek_chapter(cur_stream, -1);
3514  break;
3515  case SDLK_LEFT:
3516  incr = seek_interval ? -seek_interval : -10.0;
3517  goto do_seek;
3518  case SDLK_RIGHT:
3519  incr = seek_interval ? seek_interval : 10.0;
3520  goto do_seek;
3521  case SDLK_UP:
3522  incr = 60.0;
3523  goto do_seek;
3524  case SDLK_DOWN:
3525  incr = -60.0;
3526  do_seek:
3527  if (seek_by_bytes) {
3528  pos = -1;
3529  if (pos < 0 && cur_stream->video_stream >= 0)
3530  pos = frame_queue_last_pos(&cur_stream->pictq);
3531  if (pos < 0 && cur_stream->audio_stream >= 0)
3532  pos = frame_queue_last_pos(&cur_stream->sampq);
3533  if (pos < 0)
3534  pos = avio_tell(cur_stream->ic->pb);
3535  if (cur_stream->ic->bit_rate)
3536  incr *= cur_stream->ic->bit_rate / 8.0;
3537  else
3538  incr *= 180000.0;
3539  pos += incr;
3540  stream_seek(cur_stream, pos, incr, 1);
3541  } else {
3542  pos = get_master_clock(cur_stream);
3543  if (isnan(pos))
3544  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3545  pos += incr;
3546  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3547  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3548  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3549  }
3550  break;
3551  default:
3552  break;
3553  }
3554  break;
3555  case SDL_MOUSEBUTTONDOWN:
3556  if (exit_on_mousedown) {
3557  do_exit(cur_stream);
3558  break;
3559  }
3560  if (event.button.button == SDL_BUTTON_LEFT) {
3561  static int64_t last_mouse_left_click = 0;
3562  if (av_gettime_relative() - last_mouse_left_click <= 500000) {
3563  toggle_full_screen(cur_stream);
3564  cur_stream->force_refresh = 1;
3565  last_mouse_left_click = 0;
3566  } else {
3567  last_mouse_left_click = av_gettime_relative();
3568  }
3569  }
3570  case SDL_MOUSEMOTION:
3571  if (cursor_hidden) {
3572  SDL_ShowCursor(1);
3573  cursor_hidden = 0;
3574  }
3575  cursor_last_shown = av_gettime_relative();
3576  if (event.type == SDL_MOUSEBUTTONDOWN) {
3577  if (event.button.button != SDL_BUTTON_RIGHT)
3578  break;
3579  x = event.button.x;
3580  } else {
3581  if (!(event.motion.state & SDL_BUTTON_RMASK))
3582  break;
3583  x = event.motion.x;
3584  }
3585  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3586  uint64_t size = avio_size(cur_stream->ic->pb);
3587  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3588  } else {
3589  int64_t ts;
3590  int ns, hh, mm, ss;
3591  int tns, thh, tmm, tss;
3592  tns = cur_stream->ic->duration / 1000000LL;
3593  thh = tns / 3600;
3594  tmm = (tns % 3600) / 60;
3595  tss = (tns % 60);
3596  frac = x / cur_stream->width;
3597  ns = frac * tns;
3598  hh = ns / 3600;
3599  mm = (ns % 3600) / 60;
3600  ss = (ns % 60);
3601  av_log(NULL, AV_LOG_INFO,
3602  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3603  hh, mm, ss, thh, tmm, tss);
3604  ts = frac * cur_stream->ic->duration;
3605  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3606  ts += cur_stream->ic->start_time;
3607  stream_seek(cur_stream, ts, 0, 0);
3608  }
3609  break;
3610  case SDL_WINDOWEVENT:
3611  switch (event.window.event) {
3612  case SDL_WINDOWEVENT_SIZE_CHANGED:
3613  screen_width = cur_stream->width = event.window.data1;
3614  screen_height = cur_stream->height = event.window.data2;
3615  if (cur_stream->vis_texture) {
3616  SDL_DestroyTexture(cur_stream->vis_texture);
3617  cur_stream->vis_texture = NULL;
3618  }
3619  if (vk_renderer)
3621  case SDL_WINDOWEVENT_EXPOSED:
3622  cur_stream->force_refresh = 1;
3623  }
3624  break;
3625  case SDL_QUIT:
3626  case FF_QUIT_EVENT:
3627  do_exit(cur_stream);
3628  break;
3629  default:
3630  break;
3631  }
3632  }
3633 }
3634 
3635 static int opt_width(void *optctx, const char *opt, const char *arg)
3636 {
3637  double num;
3638  int ret = parse_number(opt, arg, OPT_TYPE_INT64, 1, INT_MAX, &num);
3639  if (ret < 0)
3640  return ret;
3641 
3642  screen_width = num;
3643  return 0;
3644 }
3645 
3646 static int opt_height(void *optctx, const char *opt, const char *arg)
3647 {
3648  double num;
3649  int ret = parse_number(opt, arg, OPT_TYPE_INT64, 1, INT_MAX, &num);
3650  if (ret < 0)
3651  return ret;
3652 
3653  screen_height = num;
3654  return 0;
3655 }
3656 
3657 static int opt_format(void *optctx, const char *opt, const char *arg)
3658 {
3659  file_iformat = av_find_input_format(arg);
3660  if (!file_iformat) {
3661  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3662  return AVERROR(EINVAL);
3663  }
3664  return 0;
3665 }
3666 
3667 static int opt_sync(void *optctx, const char *opt, const char *arg)
3668 {
3669  if (!strcmp(arg, "audio"))
3670  av_sync_type = AV_SYNC_AUDIO_MASTER;
3671  else if (!strcmp(arg, "video"))
3672  av_sync_type = AV_SYNC_VIDEO_MASTER;
3673  else if (!strcmp(arg, "ext"))
3674  av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3675  else {
3676  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3677  exit(1);
3678  }
3679  return 0;
3680 }
3681 
3682 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3683 {
3684  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3685  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3686  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT : SHOW_MODE_NONE;
3687 
3688  if (show_mode == SHOW_MODE_NONE) {
3689  double num;
3690  int ret = parse_number(opt, arg, OPT_TYPE_INT, 0, SHOW_MODE_NB-1, &num);
3691  if (ret < 0)
3692  return ret;
3693  show_mode = num;
3694  }
3695  return 0;
3696 }
3697 
3698 static int opt_input_file(void *optctx, const char *filename)
3699 {
3700  if (input_filename) {
3701  av_log(NULL, AV_LOG_FATAL,
3702  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3703  filename, input_filename);
3704  return AVERROR(EINVAL);
3705  }
3706  if (!strcmp(filename, "-"))
3707  filename = "fd:";
3708  input_filename = av_strdup(filename);
3709  if (!input_filename)
3710  return AVERROR(ENOMEM);
3711 
3712  return 0;
3713 }
3714 
3715 static int opt_codec(void *optctx, const char *opt, const char *arg)
3716 {
3717  const char *spec = strchr(opt, ':');
3718  const char **name;
3719  if (!spec) {
3720  av_log(NULL, AV_LOG_ERROR,
3721  "No media specifier was specified in '%s' in option '%s'\n",
3722  arg, opt);
3723  return AVERROR(EINVAL);
3724  }
3725  spec++;
3726 
3727  switch (spec[0]) {
3728  case 'a' : name = &audio_codec_name; break;
3729  case 's' : name = &subtitle_codec_name; break;
3730  case 'v' : name = &video_codec_name; break;
3731  default:
3732  av_log(NULL, AV_LOG_ERROR,
3733  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3734  return AVERROR(EINVAL);
3735  }
3736 
3737  av_freep(name);
3738  *name = av_strdup(arg);
3739  return *name ? 0 : AVERROR(ENOMEM);
3740 }
3741 
3742 static int dummy;
3743 
3744 static const OptionDef options[] = {
3745  CMDUTILS_COMMON_OPTIONS
3746  { "x", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3747  { "y", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3748  { "fs", OPT_TYPE_BOOL, 0, { &is_full_screen }, "force full screen" },
3749  { "an", OPT_TYPE_BOOL, 0, { &audio_disable }, "disable audio" },
3750  { "vn", OPT_TYPE_BOOL, 0, { &video_disable }, "disable video" },
3751  { "sn", OPT_TYPE_BOOL, 0, { &subtitle_disable }, "disable subtitling" },
3752  { "ast", OPT_TYPE_STRING, OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3753  { "vst", OPT_TYPE_STRING, OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3754  { "sst", OPT_TYPE_STRING, OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3755  { "ss", OPT_TYPE_TIME, 0, { &start_time }, "seek to a given position in seconds", "pos" },
3756  { "t", OPT_TYPE_TIME, 0, { &duration }, "play \"duration\" seconds of audio/video", "duration" },
3757  { "bytes", OPT_TYPE_INT, 0, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3758  { "seek_interval", OPT_TYPE_FLOAT, 0, { &seek_interval }, "set seek interval for left/right keys, in seconds", "seconds" },
3759  { "nodisp", OPT_TYPE_BOOL, 0, { &display_disable }, "disable graphical display" },
3760  { "noborder", OPT_TYPE_BOOL, 0, { &borderless }, "borderless window" },
3761  { "alwaysontop", OPT_TYPE_BOOL, 0, { &alwaysontop }, "window always on top" },
3762  { "volume", OPT_TYPE_INT, 0, { &startup_volume}, "set startup volume 0=min 100=max", "volume" },
3763  { "f", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3764  { "stats", OPT_TYPE_BOOL, OPT_EXPERT, { &show_status }, "show status", "" },
3765  { "fast", OPT_TYPE_BOOL, OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3766  { "genpts", OPT_TYPE_BOOL, OPT_EXPERT, { &genpts }, "generate pts", "" },
3767  { "drp", OPT_TYPE_INT, OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3768  { "lowres", OPT_TYPE_INT, OPT_EXPERT, { &lowres }, "", "" },
3769  { "sync", OPT_TYPE_FUNC, OPT_FUNC_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3770  { "autoexit", OPT_TYPE_BOOL, OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3771  { "exitonkeydown", OPT_TYPE_BOOL, OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3772  { "exitonmousedown", OPT_TYPE_BOOL, OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3773  { "loop", OPT_TYPE_INT, OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3774  { "framedrop", OPT_TYPE_BOOL, OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3775  { "infbuf", OPT_TYPE_BOOL, OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3776  { "window_title", OPT_TYPE_STRING, 0, { &window_title }, "set window title", "window title" },
3777  { "left", OPT_TYPE_INT, OPT_EXPERT, { &screen_left }, "set the x position for the left of the window", "x pos" },
3778  { "top", OPT_TYPE_INT, OPT_EXPERT, { &screen_top }, "set the y position for the top of the window", "y pos" },
3779  { "vf", OPT_TYPE_FUNC, OPT_FUNC_ARG | OPT_EXPERT, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3780  { "af", OPT_TYPE_STRING, 0, { &afilters }, "set audio filters", "filter_graph" },
3781  { "rdftspeed", OPT_TYPE_INT, OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3782  { "showmode", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3783  { "i", OPT_TYPE_BOOL, 0, { &dummy}, "read specified file", "input_file"},
3784  { "codec", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3785  { "acodec", OPT_TYPE_STRING, OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3786  { "scodec", OPT_TYPE_STRING, OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3787  { "vcodec", OPT_TYPE_STRING, OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3788  { "autorotate", OPT_TYPE_BOOL, 0, { &autorotate }, "automatically rotate video", "" },
3789  { "find_stream_info", OPT_TYPE_BOOL, OPT_INPUT | OPT_EXPERT, { &find_stream_info },
3790  "read and decode the streams to fill missing information with heuristics" },
3791  { "filter_threads", OPT_TYPE_INT, OPT_EXPERT, { &filter_nbthreads }, "number of filter threads per graph" },
3792  { "enable_vulkan", OPT_TYPE_BOOL, 0, { &enable_vulkan }, "enable vulkan renderer" },
3793  { "vulkan_params", OPT_TYPE_STRING, OPT_EXPERT, { &vulkan_params }, "vulkan configuration using a list of key=value pairs separated by ':'" },
3794  { "video_bg", OPT_TYPE_STRING, OPT_EXPERT, { &video_background }, "set video background for transparent videos" },
3795  { "hwaccel", OPT_TYPE_STRING, OPT_EXPERT, { &hwaccel }, "use HW accelerated decoding" },
3796  { NULL, },
3797 };
3798 
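
/* Editor's note: the options[] table above is consumed by the generic
 * option parser in cmdutils.c. As a rough illustration of the table-driven
 * idea only (a simplified sketch, NOT ffplay's actual OptionDef machinery;
 * the struct, names, and loop below are invented for this example), a
 * boolean flag such as "-fs" can be matched against a table entry and
 * written through the stored pointer: */

#include <string.h>

struct demo_opt {
    const char *name;   /* option name without the leading '-'       */
    int        *dst;    /* variable set to 1 when the option is seen */
};

static int demo_fullscreen, demo_audio_disable;

static const struct demo_opt demo_opts[] = {
    { "fs", &demo_fullscreen    },
    { "an", &demo_audio_disable },
    { NULL, NULL },
};

/* Returns 1 if arg (e.g. "-fs") matched a known boolean option. */
static int demo_parse_bool_opt(const char *arg)
{
    if (arg[0] != '-')
        return 0;
    for (const struct demo_opt *o = demo_opts; o->name; o++) {
        if (!strcmp(arg + 1, o->name)) {
            *o->dst = 1;
            return 1;
        }
    }
    return 0;
}
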
3799 static void show_usage(void)
3800 {
3801  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3802  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3803  av_log(NULL, AV_LOG_INFO, "\n");
3804 }
3805 
3806 void show_help_default(const char *opt, const char *arg)
3807 {
3808  av_log_set_callback(log_callback_help);
3809  show_usage();
3810  show_help_options(options, "Main options:", 0, OPT_EXPERT);
3811  show_help_options(options, "Advanced options:", OPT_EXPERT, 0);
3812  printf("\n");
3813  show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3814  show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3815  show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3816  printf("\nWhile playing:\n"
3817  "q, ESC quit\n"
3818  "f toggle full screen\n"
3819  "p, SPC pause\n"
3820  "m toggle mute\n"
3821  "9, 0 decrease and increase volume respectively\n"
3822  "/, * decrease and increase volume respectively\n"
3823  "a cycle audio channel in the current program\n"
3824  "v cycle video channel\n"
3825  "t cycle subtitle channel in the current program\n"
3826  "c cycle program\n"
3827  "w cycle video filters or show modes\n"
3828  "s activate frame-step mode\n"
3829  "left/right seek backward/forward 10 seconds or to custom interval if -seek_interval is set\n"
3830  "down/up seek backward/forward 1 minute\n"
3831  "page down/page up seek backward/forward 10 minutes\n"
3832  "right mouse click seek to percentage in file corresponding to fraction of width\n"
3833  "left double-click toggle full screen\n"
3834  );
3835 }
3836 
3837 /* Called from the main */
3838 int main(int argc, char **argv)
3839 {
3840  int flags, ret;
3841  VideoState *is;
3842 
3843  init_dynload();
3844 
3845  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3846  parse_loglevel(argc, argv, options);
3847 
3848  /* register all codecs, demux and protocols */
3849 #if CONFIG_AVDEVICE
3850  avdevice_register_all();
3851 #endif
3852  avformat_network_init();
3853 
3854  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3855  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3856 
3857  show_banner(argc, argv, options);
3858 
3859  ret = parse_options(NULL, argc, argv, options, opt_input_file);
3860  if (ret < 0)
3861  exit(ret == AVERROR_EXIT ? 0 : 1);
3862 
3863  if (!input_filename) {
3864  show_usage();
3865  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3866  av_log(NULL, AV_LOG_FATAL,
3867  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3868  exit(1);
3869  }
3870 
3871  if (display_disable) {
3872  video_disable = 1;
3873  }
3874  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3875  if (audio_disable)
3876  flags &= ~SDL_INIT_AUDIO;
3877  else {
3878  /* Try to work around an occasional ALSA buffer underflow issue when the
3879  * period size is NPOT due to ALSA resampling by forcing the buffer size. */
3880  if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
3881  SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1);
3882  }
3883  if (display_disable)
3884  flags &= ~SDL_INIT_VIDEO;
3885  if (SDL_Init (flags)) {
3886  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3887  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3888  exit(1);
3889  }
3890 
3891  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3892  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3893 
3894  if (!display_disable) {
3895  int flags = SDL_WINDOW_HIDDEN;
3896  if (alwaysontop)
3897 #if SDL_VERSION_ATLEAST(2,0,5)
3898  flags |= SDL_WINDOW_ALWAYS_ON_TOP;
3899 #else
3900  av_log(NULL, AV_LOG_WARNING, "Your SDL version doesn't support SDL_WINDOW_ALWAYS_ON_TOP. Feature will be inactive.\n");
3901 #endif
3902  if (borderless)
3903  flags |= SDL_WINDOW_BORDERLESS;
3904  else
3905  flags |= SDL_WINDOW_RESIZABLE;
3906 
3907 #ifdef SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR
3908  SDL_SetHint(SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR, "0");
3909 #endif
3910  if (hwaccel && !enable_vulkan) {
3911  av_log(NULL, AV_LOG_INFO, "Enable vulkan renderer to support hwaccel %s\n", hwaccel);
3912  enable_vulkan = 1;
3913  }
3914  if (enable_vulkan) {
3915  vk_renderer = vk_get_renderer();
3916  if (vk_renderer) {
3917 #if SDL_VERSION_ATLEAST(2, 0, 6)
3918  flags |= SDL_WINDOW_VULKAN;
3919 #endif
3920  } else {
3921  av_log(NULL, AV_LOG_WARNING, "Doesn't support vulkan renderer, fallback to SDL renderer\n");
3922  enable_vulkan = 0;
3923  }
3924  }
3925  window = SDL_CreateWindow(program_name, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, default_width, default_height, flags);
3926  SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
3927  if (!window) {
3928  av_log(NULL, AV_LOG_FATAL, "Failed to create window: %s", SDL_GetError());
3929  do_exit(NULL);
3930  }
3931 
3932  if (vk_renderer) {
3933  AVDictionary *dict = NULL;
3934 
3935  if (vulkan_params) {
3936  int ret = av_dict_parse_string(&dict, vulkan_params, "=", ":", 0);
3937  if (ret < 0) {
3938  av_log(NULL, AV_LOG_FATAL, "Failed to parse, %s\n", vulkan_params);
3939  do_exit(NULL);
3940  }
3941  }
3942  ret = vk_renderer_create(vk_renderer, window, dict);
3943  av_dict_free(&dict);
3944  if (ret < 0) {
3945  av_log(NULL, AV_LOG_FATAL, "Failed to create vulkan renderer, %s\n", av_err2str(ret));
3946  do_exit(NULL);
3947  }
3948  } else {
3949  renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
3950  if (!renderer) {
3951  av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
3952  renderer = SDL_CreateRenderer(window, -1, 0);
3953  }
3954  if (renderer) {
3955  if (!SDL_GetRendererInfo(renderer, &renderer_info))
3956  av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", renderer_info.name);
3957  }
3958  if (!renderer || !renderer_info.num_texture_formats) {
3959  av_log(NULL, AV_LOG_FATAL, "Failed to create window or renderer: %s", SDL_GetError());
3960  do_exit(NULL);
3961  }
3962  }
3963  }
3964 
3965  is = stream_open(input_filename, file_iformat);
3966  if (!is) {
3967  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3968  do_exit(NULL);
3969  }
3970 
3971  event_loop(is);
3972 
3973  /* never returns */
3974 
3975  return 0;
3976 }
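
/* Editor's note: the SDL setup pattern used by main() above (subsystem
 * initialization, a hidden resizable window, and a fall back from an
 * accelerated renderer to whatever SDL can provide) reduces to the
 * self-contained sketch below. This is an illustration against the public
 * SDL2 API, not part of ffplay.c; error handling is trimmed to early
 * returns and the window title/size are placeholders. */

#include <SDL.h>

static int sdl_setup_sketch(void)
{
    SDL_Window   *win;
    SDL_Renderer *ren;
    Uint32 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;

    if (SDL_Init(flags)) {                      /* non-zero return means failure */
        SDL_Log("Could not initialize SDL - %s", SDL_GetError());
        return -1;
    }

    win = SDL_CreateWindow("sketch", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
                           640, 480, SDL_WINDOW_HIDDEN | SDL_WINDOW_RESIZABLE);
    if (!win)
        return -1;

    /* Prefer an accelerated, vsynced renderer, then fall back to the default one. */
    ren = SDL_CreateRenderer(win, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
    if (!ren)
        ren = SDL_CreateRenderer(win, -1, 0);
    if (!ren)
        return -1;

    /* ... event/render loop would run here ... */

    SDL_DestroyRenderer(ren);
    SDL_DestroyWindow(win);
    SDL_Quit();
    return 0;
}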
AV_OPT_SEARCH_CHILDREN
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:605
flags
const SwsFlags flags[]
Definition: swscale.c:61
AVSubtitle
Definition: avcodec.h:2090
rect::w
int w
Definition: f_ebur128.c:78
do_exit
static void do_exit(VideoState *is)
Definition: ffplay.c:1346
AVBufferSrcParameters::color_space
enum AVColorSpace color_space
Video only, the YUV colorspace and range.
Definition: buffersrc.h:121
VideoState::seek_rel
int64_t seek_rel
Definition: ffplay.c:213
AV_PKT_DATA_DISPLAYMATRIX
@ AV_PKT_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: packet.h:105
VIDEO_BACKGROUND_TILE_SIZE
#define VIDEO_BACKGROUND_TILE_SIZE
Definition: ffplay_renderer.h:28
RGB
Definition: cms.c:66
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:432
show_help_options
void show_help_options(const OptionDef *options, const char *msg, int req_flags, int rej_flags)
Print help for all options matching specified flags.
Definition: cmdutils.c:107
AVCodec
AVCodec.
Definition: codec.h:172
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:203
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
VideoState::video_st
AVStream * video_st
Definition: ffplay.c:284
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
av_buffersink_get_ch_layout
int av_buffersink_get_ch_layout(const AVFilterContext *ctx, AVChannelLayout *out)
Definition: buffersink.c:367
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
Frame::width
int width
Definition: ffplay.c:160
AVFMT_NO_BYTE_SEEK
#define AVFMT_NO_BYTE_SEEK
Format does not allow seeking by bytes.
Definition: avformat.h:486
AV_SYNC_AUDIO_MASTER
@ AV_SYNC_AUDIO_MASTER
Definition: ffplay.c:182
av_clip
#define av_clip
Definition: common.h:100
VIDEO_BACKGROUND_TILES
@ VIDEO_BACKGROUND_TILES
Definition: ffplay_renderer.h:31
VideoState::rdft
AVTXContext * rdft
Definition: ffplay.c:264
AudioParams::fmt
enum AVSampleFormat fmt
Definition: ffplay.c:133
av_sync_type
static int av_sync_type
Definition: ffplay.c:327
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
printf
__device__ int printf(const char *,...)
AVSubtitle::rects
AVSubtitleRect ** rects
Definition: avcodec.h:2095
opt.h
renderer_info
static SDL_RendererInfo renderer_info
Definition: ffplay.c:366
FrameData::pkt_pos
int64_t pkt_pos
Definition: ffplay.c:149
get_rotation
double get_rotation(const int32_t *displaymatrix)
Definition: cmdutils.c:1553
AVALPHA_MODE_STRAIGHT
@ AVALPHA_MODE_STRAIGHT
Alpha channel is independent of color values.
Definition: pixfmt.h:813
frame_queue_nb_remaining
static int frame_queue_nb_remaining(FrameQueue *f)
Definition: ffplay.c:804
VideoState::agraph
AVFilterGraph * agraph
Definition: ffplay.c:299
configure_audio_filters
static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
Definition: ffplay.c:2055
opt_add_vfilter
static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:396
frame_queue_next
static void frame_queue_next(FrameQueue *f)
Definition: ffplay.c:788
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:51
Decoder::finished
int finished
Definition: ffplay.c:192
AVFilterGraph::nb_threads
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:615
av_compare_ts
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
Definition: mathematics.c:147
FrameData
Definition: ffmpeg.h:718
check_avoptions
int check_avoptions(AVDictionary *m)
Definition: cmdutils.c:1605
frame_queue_last_pos
static int64_t frame_queue_last_pos(FrameQueue *f)
Definition: ffplay.c:810
av_find_best_stream
int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type, int wanted_stream_nb, int related_stream, const AVCodec **decoder_ret, int flags)
Definition: avformat.c:378
out
FILE * out
Definition: movenc.c:55
VideoState::rdft_fn
av_tx_fn rdft_fn
Definition: ffplay.c:265
AVCodecContext::sample_rate
int sample_rate
samples per second
Definition: avcodec.h:1032
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
video_thread
static int video_thread(void *arg)
Definition: ffplay.c:2216
VideoState::av_sync_type
int av_sync_type
Definition: ffplay.c:232
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:659
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
set_default_window_size
static void set_default_window_size(int width, int height, AVRational sar)
Definition: ffplay.c:1378
AV_PIX_FMT_BGR32
#define AV_PIX_FMT_BGR32
Definition: pixfmt.h:513
AV_NOSYNC_THRESHOLD
#define AV_NOSYNC_THRESHOLD
Definition: ffplay.c:85
AVFormatContext::nb_chapters
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1364
AVCodecParameters
This struct describes the properties of an encoded stream.
Definition: codec_par.h:47
AV_LOG_QUIET
#define AV_LOG_QUIET
Print no output.
Definition: log.h:192
FrameQueue::keep_last
int keep_last
Definition: ffplay.c:174
VideoState::audio_hw_buf_size
int audio_hw_buf_size
Definition: ffplay.c:242
decoder_decode_frame
static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub)
Definition: ffplay.c:581
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
av_parse_color
int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen, void *log_ctx)
Put the RGBA values that correspond to color_string in rgba_color.
Definition: parseutils.c:359
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
AV_DISPOSITION_ATTACHED_PIC
#define AV_DISPOSITION_ATTACHED_PIC
The stream is stored in the file as an attached picture/"cover art" (e.g.
Definition: avformat.h:670
FrameQueue::cond
SDL_cond * cond
Definition: ffplay.c:177
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:155
AVStream::discard
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:815
av_find_program_from_stream
AVProgram * av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
Find the programs which belong to a given stream.
Definition: avformat.c:327
display_disable
static int display_disable
Definition: ffplay.c:322
screen_width
static int screen_width
Definition: ffplay.c:312
ffplay_renderer.h
DEBUG
#define DEBUG
Definition: vf_framerate.c:29
sws_dict
AVDictionary * sws_dict
Definition: cmdutils.c:56
swr_set_compensation
int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance)
Activate resampling compensation ("soft" compensation).
Definition: swresample.c:901
SAMPLE_ARRAY_SIZE
#define SAMPLE_ARRAY_SIZE
Definition: ffplay.c:103
AVTXContext
Definition: tx_priv.h:235
rect
Definition: f_ebur128.c:78
update_volume
static void update_volume(VideoState *is, int sign, double step)
Definition: ffplay.c:1564
AVFMT_NOTIMESTAMPS
#define AVFMT_NOTIMESTAMPS
Format does not need / have any timestamps.
Definition: avformat.h:478
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:263
VideoState::auddec
Decoder auddec
Definition: ffplay.c:226
int64_t
long long int64_t
Definition: coverity.c:34
screen_left
static int screen_left
Definition: ffplay.c:314
AudioParams::frame_size
int frame_size
Definition: ffplay.c:134
AVSubtitleRect
Definition: avcodec.h:2063
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:115
AVSubtitle::num_rects
unsigned num_rects
Definition: avcodec.h:2094
Decoder::next_pts
int64_t next_pts
Definition: ffplay.c:197
decoder_start
static int decoder_start(Decoder *d, int(*fn)(void *), const char *thread_name, void *arg)
Definition: ffplay.c:2205
rect::y
int y
Definition: f_ebur128.c:78
FrameQueue::size
int size
Definition: ffplay.c:172
avformat_get_class
const AVClass * avformat_get_class(void)
Get the AVClass for AVFormatContext.
Definition: options.c:190
sws_freeContext
void sws_freeContext(SwsContext *swsContext)
Free the swscaler context swsContext.
Definition: utils.c:2255
av_unused
#define av_unused
Definition: attributes.h:151
normalize.log
log
Definition: normalize.py:21
Frame::sar
AVRational sar
Definition: ffplay.c:163
out_size
int out_size
Definition: movenc.c:56
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:64
VideoState::vis_texture
SDL_Texture * vis_texture
Definition: ffplay.c:272
queue_picture
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
Definition: ffplay.c:1796
mode
Definition: swscale.c:56
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
AudioParams
Definition: ffplay.c:130
VideoState::subtitle_st
AVStream * subtitle_st
Definition: ffplay.c:277
VideoState::audio_filter_src
struct AudioParams audio_filter_src
Definition: ffplay.c:252
pixdesc.h
AVFormatContext::streams
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1332
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
VideoState::frame_last_filter_delay
double frame_last_filter_delay
Definition: ffplay.c:282
AVFrame::width
int width
Definition: frame.h:499
VideoState::xleft
int xleft
Definition: ffplay.c:291
AVPacketSideData
This structure stores auxiliary information for decoding, presenting, or otherwise processing the cod...
Definition: packet.h:409
Frame::pts
double pts
Definition: ffplay.c:157
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:777
OPT_INPUT
#define OPT_INPUT
Definition: cmdutils.h:237
frame_queue_init
static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
Definition: ffplay.c:691
subtitle_codec_name
static const char * subtitle_codec_name
Definition: ffplay.c:342
parse_number
int parse_number(const char *context, const char *numstr, enum OptionType type, double min, double max, double *dst)
Parse a string and return its corresponding value as a double.
Definition: cmdutils.c:84
AV_HWDEVICE_TYPE_NONE
@ AV_HWDEVICE_TYPE_NONE
Definition: hwcontext.h:28
AVPacket::data
uint8_t * data
Definition: packet.h:588
EXTERNAL_CLOCK_MIN_FRAMES
#define EXTERNAL_CLOCK_MIN_FRAMES
Definition: ffplay.c:67
b
#define b
Definition: input.c:42
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:190
vk_renderer_create
int vk_renderer_create(VkRenderer *renderer, SDL_Window *window, AVDictionary *opt)
Definition: ffplay_renderer.c:847
AVChapter::start
int64_t start
Definition: avformat.h:1226
Clock
Definition: ffplay.c:138
data
const char data[16]
Definition: mxf.c:149
SAMPLE_QUEUE_SIZE
#define SAMPLE_QUEUE_SIZE
Definition: ffplay.c:127
program_name
const char program_name[]
program name, defined by the program for show_version().
Definition: ffplay.c:62
Decoder::queue
PacketQueue * queue
Definition: ffplay.c:189
format_opts
AVDictionary * format_opts
Definition: cmdutils.c:58
AVSEEK_FLAG_BYTE
#define AVSEEK_FLAG_BYTE
seeking based on position in bytes
Definition: avformat.h:2375
AVIOContext::error
int error
contains the error code or 0 if no error happened
Definition: avio.h:239
AV_PIX_FMT_RGB32_1
#define AV_PIX_FMT_RGB32_1
Definition: pixfmt.h:512
VideoState::audio_diff_avg_coef
double audio_diff_avg_coef
Definition: ffplay.c:237
av_hwdevice_find_type_by_name
enum AVHWDeviceType av_hwdevice_find_type_by_name(const char *name)
Look up an AVHWDeviceType by name.
Definition: hwcontext.c:110
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
CURSOR_HIDE_DELAY
#define CURSOR_HIDE_DELAY
Definition: ffplay.c:105
SDL_VOLUME_STEP
#define SDL_VOLUME_STEP
Definition: ffplay.c:76
AVComplexFloat
Definition: tx.h:27
show_help_children
void show_help_children(const AVClass *class, int flags)
Show help for all options with given flags in class and all its children.
Definition: cmdutils.c:140
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:76
AV_PIX_FMT_BGRA
@ AV_PIX_FMT_BGRA
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:102
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
autorotate
static int autorotate
Definition: ffplay.c:350
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:606
TextureFormatEntry::texture_fmt
int texture_fmt
Definition: ffplay.c:373
video_disable
static int video_disable
Definition: ffplay.c:317
Frame::uploaded
int uploaded
Definition: ffplay.c:164
VIDEO_BACKGROUND_COLOR
@ VIDEO_BACKGROUND_COLOR
Definition: ffplay_renderer.h:32
mathematics.h
AVDictionary
Definition: dict.c:32
AVChannelLayout::order
enum AVChannelOrder order
Channel order used in this layout.
Definition: channel_layout.h:324
compute_target_delay
static double compute_target_delay(double delay, VideoState *is)
Definition: ffplay.c:1579
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
Frame
Definition: ffplay.c:153
opt_input_file
static int opt_input_file(void *optctx, const char *filename)
Definition: ffplay.c:3698
stream_close
static void stream_close(VideoState *is)
Definition: ffplay.c:1310
av_read_frame
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: demux.c:1583
SDL_AUDIO_MAX_CALLBACKS_PER_SEC
#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC
Definition: ffplay.c:73
vk_renderer_destroy
void vk_renderer_destroy(VkRenderer *renderer)
Definition: ffplay_renderer.c:868
VideoState::paused
int paused
Definition: ffplay.c:207
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:329
init_clock
static void init_clock(Clock *c, int *queue_serial)
Definition: ffplay.c:1460
codec_type
enum AVMediaType codec_type
Definition: rtp.c:37
AVBufferSrcParameters::height
int height
Definition: buffersrc.h:87
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:326
av_strlcatf
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:103
get_master_clock
static double get_master_clock(VideoState *is)
Definition: ffplay.c:1493
VideoState::width
int width
Definition: ffplay.c:291
file_iformat
static const AVInputFormat * file_iformat
Definition: ffplay.c:307
dummy
static int dummy
Definition: ffplay.c:3742
FF_QUIT_EVENT
#define FF_QUIT_EVENT
Definition: ffplay.c:362
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: packet.c:74
PacketQueue
Definition: ffplay.c:114
av_tx_init
av_cold int av_tx_init(AVTXContext **ctx, av_tx_fn *tx, enum AVTXType type, int inv, int len, const void *scale, uint64_t flags)
Initialize a transform context with the given configuration (i)MDCTs with an odd length are currently...
Definition: tx.c:903
subtitle_thread
static int subtitle_thread(void *arg)
Definition: ffplay.c:2319
av_channel_layout_describe_bprint
int av_channel_layout_describe_bprint(const AVChannelLayout *channel_layout, AVBPrint *bp)
bprint variant of av_channel_layout_describe().
Definition: channel_layout.c:599
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:448
VideoState::last_subtitle_stream
int last_subtitle_stream
Definition: ffplay.c:301
VideoState::SHOW_MODE_NONE
@ SHOW_MODE_NONE
Definition: ffplay.c:259
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:119
OptionDef
Definition: cmdutils.h:195
audio_decode_frame
static int audio_decode_frame(VideoState *is)
Decode one audio frame and return its uncompressed size.
Definition: ffplay.c:2422
subtitle_disable
static int subtitle_disable
Definition: ffplay.c:318
VideoState::pictq
FrameQueue pictq
Definition: ffplay.c:222
genpts
static int genpts
Definition: ffplay.c:331
VideoState::swr_ctx
struct SwrContext * swr_ctx
Definition: ffplay.c:254
opt_sync
static int opt_sync(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3667
step_to_next_frame
static void step_to_next_frame(VideoState *is)
Definition: ffplay.c:1571
upload_texture
static int upload_texture(SDL_Texture **tex, AVFrame *frame)
Definition: ffplay.c:908
VideoState::sampq
FrameQueue sampq
Definition: ffplay.c:224
TextureFormatEntry::format
enum AVPixelFormat format
Definition: ffplay.c:372
video_background
static char * video_background
Definition: ffplay.c:355
FrameQueue::rindex
int rindex
Definition: ffplay.c:170
video_display
static void video_display(VideoState *is)
Definition: ffplay.c:1414
AVCodec::max_lowres
uint8_t max_lowres
maximum value for lowres supported by the decoder
Definition: codec.h:192
avformat_close_input
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: demux.c:377
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:706
AVFormatContext::interrupt_callback
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1534
SDL_AUDIO_MIN_BUFFER_SIZE
#define SDL_AUDIO_MIN_BUFFER_SIZE
Definition: ffplay.c:71
startup_volume
static int startup_volume
Definition: ffplay.c:325
window
static SDL_Window * window
Definition: ffplay.c:364
avfilter_graph_create_filter
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
A convenience wrapper that allocates and initializes a filter in a single step.
Definition: avfiltergraph.c:140
avfilter_graph_alloc_filter
AVFilterContext * avfilter_graph_alloc_filter(AVFilterGraph *graph, const AVFilter *filter, const char *name)
Create a new filter instance in a filter graph.
Definition: avfiltergraph.c:167
fifo.h
toggle_full_screen
static void toggle_full_screen(VideoState *is)
Definition: ffplay.c:3375
Clock::queue_serial
int * queue_serial
Definition: ffplay.c:145
VideoState::extclk
Clock extclk
Definition: ffplay.c:220
VideoState::seek_flags
int seek_flags
Definition: ffplay.c:211
alwaysontop
static int alwaysontop
Definition: ffplay.c:324
VideoState::audio_st
AVStream * audio_st
Definition: ffplay.c:240
packet_queue_init
static int packet_queue_init(PacketQueue *q)
Definition: ffplay.c:472
AVPacket::opaque_ref
AVBufferRef * opaque_ref
AVBufferRef for free use by the API user.
Definition: packet.h:624
AUDIO_DIFF_AVG_NB
#define AUDIO_DIFF_AVG_NB
Definition: ffplay.c:96
AVCodecContext::ch_layout
AVChannelLayout ch_layout
Audio channel layout.
Definition: avcodec.h:1047
fail
#define fail()
Definition: checkasm.h:217
AVBufferSrcParameters::sample_aspect_ratio
AVRational sample_aspect_ratio
Video only, the sample (pixel) aspect ratio.
Definition: buffersrc.h:92
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
avfilter_graph_alloc
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:85
FrameQueue
Definition: ffplay.c:168
packet_queue_put
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:443
samplefmt.h
AVSubtitleRect::x
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:2064
sws_getCachedContext
SwsContext * sws_getCachedContext(SwsContext *context, int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Check if context can be reused, otherwise reallocate a new one.
Definition: utils.c:2347
VideoState::video_stream
int video_stream
Definition: ffplay.c:283
autoexit
static int autoexit
Definition: ffplay.c:334
avio_tell
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:494
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
AVChapter
Definition: avformat.h:1223
video_image_display
static void video_image_display(VideoState *is)
Definition: ffplay.c:1005
val
static double val(void *priv, double ch)
Definition: aeval.c:77
AVFrame::ch_layout
AVChannelLayout ch_layout
Channel layout of the audio data.
Definition: frame.h:770
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
opt_show_mode
static int opt_show_mode(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3682
Decoder::empty_queue_cond
SDL_cond * empty_queue_cond
Definition: ffplay.c:194
pts
static int64_t pts
Definition: transcode_aac.c:644
set_clock_speed
static void set_clock_speed(Clock *c, double speed)
Definition: ffplay.c:1454
VideoState::audio_diff_threshold
double audio_diff_threshold
Definition: ffplay.c:238
OPT_TYPE_FLOAT
@ OPT_TYPE_FLOAT
Definition: cmdutils.h:86
av_opt_set
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:835
VideoState::audio_diff_cum
double audio_diff_cum
Definition: ffplay.c:236
VideoState::last_video_stream
int last_video_stream
Definition: ffplay.c:301
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:202
fast
static int fast
Definition: ffplay.c:330
loop
static int loop
Definition: ffplay.c:337
AVRational::num
int num
Numerator.
Definition: rational.h:59
avformat_network_init
int avformat_network_init(void)
Do global initialization of network libraries.
Definition: utils.c:561
VideoState::rdft_bits
int rdft_bits
Definition: ffplay.c:266
swr_convert
int attribute_align_arg swr_convert(struct SwrContext *s, uint8_t *const *out_arg, int out_count, const uint8_t *const *in_arg, int in_count)
Convert audio.
Definition: swresample.c:717
opt_height
static int opt_height(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3646
avsubtitle_free
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: avcodec.c:411
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:52
AV_SYNC_EXTERNAL_CLOCK
@ AV_SYNC_EXTERNAL_CLOCK
Definition: ffplay.c:184
sdl_texture_format_map
static const struct TextureFormatEntry sdl_texture_format_map[]
fn
Definition: ops_tmpl_float.c:122
AVFormatContext::bit_rate
int64_t bit_rate
Total stream bitrate in bit/s, 0 if not available.
Definition: avformat.h:1406
is_full_screen
static int is_full_screen
Definition: ffplay.c:359
avcodec_decode_subtitle2
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, const AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:914
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:76
set_sdl_yuv_conversion_mode
static void set_sdl_yuv_conversion_mode(AVFrame *frame)
Definition: ffplay.c:953
lrint
#define lrint
Definition: tablegen.h:53
Frame::flip_v
int flip_v
Definition: ffplay.c:165
AV_LOG_TRACE
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
Definition: log.h:236
pkt
AVPacket * pkt
Definition: movenc.c:60
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
AVFormatContext::metadata
AVDictionary * metadata
Metadata that applies to the whole file.
Definition: avformat.h:1496
vk_get_renderer
VkRenderer * vk_get_renderer(void)
Definition: ffplay_renderer.c:840
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
AVInputFormat
Definition: avformat.h:544
audio_thread
static int audio_thread(void *arg)
Definition: ffplay.c:2127
set_clock
static void set_clock(Clock *c, double pts, int serial)
Definition: ffplay.c:1448
av_dump_format
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate,...
Definition: dump.c:845
VideoState
Definition: ffplay.c:202
frame_queue_peek_next
static Frame * frame_queue_peek_next(FrameQueue *f)
Definition: ffplay.c:736
sdl_audio_callback
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
Definition: ffplay.c:2532
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
sync_clock_to_slave
static void sync_clock_to_slave(Clock *c, Clock *slave)
Definition: ffplay.c:1468
av_tx_fn
void(* av_tx_fn)(AVTXContext *s, void *out, void *in, ptrdiff_t stride)
Function pointer to a function to perform the transform.
Definition: tx.h:151
swr_init
av_cold int swr_init(struct SwrContext *s)
Initialize context after user parameters have been set.
Definition: swresample.c:138
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
Definition: buffersink.c:347
avformat_open_input
int avformat_open_input(AVFormatContext **ps, const char *url, const AVInputFormat *fmt, AVDictionary **options)
Open an input stream and read the header.
Definition: demux.c:231
frame_queue_signal
static void frame_queue_signal(FrameQueue *f)
Definition: ffplay.c:724
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:60
av_channel_layout_describe
int av_channel_layout_describe(const AVChannelLayout *channel_layout, char *buf, size_t buf_size)
Get a human-readable string describing the channel layout properties.
Definition: channel_layout.c:653
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:149
VideoState::ShowMode
ShowMode
Definition: ffplay.c:258
Decoder::avctx
AVCodecContext * avctx
Definition: ffplay.c:190
s
#define s(width, name)
Definition: cbs_vp9.c:198
show_help_default
void show_help_default(const char *opt, const char *arg)
Per-fftool specific help handler.
Definition: ffplay.c:3806
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
default_height
static int default_height
Definition: ffplay.c:311
AVFormatContext::flags
int flags
Flags modifying the (de)muxer behaviour.
Definition: avformat.h:1415
AVCodecParameters::sample_aspect_ratio
AVRational sample_aspect_ratio
Video only.
Definition: codec_par.h:144
AVInputFormat::name
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:549
AVFormatContext::iformat
const struct AVInputFormat * iformat
The input container format.
Definition: avformat.h:1276
AV_PIX_FMT_0BGR32
#define AV_PIX_FMT_0BGR32
Definition: pixfmt.h:516
AVCOL_SPC_SMPTE170M
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
Definition: pixfmt.h:707
AVDictionaryEntry::key
char * key
Definition: dict.h:91
Clock::last_updated
double last_updated
Definition: ffplay.c:141
PacketQueue::duration
int64_t duration
Definition: ffplay.c:118
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:201
AVSubtitleRect::y
int y
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:2065
AVCodecParameters::width
int width
Video only.
Definition: codec_par.h:134
video_stream
static AVStream * video_stream
Definition: demux_decode.c:42
calculate_display_rect
static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, int pic_width, int pic_height, AVRational pic_sar)
Definition: ffplay.c:863
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
screen_height
static int screen_height
Definition: ffplay.c:313
EXTERNAL_CLOCK_SPEED_STEP
#define EXTERNAL_CLOCK_SPEED_STEP
Definition: ffplay.c:93
Decoder::pkt_serial
int pkt_serial
Definition: ffplay.c:191
configure_video_filters
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
Definition: ffplay.c:1903
avcodec_receive_frame
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Alias for avcodec_receive_frame_flags(avctx, frame, 0).
Definition: avcodec.c:721
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
AVHWDeviceType
AVHWDeviceType
Definition: hwcontext.h:27
AVMEDIA_TYPE_NB
@ AVMEDIA_TYPE_NB
Definition: avutil.h:205
RenderParams
Definition: ffplay_renderer.h:36
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
av_read_play
int av_read_play(AVFormatContext *s)
Start playing a network-based stream (e.g.
Definition: demux_utils.c:173
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
opt_codec
static int opt_codec(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3715
AVPacketSideData::data
uint8_t * data
Definition: packet.h:410
Clock::pts_drift
double pts_drift
Definition: ffplay.c:140
VideoState::videoq
PacketQueue videoq
Definition: ffplay.c:285
ctx
AVFormatContext * ctx
Definition: movenc.c:49
av_guess_sample_aspect_ratio
AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
Guess the sample aspect ratio of a frame, based on both the stream and the frame aspect ratio.
Definition: avformat.c:662
channels
channels
Definition: aptx.h:31
limits.h
REFRESH_RATE
#define REFRESH_RATE
Definition: ffplay.c:99
FrameQueue::rindex_shown
int rindex_shown
Definition: ffplay.c:175
nb_streams
static int nb_streams
Definition: ffprobe.c:347
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
vk_renderer_get_hw_dev
int vk_renderer_get_hw_dev(VkRenderer *renderer, AVBufferRef **dev)
Definition: ffplay_renderer.c:853
AVSubtitle::pts
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2096
VideoState::force_refresh
int force_refresh
Definition: ffplay.c:206
get_clock
static double get_clock(Clock *c)
Definition: ffplay.c:1428
av_opt_set_array
int av_opt_set_array(void *obj, const char *name, int search_flags, unsigned int start_elem, unsigned int nb_elems, enum AVOptionType val_type, const void *val)
Add, replace, or remove elements for an array option.
Definition: opt.c:2283
screen_top
static int screen_top
Definition: ffplay.c:315
VideoState::audio_diff_avg_count
int audio_diff_avg_count
Definition: ffplay.c:239
EXTERNAL_CLOCK_SPEED_MIN
#define EXTERNAL_CLOCK_SPEED_MIN
Definition: ffplay.c:91
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
av_get_sample_fmt_name
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:51
renderer
static SDL_Renderer * renderer
Definition: ffplay.c:365
av_usleep
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
SwrContext
The libswresample context.
Definition: swresample_internal.h:95
vp_duration
static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp)
Definition: ffplay.c:1609
av_mallocz
#define av_mallocz(s)
Definition: tableprint_vlc.h:31
NAN
#define NAN
Definition: mathematics.h:115
AV_PIX_FMT_BGR32_1
#define AV_PIX_FMT_BGR32_1
Definition: pixfmt.h:514
VideoState::step
int step
Definition: ffplay.c:292
synchronize_audio
static int synchronize_audio(VideoState *is, int nb_samples)
Definition: ffplay.c:2374
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:449
arg
const char * arg
Definition: jacosubdec.c:65
if
if(ret)
Definition: filter_design.txt:179
window_title
static const char * window_title
Definition: ffplay.c:309
Clock::speed
double speed
Definition: ffplay.c:142
VideoState::SHOW_MODE_VIDEO
@ SHOW_MODE_VIDEO
Definition: ffplay.c:259
AVBufferSrcParameters::alpha_mode
enum AVAlphaMode alpha_mode
Video only, the alpha mode.
Definition: buffersrc.h:130
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:232
AVFormatContext
Format I/O context.
Definition: avformat.h:1264
av_log_get_level
int av_log_get_level(void)
Get the current log level.
Definition: log.c:472
avfilter_get_by_name
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:651
init_dynload
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:75
opts
AVDictionary * opts
Definition: movenc.c:51
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:767
AVPacket::buf
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
Definition: packet.h:571
avcodec_parameters_to_context
int avcodec_parameters_to_context(AVCodecContext *codec, const struct AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
AVSubtitleRect::w
int w
width of pict, undefined when pict is not set
Definition: avcodec.h:2066
seek_chapter
static void seek_chapter(VideoState *is, int incr)
Definition: ffplay.c:3410
VkRenderer
Definition: ffplay_renderer.c:49
get_master_sync_type
static int get_master_sync_type(VideoState *is)
Definition: ffplay.c:1476
avcodec_get_class
const AVClass * avcodec_get_class(void)
Get the AVClass for AVCodecContext.
Definition: options.c:184
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
frame_queue_destroy
static void frame_queue_destroy(FrameQueue *f)
Definition: ffplay.c:712
avfilter_graph_config
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
Definition: avfiltergraph.c:1434
AVStream::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avformat.h:783
NULL
#define NULL
Definition: coverity.c:32
avcodec_find_decoder_by_name
const AVCodec * avcodec_find_decoder_by_name(const char *name)
Find a registered decoder with the specified name.
Definition: allcodecs.c:1071
FrameQueue::max_size
int max_size
Definition: ffplay.c:173
format
New swscale design to change SwsGraph is what coordinates multiple passes These can include cascaded scaling error diffusion and so on Or we could have separate passes for the vertical and horizontal scaling In between each SwsPass lies a fully allocated image buffer Graph passes may have different levels of e g we can have a single threaded error diffusion pass following a multi threaded scaling pass SwsGraph is internally recreated whenever the image format
Definition: swscale-v2.txt:14
AV_DICT_MULTIKEY
#define AV_DICT_MULTIKEY
Allow to store several equal keys in the dictionary.
Definition: dict.h:84
OPT_EXPERT
#define OPT_EXPERT
Definition: cmdutils.h:211
VideoState::render_params
RenderParams render_params
Definition: ffplay.c:271
AV_PIX_FMT_YUYV422
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:74
Decoder
Definition: ffmpeg.h:465
AudioParams::freq
int freq
Definition: ffplay.c:131
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:164
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
isnan
#define isnan(x)
Definition: libm.h:342
AudioParams::ch_layout
AVChannelLayout ch_layout
Definition: ffplay.c:132
audio_open
static int audio_open(void *opaque, AVChannelLayout *wanted_channel_layout, int wanted_sample_rate, struct AudioParams *audio_hw_params)
Definition: ffplay.c:2575
AVFilterGraph::filters
AVFilterContext ** filters
Definition: avfilter.h:591
filter_codec_opts
int filter_codec_opts(const AVDictionary *opts, enum AVCodecID codec_id, AVFormatContext *s, AVStream *st, const AVCodec *codec, AVDictionary **dst, AVDictionary **opts_used)
Filter out options for given codec.
Definition: cmdutils.c:1423
stream_cycle_channel
static void stream_cycle_channel(VideoState *is, int codec_type)
Definition: ffplay.c:3296
VideoState::frame_drops_late
int frame_drops_late
Definition: ffplay.c:256
AV_CODEC_FLAG2_FAST
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:337
AVFormatContext::pb
AVIOContext * pb
I/O context.
Definition: avformat.h:1306
av_log_set_flags
void av_log_set_flags(int arg)
Definition: log.c:482
parseutils.h
frame_queue_unref_item
static void frame_queue_unref_item(Frame *vp)
Definition: ffplay.c:685
FrameQueue::queue
Frame queue[FRAME_QUEUE_SIZE]
Definition: ffplay.c:169
VideoState::last_i_start
int last_i_start
Definition: ffplay.c:263
Decoder::packet_pending
int packet_pending
Definition: ffplay.c:193
cursor_last_shown
static int64_t cursor_last_shown
Definition: ffplay.c:345
options
Definition: swscale.c:43
frame_queue_peek
static Frame * frame_queue_peek(FrameQueue *f)
Definition: ffplay.c:731
AVStream::metadata
AVDictionary * metadata
Definition: avformat.h:824
avfilter_inout_alloc
AVFilterInOut * avfilter_inout_alloc(void)
Allocate a single AVFilterInOut entry.
Definition: graphparser.c:71
Frame::duration
double duration
Definition: ffplay.c:158
lowres
static int lowres
Definition: ffplay.c:332
double
double
Definition: af_crystalizer.c:132
AV_DICT_DONT_OVERWRITE
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
Definition: dict.h:81
AV_PIX_FMT_RGB8
@ AV_PIX_FMT_RGB8
packed RGB 3:3:2, 8bpp, (msb)3R 3G 2B(lsb)
Definition: pixfmt.h:93
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:144
time.h
Frame::pos
int64_t pos
Definition: ffplay.c:159
VideoState::frame_last_returned_time
double frame_last_returned_time
Definition: ffplay.c:281
set_clock_at
static void set_clock_at(Clock *c, double pts, int serial, double time)
Definition: ffplay.c:1440
toggle_pause
static void toggle_pause(VideoState *is)
Definition: ffplay.c:1553
TextureFormatEntry
Definition: ffplay.c:371
AVFilterGraph
Definition: avfilter.h:589
stream_component_open
static int stream_component_open(VideoState *is, int stream_index)
Definition: ffplay.c:2688
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:244
AV_OPT_TYPE_CHLAYOUT
@ AV_OPT_TYPE_CHLAYOUT
Underlying C type is AVChannelLayout.
Definition: opt.h:331
av_packet_ref
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: packet.c:440
AVCodecParameters::ch_layout
AVChannelLayout ch_layout
Audio only.
Definition: codec_par.h:180
VideoState::rdft_data
AVComplexFloat * rdft_data
Definition: ffplay.c:268
av_packet_move_ref
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
Definition: packet.c:489
AV_PIX_FMT_NE
#define AV_PIX_FMT_NE(be, le)
Definition: pixfmt.h:508
exp
int8_t exp
Definition: eval.c:73
AVSTREAM_EVENT_FLAG_METADATA_UPDATED
#define AVSTREAM_EVENT_FLAG_METADATA_UPDATED
Definition: avformat.h:862
VideoState::seek_req
int seek_req
Definition: ffplay.c:210
VideoState::SHOW_MODE_WAVES
@ SHOW_MODE_WAVES
Definition: ffplay.c:259
VideoState::audio_clock
double audio_clock
Definition: ffplay.c:234
VideoState::read_pause_return
int read_pause_return
Definition: ffplay.c:214
event_loop
static void event_loop(VideoState *cur_stream)
Definition: ffplay.c:3438
swresample.h
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
VideoState::ytop
int ytop
Definition: ffplay.c:291
AVCodecParameters::sample_rate
int sample_rate
Audio only.
Definition: codec_par.h:184
draw_video_background
static void draw_video_background(VideoState *is)
Definition: ffplay.c:969
av_packet_side_data_get
const AVPacketSideData * av_packet_side_data_get(const AVPacketSideData *sd, int nb_sd, enum AVPacketSideDataType type)
Get side information from a side data array.
Definition: packet.c:644
avcodec_find_decoder
const AVCodec * avcodec_find_decoder(enum AVCodecID id)
Find a registered decoder with a matching codec ID.
Definition: allcodecs.c:1043
VideoState::sample_array
int16_t sample_array[SAMPLE_ARRAY_SIZE]
Definition: ffplay.c:261
AVFormatContext::nb_streams
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1320
exit_on_mousedown
static int exit_on_mousedown
Definition: ffplay.c:336
AVBufferSrcParameters::frame_rate
AVRational frame_rate
Video only, the frame rate of the input video.
Definition: buffersrc.h:100
AVAlphaMode
AVAlphaMode
Correlation between the alpha channel and color values.
Definition: pixfmt.h:810
AVFilterGraph::scale_sws_opts
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:594
VideoState::iformat
const AVInputFormat * iformat
Definition: ffplay.c:204
Decoder::next_pts_tb
AVRational next_pts_tb
Definition: ffplay.c:198
AVCodecContext::lowres
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1705
VideoState::audioq
PacketQueue audioq
Definition: ffplay.c:241
codec_opts
AVDictionary * codec_opts
Definition: cmdutils.c:58
audio_callback_time
static int64_t audio_callback_time
Definition: ffplay.c:360
AV_OPT_FLAG_FILTERING_PARAM
#define AV_OPT_FLAG_FILTERING_PARAM
A generic parameter which can be set by the user for filtering.
Definition: opt.h:381
avformat_find_stream_info
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
Read packets of a media file to get stream information.
Definition: demux.c:2602
Frame::format
int format
Definition: ffplay.c:162
INSERT_FILT
#define INSERT_FILT(name, arg)
f
f
Definition: af_crystalizer.c:122
swr_alloc_set_opts2
int swr_alloc_set_opts2(struct SwrContext **ps, const AVChannelLayout *out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate, const AVChannelLayout *in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate, int log_offset, void *log_ctx)
Allocate SwrContext if needed and set/reset common parameters.
Definition: swresample.c:40
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:503
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:519
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
OPT_TYPE_INT
@ OPT_TYPE_INT
Definition: cmdutils.h:84
AVMediaType
AVMediaType
Definition: avutil.h:198
av_log_set_callback
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:492
AVPacket::size
int size
Definition: packet.h:589
VideoState::in_audio_filter
AVFilterContext * in_audio_filter
Definition: ffplay.c:297
AVFifo
Definition: fifo.c:35
avformat_match_stream_specifier
int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the stream st contained in s is matched by the stream specifier spec.
Definition: avformat.c:616
VideoState::audio_write_buf_size
int audio_write_buf_size
Definition: ffplay.c:248
avformat_alloc_context
AVFormatContext * avformat_alloc_context(void)
Allocate an AVFormatContext.
Definition: options.c:162
AVDISCARD_DEFAULT
@ AVDISCARD_DEFAULT
discard useless packets like 0 size packets in avi
Definition: defs.h:227
height
#define height
Definition: dsp.h:89
FrameQueue::mutex
SDL_mutex * mutex
Definition: ffplay.c:176
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
AVChannelLayout
An AVChannelLayout holds information about the channel layout of audio data.
Definition: channel_layout.h:319
frame_queue_peek_writable
static Frame * frame_queue_peek_writable(FrameQueue *f)
Definition: ffplay.c:746
OPT_AUDIO
#define OPT_AUDIO
Definition: cmdutils.h:213
av_buffersrc_parameters_alloc
AVBufferSrcParameters * av_buffersrc_parameters_alloc(void)
Allocate a new AVBufferSrcParameters instance.
Definition: buffersrc.c:107
Frame::sub
AVSubtitle sub
Definition: ffplay.c:155
VideoState::last_audio_stream
int last_audio_stream
Definition: ffplay.c:301
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stand-alone.
Definition: error.h:122
vfilters_list
static const char ** vfilters_list
Definition: ffplay.c:347
AVFrame::sample_rate
int sample_rate
Sample rate of the audio data.
Definition: frame.h:590
create_hwaccel
static int create_hwaccel(AVBufferRef **device_ctx)
Definition: ffplay.c:2651
decoder_init
static int decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond)
Definition: ffplay.c:568
avfilter_link
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:149
AVBufferSrcParameters::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Video with a hwaccel pixel format only.
Definition: buffersrc.h:106
sdl_supported_color_spaces
static enum AVColorSpace sdl_supported_color_spaces[]
Definition: ffplay.c:942
start_time
static int64_t start_time
Definition: ffplay.c:328
audio_stream
static AVStream * audio_stream
Definition: demux_decode.c:42
VideoState::SHOW_MODE_NB
@ SHOW_MODE_NB
Definition: ffplay.c:259
AVCodecContext::sample_fmt
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1039
Frame::serial
int serial
Definition: ffplay.c:156
AVCodecContext::pkt_timebase
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are expressed.
Definition: avcodec.h:550
hypot
static av_const double hypot(double x, double y)
Definition: libm.h:368
uninit_opts
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents.
Definition: cmdutils.c:62
size
int size
Definition: twinvq_data.h:10344
VideoState::xpos
int xpos
Definition: ffplay.c:269
av_make_q
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
read_thread
static int read_thread(void *arg)
Definition: ffplay.c:2878
AV_PIX_FMT_BGR555
#define AV_PIX_FMT_BGR555
Definition: pixfmt.h:532
avformat_seek_file
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
Seek to timestamp ts.
Definition: seek.c:664
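A small sketch of seeking with this function using the whole-file time base (stream_index -1); the INT64_MIN/INT64_MAX bounds and the helper name are illustrative.

#include <stdint.h>
#include <libavformat/avformat.h>

/* Seek as close as possible to `seconds` from the start of the file. */
static int seek_to_seconds(AVFormatContext *ic, double seconds)
{
    int64_t ts = (int64_t)(seconds * AV_TIME_BASE);
    return avformat_seek_file(ic, -1, INT64_MIN, ts, INT64_MAX, 0);
}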
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:247
Clock::paused
int paused
Definition: ffplay.c:144
rect::h
int h
Definition: f_ebur128.c:78
VideoState::sub_texture
SDL_Texture * sub_texture
Definition: ffplay.c:273
AVStream::event_flags
int event_flags
Flags indicating events happening on the stream, a combination of AVSTREAM_EVENT_FLAG_*.
Definition: avformat.h:855
swr_free
av_cold void swr_free(SwrContext **ss)
Free the given SwrContext and set the pointer to NULL.
Definition: swresample.c:119
AVFrameSideData::data
uint8_t * data
Definition: frame.h:284
VideoState::vid_texture
SDL_Texture * vid_texture
Definition: ffplay.c:274
OPT_TYPE_INT64
@ OPT_TYPE_INT64
Definition: cmdutils.h:85
AVFrame::format
int format
format of the frame, -1 if unknown or unset. Values correspond to enum AVPixelFormat for video frames, enum AVSampleFormat for audio.
Definition: frame.h:514
VideoState::sample_array_index
int sample_array_index
Definition: ffplay.c:262
wanted_stream_spec
static const char * wanted_stream_spec[AVMEDIA_TYPE_NB]
Definition: ffplay.c:319
infinite_buffer
static int infinite_buffer
Definition: ffplay.c:339
AVSubtitle::end_display_time
uint32_t end_display_time
Definition: avcodec.h:2093
VideoState::max_frame_duration
double max_frame_duration
Definition: ffplay.c:286
avdevice.h
show_banner
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: opt_common.c:235
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:166
packet_queue_destroy
static void packet_queue_destroy(PacketQueue *q)
Definition: ffplay.c:506
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:587
AVBufferSrcParameters::time_base
AVRational time_base
The timebase to be used for the timestamps on the input frames.
Definition: buffersrc.h:82
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:511
VideoState::frame_drops_early
int frame_drops_early
Definition: ffplay.c:255
update_video_pts
static void update_video_pts(VideoState *is, double pts, int serial)
Definition: ffplay.c:1621
a
The reader does not expect b to be semantically negative here, and if the code is changed, by maybe adding a division or other operation, the signedness will almost certainly be mistaken. To avoid this confusion a new type was introduced: SUINT is the C unsigned type, but it holds a signed int. To use the same example: SUINT a
Definition: undefined.txt:41
toggle_mute
static void toggle_mute(VideoState *is)
Definition: ffplay.c:1559
decoder_abort
static void decoder_abort(Decoder *d, FrameQueue *fq)
Definition: ffplay.c:819
video_refresh
static void video_refresh(void *opaque, double *remaining_time)
Definition: ffplay.c:1629
AV_CHANNEL_ORDER_NATIVE
@ AV_CHANNEL_ORDER_NATIVE
The native channel order, i.e. the channels are in the same order in which they are defined in the AVChannel enum.
Definition: channel_layout.h:125
seek_interval
static float seek_interval
Definition: ffplay.c:321
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: packet.c:63
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:233
rect::x
int x
Definition: f_ebur128.c:78
VideoState::seek_pos
int64_t seek_pos
Definition: ffplay.c:212
OPT_TYPE_FUNC
@ OPT_TYPE_FUNC
Definition: cmdutils.h:81
frame_queue_push
static void frame_queue_push(FrameQueue *f)
Definition: ffplay.c:778
audio_dev
static SDL_AudioDeviceID audio_dev
Definition: ffplay.c:367
OPT_TYPE_BOOL
@ OPT_TYPE_BOOL
Definition: cmdutils.h:82
av_tx_uninit
av_cold void av_tx_uninit(AVTXContext **ctx)
Frees a context and sets *ctx to NULL, does nothing when *ctx == NULL.
Definition: tx.c:295
sigterm_handler
static void sigterm_handler(int sig)
Definition: ffplay.c:1373
av_channel_layout_compare
int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1)
Check whether two channel layouts are semantically the same, i.e. the same channels are present on the same positions in both.
Definition: channel_layout.c:809
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
packet_queue_abort
static void packet_queue_abort(PacketQueue *q)
Definition: ffplay.c:514
video_codec_name
static const char * video_codec_name
Definition: ffplay.c:343
buffersink.h
AVCodec::id
enum AVCodecID id
Definition: codec.h:186
iformat
static const AVInputFormat * iformat
Definition: ffprobe.c:339
packet_queue_flush
static void packet_queue_flush(PacketQueue *q)
Definition: ffplay.c:492
av_channel_layout_default
void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels)
Get the default channel layout for a given number of channels.
Definition: channel_layout.c:839
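A short sketch of deriving a layout when only a channel count is known; the count of 6 (typically mapped to 5.1) is just an example value.

#include <libavutil/channel_layout.h>

/* Fill `layout` with the default layout for 6 channels, use it, free it. */
static void default_layout_demo(void)
{
    AVChannelLayout layout;
    av_channel_layout_default(&layout, 6);
    /* ... use layout ... */
    av_channel_layout_uninit(&layout);
}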
packet_queue_get
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
Definition: ffplay.c:534
av_get_picture_type_char
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:40
MAX_QUEUE_SIZE
#define MAX_QUEUE_SIZE
Definition: ffplay.c:65
avcodec_get_name
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:406
MIN_FRAMES
#define MIN_FRAMES
Definition: ffplay.c:66
nb_vfilters
static int nb_vfilters
Definition: ffplay.c:348
VideoState::queue_attachments_req
int queue_attachments_req
Definition: ffplay.c:209
avcodec_send_packet
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:709
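A compact sketch of the send/receive decoding loop this API implies (ffplay runs an equivalent loop in its decoder threads); error handling is reduced to the essentials and the helper name is illustrative.

#include <errno.h>
#include <libavcodec/avcodec.h>

/* Feed one packet and drain all frames it produces. Returns 0 when the
 * decoder needs more input (EAGAIN) or is fully flushed (EOF), <0 on error. */
static int decode_packet(AVCodecContext *avctx, const AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(avctx, pkt);
    if (ret < 0)
        return ret;
    for (;;) {
        ret = avcodec_receive_frame(avctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;
        if (ret < 0)
            return ret;
        /* consume the frame here, then drop the reference */
        av_frame_unref(frame);
    }
}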
FrameQueue::windex
int windex
Definition: ffplay.c:171
VideoState::filename
char * filename
Definition: ffplay.c:290
VideoState::muted
int muted
Definition: ffplay.c:250
Decoder::start_pts
int64_t start_pts
Definition: ffplay.c:195
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:507
bprint.h
Clock::pts
double pts
Definition: ffplay.c:139
av_buffersrc_parameters_set
int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param)
Initialize the buffersrc or abuffersrc filter with the provided parameters.
Definition: buffersrc.c:121
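A sketch of handing basic video parameters to a buffer source before it is initialized; the helper name and the restricted set of fields are assumptions made for illustration.

#include <errno.h>
#include <libavutil/error.h>
#include <libavutil/mem.h>
#include <libavutil/pixfmt.h>
#include <libavutil/rational.h>
#include <libavfilter/buffersrc.h>

/* Pass width/height/pixel format/time base to an (a)buffer source filter. */
static int set_src_params(AVFilterContext *src, int w, int h,
                          enum AVPixelFormat pix_fmt, AVRational tb)
{
    AVBufferSrcParameters *par = av_buffersrc_parameters_alloc();
    int ret;

    if (!par)
        return AVERROR(ENOMEM);
    par->width     = w;
    par->height    = h;
    par->format    = pix_fmt;
    par->time_base = tb;
    ret = av_buffersrc_parameters_set(src, par);
    av_free(par);
    return ret;
}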
av_hwdevice_ctx_create_derived
int av_hwdevice_ctx_create_derived(AVBufferRef **dst_ref_ptr, enum AVHWDeviceType type, AVBufferRef *src_ref, int flags)
Create a new device of the specified type from an existing device.
Definition: hwcontext.c:718
VIDEO_PICTURE_QUEUE_SIZE
#define VIDEO_PICTURE_QUEUE_SIZE
Definition: ffplay.c:125
PacketQueue::serial
int serial
Definition: ffplay.c:120
AVSubtitle::format
uint16_t format
Definition: avcodec.h:2091
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
Definition: packet.h:581
VideoState::show_mode
enum VideoState::ShowMode show_mode
AVBufferSrcParameters::width
int width
Video only, the display dimensions of the input frames.
Definition: buffersrc.h:87
VideoState::audio_src
struct AudioParams audio_src
Definition: ffplay.c:251
program_birth_year
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffplay.c:63
VideoState::audio_buf1
uint8_t * audio_buf1
Definition: ffplay.c:244
OPT_TYPE_TIME
@ OPT_TYPE_TIME
Definition: cmdutils.h:88
av_get_bytes_per_sample
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:108
avfilter_graph_parse_ptr
int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters, AVFilterInOut **inputs, AVFilterInOut **outputs, void *log_ctx)
Add a graph described by a string to a graph.
Definition: graphparser.c:920
setup_find_stream_info_opts
int setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *local_codec_opts, AVDictionary ***dst)
Setup AVCodecContext options for avformat_find_stream_info().
Definition: cmdutils.c:1491
swr_opts
AVDictionary * swr_opts
Definition: cmdutils.c:57
compute_mod
static int compute_mod(int a, int b)
Definition: ffplay.c:1096
Decoder::start_pts_tb
AVRational start_pts_tb
Definition: ffplay.c:196
AVCodecParameters::height
int height
Definition: codec_par.h:135
AV_TIME_BASE
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:253
AVFrame::extended_data
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:488
Decoder::pkt
AVPacket * pkt
Definition: ffplay.c:188
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
video_open
static int video_open(VideoState *is)
Definition: ffplay.c:1390
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:700
get_sdl_pix_fmt_and_blendmode
static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
Definition: ffplay.c:890
show_status
static int show_status
Definition: ffplay.c:326
opt_format
static int opt_format(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3657
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
parse_options
int parse_options(void *optctx, int argc, char **argv, const OptionDef *options, int(*parse_arg_function)(void *, const char *))
Definition: cmdutils.c:420
AV_PIX_FMT_RGB555
#define AV_PIX_FMT_RGB555
Definition: pixfmt.h:527
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:523
vk_renderer_resize
int vk_renderer_resize(VkRenderer *renderer, int width, int height)
Definition: ffplay_renderer.c:863
borderless
static int borderless
Definition: ffplay.c:323
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
update_sample_display
static void update_sample_display(VideoState *is, short *samples, int samples_size)
Definition: ffplay.c:2354
MyAVPacketList
Definition: ffplay.c:109
OPT_FUNC_ARG
#define OPT_FUNC_ARG
Definition: cmdutils.h:205
AV_SAMPLE_FMT_S16
@ AV_SAMPLE_FMT_S16
signed 16 bits
Definition: samplefmt.h:58
AVProgram
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1188
AVCodecContext::hw_device_ctx
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/decoder.
Definition: avcodec.h:1483
av_read_pause
int av_read_pause(AVFormatContext *s)
Pause a network-based stream (e.g.
Definition: demux_utils.c:182
len
int len
Definition: vorbis_enc_data.h:426
Frame::frame
AVFrame * frame
Definition: ffplay.c:154
AV_PIX_FMT_BGR565
#define AV_PIX_FMT_BGR565
Definition: pixfmt.h:531
av_samples_get_buffer_size
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
Definition: samplefmt.c:121
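A worked example of the size computation for an interleaved, tightly packed buffer; the sample count and channel count are illustrative.

#include <libavutil/samplefmt.h>

/* Bytes needed for 1024 stereo S16 samples with align=1:
 * 1024 samples * 2 channels * 2 bytes = 4096. */
static int s16_stereo_buffer_size(void)
{
    return av_samples_get_buffer_size(NULL, 2, 1024, AV_SAMPLE_FMT_S16, 1);
}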
AVFilterPad::name
const char * name
Pad name.
Definition: filters.h:45
vk_renderer
static VkRenderer * vk_renderer
Definition: ffplay.c:369
PacketQueue::nb_packets
int nb_packets
Definition: ffplay.c:116
av_rescale
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:129
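A worked example of the a*b/c rescale: converting a sample count at a given sample rate into milliseconds; the helper name is illustrative.

#include <stdint.h>
#include <libavutil/mathematics.h>

/* 24000 samples at 48000 Hz last 500 ms: av_rescale(24000, 1000, 48000). */
static int64_t samples_to_ms(int64_t nb_samples, int sample_rate)
{
    return av_rescale(nb_samples, 1000, sample_rate);
}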
sdl_supported_alpha_modes
static enum AVAlphaMode sdl_supported_alpha_modes[]
Definition: ffplay.c:948
FRAME_QUEUE_SIZE
#define FRAME_QUEUE_SIZE
Definition: ffplay.c:128
frame_queue_peek_readable
static Frame * frame_queue_peek_readable(FrameQueue *f)
Definition: ffplay.c:762
outputs
static const AVFilterPad outputs[]
Definition: af_aap.c:310
AVIOInterruptCB::opaque
void * opaque
Definition: avio.h:61
AV_PIX_FMT_RGB565
#define AV_PIX_FMT_RGB565
Definition: pixfmt.h:526
parse_loglevel
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:556
av_cmp_q
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
Clock::serial
int serial
Definition: ffplay.c:143
VideoState::height
int height
Definition: ffplay.c:291
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:84
AVStream::disposition
int disposition
Stream disposition - a combination of AV_DISPOSITION_* flags.
Definition: avformat.h:813
AVFMT_FLAG_GENPTS
#define AVFMT_FLAG_GENPTS
Generate missing pts even if it requires parsing future frames.
Definition: avformat.h:1416
VideoState::subpq
FrameQueue subpq
Definition: ffplay.c:223
av_buffer_allocz
AVBufferRef * av_buffer_allocz(size_t size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:93
seek_by_bytes
static int seek_by_bytes
Definition: ffplay.c:320
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:744
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:204
EXTERNAL_CLOCK_MAX_FRAMES
#define EXTERNAL_CLOCK_MAX_FRAMES
Definition: ffplay.c:68
av_guess_frame_rate
AVRational av_guess_frame_rate(AVFormatContext *format, AVStream *st, AVFrame *frame)
Guess the frame rate, based on both the container and codec information.
Definition: avformat.c:685
AVSubtitleRect::h
int h
height of pict, undefined when pict is not set
Definition: avcodec.h:2067
stream_open
static VideoState * stream_open(const char *filename, const AVInputFormat *iformat)
Definition: ffplay.c:3227
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
AVALPHA_MODE_UNSPECIFIED
@ AVALPHA_MODE_UNSPECIFIED
Unknown alpha handling, or no alpha channel.
Definition: pixfmt.h:811
avcodec_flush_buffers
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal codec state / flush internal buffers.
Definition: avcodec.c:379
frame
These buffered frames must be flushed immediately if a new input produces new output. If the input is not enough to produce output, the filter must not call request_frame to get more; it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input, so any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use activate, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed; if not, the filter should request a frame on one of its inputs, repeatedly until at least one frame has been pushed, or at least make progress towards producing a frame.
Definition: filter_design.txt:265
AV_PIX_FMT_0RGB32
#define AV_PIX_FMT_0RGB32
Definition: pixfmt.h:515
VideoState::vfilter_idx
int vfilter_idx
Definition: ffplay.c:294
filter_nbthreads
static int filter_nbthreads
Definition: ffplay.c:352
log_callback_help
void log_callback_help(void *ptr, int level, const char *fmt, va_list vl)
Trivial log callback.
Definition: cmdutils.c:70
cursor_hidden
static int cursor_hidden
Definition: ffplay.c:346
VideoState::SHOW_MODE_RDFT
@ SHOW_MODE_RDFT
Definition: ffplay.c:259
av_hwdevice_ctx_create
int av_hwdevice_ctx_create(AVBufferRef **pdevice_ref, enum AVHWDeviceType type, const char *device, AVDictionary *opts, int flags)
Open a device of the specified type and create an AVHWDeviceContext for it.
Definition: hwcontext.c:615
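A sketch in the spirit of the -hwaccel handling: look the device type up by name, then create the device context; the helper name and error code are illustrative assumptions.

#include <errno.h>
#include <libavutil/error.h>
#include <libavutil/hwcontext.h>

/* Create a device context for e.g. "vaapi", "cuda" or "videotoolbox". */
static int open_hw_device(AVBufferRef **device_ctx, const char *name)
{
    enum AVHWDeviceType type = av_hwdevice_find_type_by_name(name);

    if (type == AV_HWDEVICE_TYPE_NONE)
        return AVERROR(EINVAL);
    return av_hwdevice_ctx_create(device_ctx, type, NULL, NULL, 0);
}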
find_stream_info
static int find_stream_info
Definition: ffplay.c:351
packet_queue_put_private
static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:420
hwaccel
static const char * hwaccel
Definition: ffplay.c:356
pos
unsigned int pos
Definition: spdifenc.c:414
VideoState::audio_buf_index
int audio_buf_index
Definition: ffplay.c:247
avformat.h
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:122
VideoState::out_video_filter
AVFilterContext * out_video_filter
Definition: ffplay.c:296
dict.h
av_fifo_alloc2
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
Definition: fifo.c:47
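A self-contained sketch of the FIFO API using int elements and the auto-grow flag (ffplay stores MyAVPacketList entries the same way); the element count is arbitrary.

#include <errno.h>
#include <libavutil/error.h>
#include <libavutil/fifo.h>

/* Write one element, read it back, then free the FIFO. */
static int fifo_roundtrip(void)
{
    AVFifo *f = av_fifo_alloc2(8, sizeof(int), AV_FIFO_FLAG_AUTO_GROW);
    int in = 42, out = 0, ret;

    if (!f)
        return AVERROR(ENOMEM);
    ret = av_fifo_write(f, &in, 1);
    if (ret >= 0)
        ret = av_fifo_read(f, &out, 1);   /* out is now 42 */
    av_fifo_freep2(&f);
    return ret;
}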
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:524
VideoState::last_paused
int last_paused
Definition: ffplay.c:208
AV_LOG_SKIP_REPEATED
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:400
remove_avoptions
void remove_avoptions(AVDictionary **a, AVDictionary *b)
Definition: cmdutils.c:1596
AV_PIX_FMT_UYVY422
@ AV_PIX_FMT_UYVY422
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
Definition: pixfmt.h:88
CMDUTILS_COMMON_OPTIONS
#define CMDUTILS_COMMON_OPTIONS
Definition: opt_common.h:199
AV_DICT_MATCH_CASE
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:74
rdftspeed
double rdftspeed
Definition: ffplay.c:344
AV_TX_FLOAT_RDFT
@ AV_TX_FLOAT_RDFT
Real to complex and complex to real DFTs.
Definition: tx.h:90
MyAVPacketList::serial
int serial
Definition: ffplay.c:111
opt_width
static int opt_width(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3635
enable_vulkan
static int enable_vulkan
Definition: ffplay.c:353
main
int main(int argc, char **argv)
Definition: ffplay.c:3838
avformat_network_deinit
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:573
show_usage
static void show_usage(void)
Definition: ffplay.c:3799
AVBufferSrcParameters::color_range
enum AVColorRange color_range
Definition: buffersrc.h:122
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:28
AVCodecContext
main external API structure.
Definition: avcodec.h:439
AVFrame::height
int height
Definition: frame.h:499
PacketQueue::mutex
SDL_mutex * mutex
Definition: ffplay.c:121
packet_queue_start
static void packet_queue_start(PacketQueue *q)
Definition: ffplay.c:525
VideoState::vidclk
Clock vidclk
Definition: ffplay.c:219
audio_codec_name
static const char * audio_codec_name
Definition: ffplay.c:341
channel_layout.h
AVBufferSrcParameters
This structure contains the parameters describing the frames that will be passed to this filter.
Definition: buffersrc.h:73
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
vk_renderer_display
int vk_renderer_display(VkRenderer *renderer, AVFrame *frame, RenderParams *render_params)
Definition: ffplay_renderer.c:858
AV_SYNC_FRAMEDUP_THRESHOLD
#define AV_SYNC_FRAMEDUP_THRESHOLD
Definition: ffplay.c:83
show_mode
static enum ShowMode show_mode
Definition: ffplay.c:340
PacketQueue::cond
SDL_cond * cond
Definition: ffplay.c:122
AVBufferSrcParameters::format
int format
video: the pixel format, value corresponds to enum AVPixelFormat; audio: the sample format, value corresponds to enum AVSampleFormat.
Definition: buffersrc.h:78
PacketQueue::size
int size
Definition: ffplay.c:117
sws_scale
int attribute_align_arg sws_scale(SwsContext *sws, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
Definition: swscale.c:1512
opt_common.h
AVInputFormat::flags
int flags
Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, AVFMT_NOTIMESTAMPS,...
Definition: avformat.h:563
avfilter_init_dict
int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
Initialize a filter with the supplied dictionary of options.
Definition: avfilter.c:918
AVRational::den
int den
Denominator.
Definition: rational.h:60
VIDEO_BACKGROUND_NONE
@ VIDEO_BACKGROUND_NONE
Definition: ffplay_renderer.h:33
VideoState::in_video_filter
AVFilterContext * in_video_filter
Definition: ffplay.c:295
VideoState::subtitle_stream
int subtitle_stream
Definition: ffplay.c:276
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Underlying C type is int.
Definition: opt.h:259
avfilter.h
VideoState::abort_request
int abort_request
Definition: ffplay.c:205
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:828
av_channel_layout_uninit
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
Definition: channel_layout.c:442
VideoState::audio_buf1_size
unsigned int audio_buf1_size
Definition: ffplay.c:246
VideoState::eof
int eof
Definition: ffplay.c:288
av_dict_parse_string
int av_dict_parse_string(AVDictionary **pm, const char *str, const char *key_val_sep, const char *pairs_sep, int flags)
Parse the key/value pairs list and add the parsed entries to a dictionary.
Definition: dict.c:210
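A sketch of parsing a key=value option string into a dictionary; the separators and the option names shown are illustrative.

#include <libavutil/dict.h>

/* Turn "key=value:key=value" into an AVDictionary. */
static int parse_opts(AVDictionary **opts)
{
    int ret = av_dict_parse_string(opts, "threads=4:lowres=1", "=", ":", 0);
    if (ret < 0)
        av_dict_free(opts);
    return ret;
}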
AV_SYNC_THRESHOLD_MAX
#define AV_SYNC_THRESHOLD_MAX
Definition: ffplay.c:81
decoder_destroy
static void decoder_destroy(Decoder *d)
Definition: ffplay.c:680
av_get_packed_sample_fmt
enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt)
Get the packed alternative form of the given sample format.
Definition: samplefmt.c:77
VideoState::read_tid
SDL_Thread * read_tid
Definition: ffplay.c:203
VideoState::audio_volume
int audio_volume
Definition: ffplay.c:249
VideoState::subdec
Decoder subdec
Definition: ffplay.c:228
AVIOContext::eof_reached
int eof_reached
true if was unable to read due to error or eof
Definition: avio.h:238
stream_has_enough_packets
static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue)
Definition: ffplay.c:2854
samples
In filter terminology, the word “frame” indicates either a video frame or a group of audio samples
Definition: filter_design.txt:8
VideoState::out_audio_filter
AVFilterContext * out_audio_filter
Definition: ffplay.c:298
av_find_input_format
const AVInputFormat * av_find_input_format(const char *short_name)
Find AVInputFormat based on the short name of the input format.
Definition: format.c:146
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
AV_OPT_TYPE_PIXEL_FMT
@ AV_OPT_TYPE_PIXEL_FMT
Underlying C type is enum AVPixelFormat.
Definition: opt.h:307
AVFormatContext::duration
int64_t duration
Duration of the stream, in AV_TIME_BASE fractional seconds.
Definition: avformat.h:1399
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
AVPacket::stream_index
int stream_index
Definition: packet.h:590
GROW_ARRAY
#define GROW_ARRAY(array, nb_elems)
Definition: cmdutils.h:536
SUBPICTURE_QUEUE_SIZE
#define SUBPICTURE_QUEUE_SIZE
Definition: ffplay.c:126
AVFilterContext
An instance of a filter.
Definition: avfilter.h:274
input_filename
static const char * input_filename
Definition: ffplay.c:308
av_channel_layout_copy
int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
Make a copy of a channel layout.
Definition: channel_layout.c:449
stream_toggle_pause
static void stream_toggle_pause(VideoState *is)
Definition: ffplay.c:1540
VideoState::continue_read_thread
SDL_cond * continue_read_thread
Definition: ffplay.c:303
vulkan_params
static char * vulkan_params
Definition: ffplay.c:354
av_dict_set_int
int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags)
Convenience wrapper for av_dict_set() that converts the value to a string and stores it.
Definition: dict.c:177
toggle_audio_display
static void toggle_audio_display(VideoState *is)
Definition: ffplay.c:3381
AVCodecContext::codec_type
enum AVMediaType codec_type
Definition: avcodec.h:447
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:272
AV_OPT_FLAG_DECODING_PARAM
#define AV_OPT_FLAG_DECODING_PARAM
A generic parameter which can be set by the user for demuxing or decoding.
Definition: opt.h:356
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
AVFMT_TS_DISCONT
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:480
VideoState::real_data
float * real_data
Definition: ffplay.c:267
mem.h
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
get_video_frame
static int get_video_frame(VideoState *is, AVFrame *frame)
Definition: ffplay.c:1827
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:282
default_width
static int default_width
Definition: ffplay.c:310
configure_filtergraph
static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph, AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
Definition: ffplay.c:1860
AVIOInterruptCB::callback
int(* callback)(void *)
Definition: avio.h:60
w
uint8_t w
Definition: llvidencdsp.c:39
VideoState::realtime
int realtime
Definition: ffplay.c:216
VideoState::sub_convert_ctx
struct SwsContext * sub_convert_ctx
Definition: ffplay.c:287
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVDictionaryEntry
Definition: dict.h:90
OPT_TYPE_STRING
@ OPT_TYPE_STRING
Definition: cmdutils.h:83
AVPacket
This structure stores compressed data.
Definition: packet.h:565
audio_disable
static int audio_disable
Definition: ffplay.c:316
refresh_loop_wait_event
static void refresh_loop_wait_event(VideoState *is, SDL_Event *event)
Definition: ffplay.c:3393
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:86
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:557
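A sketch of the reuse pattern behind av_fast_malloc, similar to how the audio_buf1/audio_buf1_size pair is grown on demand; the helper name is illustrative.

#include <stdint.h>
#include <libavutil/mem.h>

/* Grow `*buf` only when `needed` exceeds the current capacity. */
static int ensure_capacity(uint8_t **buf, unsigned int *size, size_t needed)
{
    av_fast_malloc(buf, size, needed);
    return *buf ? 0 : -1;   /* on failure the buffer is freed and size reset */
}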
stream_component_close
static void stream_component_close(VideoState *is, int stream_index)
Definition: ffplay.c:1252
VideoState::subtitleq
PacketQueue subtitleq
Definition: ffplay.c:278
cmdutils.h
AVPacket::pos
int64_t pos
byte position in stream, -1 if unknown
Definition: packet.h:608
cmp_audio_fmts
static int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1, enum AVSampleFormat fmt2, int64_t channel_count2)
Definition: ffplay.c:410
Decoder::decoder_tid
SDL_Thread * decoder_tid
Definition: ffplay.c:199
int32_t
int32_t
Definition: audioconvert.c:56
framedrop
static int framedrop
Definition: ffplay.c:338
VideoState::audio_stream
int audio_stream
Definition: ffplay.c:230
dump_dictionary
void dump_dictionary(void *ctx, const AVDictionary *m, const char *name, const char *indent, int log_level)
This does the same as libavformat/dump.c corresponding function and should probably be kept in sync w...
Definition: cmdutils.c:1616
VideoState::audio_buf_size
unsigned int audio_buf_size
Definition: ffplay.c:245
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:472
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVFormatContext::start_time
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds.
Definition: avformat.h:1389
PacketQueue::abort_request
int abort_request
Definition: ffplay.c:119
av_fifo_freep2
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
Definition: fifo.c:286
VideoState::ic
AVFormatContext * ic
Definition: ffplay.c:215
VideoState::viddec
Decoder viddec
Definition: ffplay.c:227
ns
#define ns(max_value, name, subs,...)
Definition: cbs_av1.c:640
h
h
Definition: vp9dsp_template.c:2070
AVERROR_EXIT
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
Definition: error.h:58
AVDictionaryEntry::value
char * value
Definition: dict.h:92
AVStream::start_time
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base.
Definition: avformat.h:793
VideoState::audio_buf
uint8_t * audio_buf
Definition: ffplay.c:243
AVFilterGraph::nb_filters
unsigned nb_filters
Definition: avfilter.h:592
avstring.h
frame_queue_peek_last
static Frame * frame_queue_peek_last(FrameQueue *f)
Definition: ffplay.c:741
VideoState::last_vis_time
double last_vis_time
Definition: ffplay.c:270
width
#define width
Definition: dsp.h:89
stream_seek
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int by_bytes)
Definition: ffplay.c:1526
decoder_reorder_pts
static int decoder_reorder_pts
Definition: ffplay.c:333
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
Definition: pixfmt.h:702
VideoState::audio_tgt
struct AudioParams audio_tgt
Definition: ffplay.c:253
afilters
static char * afilters
Definition: ffplay.c:349
AVChapter::time_base
AVRational time_base
time base in which the start/end timestamps are specified
Definition: avformat.h:1225
SwsContext
Main external API structure.
Definition: swscale.h:189
VideoState::audclk
Clock audclk
Definition: ffplay.c:218
avfilter_get_class
const AVClass * avfilter_get_class(void)
Definition: avfilter.c:1658
AVFilterInOut
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:746
print_error
static void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error code.
Definition: cmdutils.h:472
FrameQueue::pktq
PacketQueue * pktq
Definition: ffplay.c:178
AV_SYNC_VIDEO_MASTER
@ AV_SYNC_VIDEO_MASTER
Definition: ffplay.c:183
short
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this, just let it be: vf, default, minimum, maximum, flags; name is the option, keep it simple and lowercase; description are short
Definition: writing_filters.txt:89
snprintf
#define snprintf
Definition: snprintf.h:34
video_audio_display
static void video_audio_display(VideoState *s)
Definition: ffplay.c:1101
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
AV_SYNC_THRESHOLD_MIN
#define AV_SYNC_THRESHOLD_MIN
Definition: ffplay.c:79
buffersrc.h
AudioParams::bytes_per_sec
int bytes_per_sec
Definition: ffplay.c:135
check_external_clock_speed
static void check_external_clock_speed(VideoState *is)
Definition: ffplay.c:1511
av_dict_iterate
const AVDictionaryEntry * av_dict_iterate(const AVDictionary *m, const AVDictionaryEntry *prev)
Iterate over a dictionary.
Definition: dict.c:42
AVSubtitle::start_display_time
uint32_t start_display_time
Definition: avcodec.h:2092
SAMPLE_CORRECTION_PERCENT_MAX
#define SAMPLE_CORRECTION_PERCENT_MAX
Definition: ffplay.c:88
EXTERNAL_CLOCK_SPEED_MAX
#define EXTERNAL_CLOCK_SPEED_MAX
Definition: ffplay.c:92
packet_queue_put_nullpacket
static int packet_queue_put_nullpacket(PacketQueue *q, AVPacket *pkt, int stream_index)
Definition: ffplay.c:465
duration
static int64_t duration
Definition: ffplay.c:329
AV_FIFO_FLAG_AUTO_GROW
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
Definition: fifo.h:63
MyAVPacketList::pkt
AVPacket * pkt
Definition: ffplay.c:110
swscale.h
is_realtime
static int is_realtime(AVFormatContext *s)
Definition: ffplay.c:2861
av_x_if_null
static void * av_x_if_null(const void *p, const void *x)
Return x default pointer in case p is NULL.
Definition: avutil.h:311
PacketQueue::pkt_list
AVFifo * pkt_list
Definition: ffplay.c:115
Frame::height
int height
Definition: ffplay.c:161
decode_interrupt_cb
static int decode_interrupt_cb(void *ctx)
Definition: ffplay.c:2848
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:3376
VideoState::frame_timer
double frame_timer
Definition: ffplay.c:280
tx.h
VideoState::audio_clock_serial
int audio_clock_serial
Definition: ffplay.c:235
avdevice_register_all
FF_VISIBILITY_POP_HIDDEN av_cold void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:67
avio_feof
int avio_feof(AVIOContext *s)
Similar to feof() but also returns nonzero on read errors.
Definition: aviobuf.c:349
realloc_texture
static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
Definition: ffplay.c:839
AV_PIX_FMT_RGB444
#define AV_PIX_FMT_RGB444
Definition: pixfmt.h:528
exit_on_keydown
static int exit_on_keydown
Definition: ffplay.c:335