FFmpeg
ffplay.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include "config_components.h"
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include <stdint.h>
32 
33 #include "libavutil/avstring.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/mem.h"
37 #include "libavutil/pixdesc.h"
38 #include "libavutil/dict.h"
39 #include "libavutil/fifo.h"
40 #include "libavutil/samplefmt.h"
41 #include "libavutil/time.h"
42 #include "libavutil/bprint.h"
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswscale/swscale.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/tx.h"
49 
50 #include "libavfilter/avfilter.h"
51 #include "libavfilter/buffersink.h"
52 #include "libavfilter/buffersrc.h"
53 
54 #include <SDL.h>
55 #include <SDL_thread.h>
56 
57 #include "cmdutils.h"
58 #include "ffplay_renderer.h"
59 #include "opt_common.h"
60 
61 const char program_name[] = "ffplay";
62 const int program_birth_year = 2003;
63 
64 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
65 #define MIN_FRAMES 25
66 #define EXTERNAL_CLOCK_MIN_FRAMES 2
67 #define EXTERNAL_CLOCK_MAX_FRAMES 10
68 
69 /* Minimum SDL audio buffer size, in samples. */
70 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
71 /* Calculate actual buffer size keeping in mind not cause too frequent audio callbacks */
72 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
73 
74 /* Step size for volume control in dB */
75 #define SDL_VOLUME_STEP (0.75)
76 
77 /* no AV sync correction is done if below the minimum AV sync threshold */
78 #define AV_SYNC_THRESHOLD_MIN 0.04
79 /* AV sync correction is done if above the maximum AV sync threshold */
80 #define AV_SYNC_THRESHOLD_MAX 0.1
81 /* If a frame duration is longer than this, it will not be duplicated to compensate AV sync */
82 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
83 /* no AV correction is done if too big error */
84 #define AV_NOSYNC_THRESHOLD 10.0
85 
86 /* maximum audio speed change to get correct sync */
87 #define SAMPLE_CORRECTION_PERCENT_MAX 10
88 
89 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
90 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
91 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
92 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
93 
94 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
95 #define AUDIO_DIFF_AVG_NB 20
96 
97 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
98 #define REFRESH_RATE 0.01
99 
100 /* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
101 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
102 #define SAMPLE_ARRAY_SIZE (8 * 65536)
103 
104 #define CURSOR_HIDE_DELAY 1000000
105 
106 #define USE_ONEPASS_SUBTITLE_RENDER 1
107 
108 typedef struct MyAVPacketList {
110  int serial;
112 
113 typedef struct PacketQueue {
116  int size;
119  int serial;
120  SDL_mutex *mutex;
121  SDL_cond *cond;
122 } PacketQueue;
123 
124 #define VIDEO_PICTURE_QUEUE_SIZE 3
125 #define SUBPICTURE_QUEUE_SIZE 16
126 #define SAMPLE_QUEUE_SIZE 9
127 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
128 
129 typedef struct AudioParams {
130  int freq;
135 } AudioParams;
136 
137 typedef struct Clock {
138  double pts; /* clock base */
139  double pts_drift; /* clock base minus time at which we updated the clock */
140  double last_updated;
141  double speed;
142  int serial; /* clock is based on a packet with this serial */
143  int paused;
144  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
145 } Clock;
146 
147 typedef struct FrameData {
149 } FrameData;
150 
151 /* Common struct for handling all types of decoded data and allocated render buffers. */
152 typedef struct Frame {
155  int serial;
156  double pts; /* presentation timestamp for the frame */
157  double duration; /* estimated duration of the frame */
158  int64_t pos; /* byte position of the frame in the input file */
159  int width;
160  int height;
161  int format;
163  int uploaded;
164  int flip_v;
165 } Frame;
166 
167 typedef struct FrameQueue {
169  int rindex;
170  int windex;
171  int size;
172  int max_size;
175  SDL_mutex *mutex;
176  SDL_cond *cond;
178 } FrameQueue;
179 
180 enum {
181  AV_SYNC_AUDIO_MASTER, /* default choice */
183  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
184 };
185 
186 typedef struct Decoder {
191  int finished;
193  SDL_cond *empty_queue_cond;
198  SDL_Thread *decoder_tid;
199 } Decoder;
200 
201 typedef struct VideoState {
202  SDL_Thread *read_tid;
206  int paused;
209  int seek_req;
215  int realtime;
216 
220 
224 
228 
230 
232 
233  double audio_clock;
235  double audio_diff_cum; /* used for AV difference average computation */
242  uint8_t *audio_buf;
243  uint8_t *audio_buf1;
244  unsigned int audio_buf_size; /* in bytes */
245  unsigned int audio_buf1_size;
246  int audio_buf_index; /* in bytes */
249  int muted;
256 
257  enum ShowMode {
259  } show_mode;
266  float *real_data;
268  int xpos;
270  SDL_Texture *vis_texture;
271  SDL_Texture *sub_texture;
272  SDL_Texture *vid_texture;
273 
277 
278  double frame_timer;
284  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
286  int eof;
287 
288  char *filename;
290  int step;
291 
293  AVFilterContext *in_video_filter; // the first filter in the video chain
294  AVFilterContext *out_video_filter; // the last filter in the video chain
295  AVFilterContext *in_audio_filter; // the first filter in the audio chain
296  AVFilterContext *out_audio_filter; // the last filter in the audio chain
297  AVFilterGraph *agraph; // audio filter graph
298 
300 
302 } VideoState;
303 
304 /* options specified by the user */
306 static const char *input_filename;
307 static const char *window_title;
308 static int default_width = 640;
309 static int default_height = 480;
310 static int screen_width = 0;
311 static int screen_height = 0;
312 static int screen_left = SDL_WINDOWPOS_CENTERED;
313 static int screen_top = SDL_WINDOWPOS_CENTERED;
314 static int audio_disable;
315 static int video_disable;
316 static int subtitle_disable;
317 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
318 static int seek_by_bytes = -1;
319 static float seek_interval = 10;
320 static int display_disable;
321 static int borderless;
322 static int alwaysontop;
323 static int startup_volume = 100;
324 static int show_status = -1;
328 static int fast = 0;
329 static int genpts = 0;
330 static int lowres = 0;
331 static int decoder_reorder_pts = -1;
332 static int autoexit;
333 static int exit_on_keydown;
334 static int exit_on_mousedown;
335 static int loop = 1;
336 static int framedrop = -1;
337 static int infinite_buffer = -1;
338 static enum ShowMode show_mode = SHOW_MODE_NONE;
339 static const char *audio_codec_name;
340 static const char *subtitle_codec_name;
341 static const char *video_codec_name;
342 double rdftspeed = 0.02;
344 static int cursor_hidden = 0;
345 static const char **vfilters_list = NULL;
346 static int nb_vfilters = 0;
347 static char *afilters = NULL;
348 static int autorotate = 1;
349 static int find_stream_info = 1;
350 static int filter_nbthreads = 0;
351 static int enable_vulkan = 0;
352 static char *vulkan_params = NULL;
353 static const char *hwaccel = NULL;
354 
355 /* current context */
356 static int is_full_screen;
358 
359 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
360 
361 static SDL_Window *window;
362 static SDL_Renderer *renderer;
363 static SDL_RendererInfo renderer_info = {0};
364 static SDL_AudioDeviceID audio_dev;
365 
367 
368 static const struct TextureFormatEntry {
372  { AV_PIX_FMT_RGB8, SDL_PIXELFORMAT_RGB332 },
373  { AV_PIX_FMT_RGB444, SDL_PIXELFORMAT_RGB444 },
374  { AV_PIX_FMT_RGB555, SDL_PIXELFORMAT_RGB555 },
375  { AV_PIX_FMT_BGR555, SDL_PIXELFORMAT_BGR555 },
376  { AV_PIX_FMT_RGB565, SDL_PIXELFORMAT_RGB565 },
377  { AV_PIX_FMT_BGR565, SDL_PIXELFORMAT_BGR565 },
378  { AV_PIX_FMT_RGB24, SDL_PIXELFORMAT_RGB24 },
379  { AV_PIX_FMT_BGR24, SDL_PIXELFORMAT_BGR24 },
380  { AV_PIX_FMT_0RGB32, SDL_PIXELFORMAT_RGB888 },
381  { AV_PIX_FMT_0BGR32, SDL_PIXELFORMAT_BGR888 },
382  { AV_PIX_FMT_NE(RGB0, 0BGR), SDL_PIXELFORMAT_RGBX8888 },
383  { AV_PIX_FMT_NE(BGR0, 0RGB), SDL_PIXELFORMAT_BGRX8888 },
384  { AV_PIX_FMT_RGB32, SDL_PIXELFORMAT_ARGB8888 },
385  { AV_PIX_FMT_RGB32_1, SDL_PIXELFORMAT_RGBA8888 },
386  { AV_PIX_FMT_BGR32, SDL_PIXELFORMAT_ABGR8888 },
387  { AV_PIX_FMT_BGR32_1, SDL_PIXELFORMAT_BGRA8888 },
388  { AV_PIX_FMT_YUV420P, SDL_PIXELFORMAT_IYUV },
389  { AV_PIX_FMT_YUYV422, SDL_PIXELFORMAT_YUY2 },
390  { AV_PIX_FMT_UYVY422, SDL_PIXELFORMAT_UYVY },
391  { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_UNKNOWN },
392 };
393 
394 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
395 {
397  if (ret < 0)
398  return ret;
399 
401  if (!vfilters_list[nb_vfilters - 1])
402  return AVERROR(ENOMEM);
403 
404  return 0;
405 }
406 
407 static inline
408 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
409  enum AVSampleFormat fmt2, int64_t channel_count2)
410 {
411  /* If channel count == 1, planar and non-planar formats are the same */
412  if (channel_count1 == 1 && channel_count2 == 1)
414  else
415  return channel_count1 != channel_count2 || fmt1 != fmt2;
416 }
417 
419 {
420  MyAVPacketList pkt1;
421  int ret;
422 
423  if (q->abort_request)
424  return -1;
425 
426 
427  pkt1.pkt = pkt;
428  pkt1.serial = q->serial;
429 
430  ret = av_fifo_write(q->pkt_list, &pkt1, 1);
431  if (ret < 0)
432  return ret;
433  q->nb_packets++;
434  q->size += pkt1.pkt->size + sizeof(pkt1);
435  q->duration += pkt1.pkt->duration;
436  /* XXX: should duplicate packet data in DV case */
437  SDL_CondSignal(q->cond);
438  return 0;
439 }
440 
442 {
443  AVPacket *pkt1;
444  int ret;
445 
446  pkt1 = av_packet_alloc();
447  if (!pkt1) {
449  return -1;
450  }
451  av_packet_move_ref(pkt1, pkt);
452 
453  SDL_LockMutex(q->mutex);
454  ret = packet_queue_put_private(q, pkt1);
455  SDL_UnlockMutex(q->mutex);
456 
457  if (ret < 0)
458  av_packet_free(&pkt1);
459 
460  return ret;
461 }
462 
463 static int packet_queue_put_nullpacket(PacketQueue *q, AVPacket *pkt, int stream_index)
464 {
465  pkt->stream_index = stream_index;
466  return packet_queue_put(q, pkt);
467 }
468 
469 /* packet queue handling */
471 {
472  memset(q, 0, sizeof(PacketQueue));
474  if (!q->pkt_list)
475  return AVERROR(ENOMEM);
476  q->mutex = SDL_CreateMutex();
477  if (!q->mutex) {
478  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
479  return AVERROR(ENOMEM);
480  }
481  q->cond = SDL_CreateCond();
482  if (!q->cond) {
483  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
484  return AVERROR(ENOMEM);
485  }
486  q->abort_request = 1;
487  return 0;
488 }
489 
491 {
492  MyAVPacketList pkt1;
493 
494  SDL_LockMutex(q->mutex);
495  while (av_fifo_read(q->pkt_list, &pkt1, 1) >= 0)
496  av_packet_free(&pkt1.pkt);
497  q->nb_packets = 0;
498  q->size = 0;
499  q->duration = 0;
500  q->serial++;
501  SDL_UnlockMutex(q->mutex);
502 }
503 
505 {
508  SDL_DestroyMutex(q->mutex);
509  SDL_DestroyCond(q->cond);
510 }
511 
513 {
514  SDL_LockMutex(q->mutex);
515 
516  q->abort_request = 1;
517 
518  SDL_CondSignal(q->cond);
519 
520  SDL_UnlockMutex(q->mutex);
521 }
522 
524 {
525  SDL_LockMutex(q->mutex);
526  q->abort_request = 0;
527  q->serial++;
528  SDL_UnlockMutex(q->mutex);
529 }
530 
531 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
532 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
533 {
534  MyAVPacketList pkt1;
535  int ret;
536 
537  SDL_LockMutex(q->mutex);
538 
539  for (;;) {
540  if (q->abort_request) {
541  ret = -1;
542  break;
543  }
544 
545  if (av_fifo_read(q->pkt_list, &pkt1, 1) >= 0) {
546  q->nb_packets--;
547  q->size -= pkt1.pkt->size + sizeof(pkt1);
548  q->duration -= pkt1.pkt->duration;
549  av_packet_move_ref(pkt, pkt1.pkt);
550  if (serial)
551  *serial = pkt1.serial;
552  av_packet_free(&pkt1.pkt);
553  ret = 1;
554  break;
555  } else if (!block) {
556  ret = 0;
557  break;
558  } else {
559  SDL_CondWait(q->cond, q->mutex);
560  }
561  }
562  SDL_UnlockMutex(q->mutex);
563  return ret;
564 }
565 
566 static int decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
567  memset(d, 0, sizeof(Decoder));
568  d->pkt = av_packet_alloc();
569  if (!d->pkt)
570  return AVERROR(ENOMEM);
571  d->avctx = avctx;
572  d->queue = queue;
573  d->empty_queue_cond = empty_queue_cond;
575  d->pkt_serial = -1;
576  return 0;
577 }
578 
580  int ret = AVERROR(EAGAIN);
581 
582  for (;;) {
583  if (d->queue->serial == d->pkt_serial) {
584  do {
585  if (d->queue->abort_request)
586  return -1;
587 
588  switch (d->avctx->codec_type) {
589  case AVMEDIA_TYPE_VIDEO:
591  if (ret >= 0) {
592  if (decoder_reorder_pts == -1) {
593  frame->pts = frame->best_effort_timestamp;
594  } else if (!decoder_reorder_pts) {
595  frame->pts = frame->pkt_dts;
596  }
597  }
598  break;
599  case AVMEDIA_TYPE_AUDIO:
601  if (ret >= 0) {
602  AVRational tb = (AVRational){1, frame->sample_rate};
603  if (frame->pts != AV_NOPTS_VALUE)
604  frame->pts = av_rescale_q(frame->pts, d->avctx->pkt_timebase, tb);
605  else if (d->next_pts != AV_NOPTS_VALUE)
606  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
607  if (frame->pts != AV_NOPTS_VALUE) {
608  d->next_pts = frame->pts + frame->nb_samples;
609  d->next_pts_tb = tb;
610  }
611  }
612  break;
613  }
614  if (ret == AVERROR_EOF) {
615  d->finished = d->pkt_serial;
617  return 0;
618  }
619  if (ret >= 0)
620  return 1;
621  } while (ret != AVERROR(EAGAIN));
622  }
623 
624  do {
625  if (d->queue->nb_packets == 0)
626  SDL_CondSignal(d->empty_queue_cond);
627  if (d->packet_pending) {
628  d->packet_pending = 0;
629  } else {
630  int old_serial = d->pkt_serial;
631  if (packet_queue_get(d->queue, d->pkt, 1, &d->pkt_serial) < 0)
632  return -1;
633  if (old_serial != d->pkt_serial) {
635  d->finished = 0;
636  d->next_pts = d->start_pts;
637  d->next_pts_tb = d->start_pts_tb;
638  }
639  }
640  if (d->queue->serial == d->pkt_serial)
641  break;
642  av_packet_unref(d->pkt);
643  } while (1);
644 
646  int got_frame = 0;
647  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, d->pkt);
648  if (ret < 0) {
649  ret = AVERROR(EAGAIN);
650  } else {
651  if (got_frame && !d->pkt->data) {
652  d->packet_pending = 1;
653  }
654  ret = got_frame ? 0 : (d->pkt->data ? AVERROR(EAGAIN) : AVERROR_EOF);
655  }
656  av_packet_unref(d->pkt);
657  } else {
658  if (d->pkt->buf && !d->pkt->opaque_ref) {
659  FrameData *fd;
660 
661  d->pkt->opaque_ref = av_buffer_allocz(sizeof(*fd));
662  if (!d->pkt->opaque_ref)
663  return AVERROR(ENOMEM);
664  fd = (FrameData*)d->pkt->opaque_ref->data;
665  fd->pkt_pos = d->pkt->pos;
666  }
667 
668  if (avcodec_send_packet(d->avctx, d->pkt) == AVERROR(EAGAIN)) {
669  av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
670  d->packet_pending = 1;
671  } else {
672  av_packet_unref(d->pkt);
673  }
674  }
675  }
676 }
677 
678 static void decoder_destroy(Decoder *d) {
679  av_packet_free(&d->pkt);
681 }
682 
684 {
685  av_frame_unref(vp->frame);
686  avsubtitle_free(&vp->sub);
687 }
688 
689 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
690 {
691  int i;
692  memset(f, 0, sizeof(FrameQueue));
693  if (!(f->mutex = SDL_CreateMutex())) {
694  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
695  return AVERROR(ENOMEM);
696  }
697  if (!(f->cond = SDL_CreateCond())) {
698  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
699  return AVERROR(ENOMEM);
700  }
701  f->pktq = pktq;
702  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
703  f->keep_last = !!keep_last;
704  for (i = 0; i < f->max_size; i++)
705  if (!(f->queue[i].frame = av_frame_alloc()))
706  return AVERROR(ENOMEM);
707  return 0;
708 }
709 
711 {
712  int i;
713  for (i = 0; i < f->max_size; i++) {
714  Frame *vp = &f->queue[i];
716  av_frame_free(&vp->frame);
717  }
718  SDL_DestroyMutex(f->mutex);
719  SDL_DestroyCond(f->cond);
720 }
721 
723 {
724  SDL_LockMutex(f->mutex);
725  SDL_CondSignal(f->cond);
726  SDL_UnlockMutex(f->mutex);
727 }
728 
730 {
731  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
732 }
733 
735 {
736  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
737 }
738 
740 {
741  return &f->queue[f->rindex];
742 }
743 
745 {
746  /* wait until we have space to put a new frame */
747  SDL_LockMutex(f->mutex);
748  while (f->size >= f->max_size &&
749  !f->pktq->abort_request) {
750  SDL_CondWait(f->cond, f->mutex);
751  }
752  SDL_UnlockMutex(f->mutex);
753 
754  if (f->pktq->abort_request)
755  return NULL;
756 
757  return &f->queue[f->windex];
758 }
759 
761 {
762  /* wait until we have a readable a new frame */
763  SDL_LockMutex(f->mutex);
764  while (f->size - f->rindex_shown <= 0 &&
765  !f->pktq->abort_request) {
766  SDL_CondWait(f->cond, f->mutex);
767  }
768  SDL_UnlockMutex(f->mutex);
769 
770  if (f->pktq->abort_request)
771  return NULL;
772 
773  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
774 }
775 
777 {
778  if (++f->windex == f->max_size)
779  f->windex = 0;
780  SDL_LockMutex(f->mutex);
781  f->size++;
782  SDL_CondSignal(f->cond);
783  SDL_UnlockMutex(f->mutex);
784 }
785 
787 {
788  if (f->keep_last && !f->rindex_shown) {
789  f->rindex_shown = 1;
790  return;
791  }
792  frame_queue_unref_item(&f->queue[f->rindex]);
793  if (++f->rindex == f->max_size)
794  f->rindex = 0;
795  SDL_LockMutex(f->mutex);
796  f->size--;
797  SDL_CondSignal(f->cond);
798  SDL_UnlockMutex(f->mutex);
799 }
800 
801 /* return the number of undisplayed frames in the queue */
803 {
804  return f->size - f->rindex_shown;
805 }
806 
807 /* return last shown position */
809 {
810  Frame *fp = &f->queue[f->rindex];
811  if (f->rindex_shown && fp->serial == f->pktq->serial)
812  return fp->pos;
813  else
814  return -1;
815 }
816 
817 static void decoder_abort(Decoder *d, FrameQueue *fq)
818 {
820  frame_queue_signal(fq);
821  SDL_WaitThread(d->decoder_tid, NULL);
822  d->decoder_tid = NULL;
824 }
825 
826 static inline void fill_rectangle(int x, int y, int w, int h)
827 {
828  SDL_Rect rect;
829  rect.x = x;
830  rect.y = y;
831  rect.w = w;
832  rect.h = h;
833  if (w && h)
834  SDL_RenderFillRect(renderer, &rect);
835 }
836 
837 static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
838 {
839  Uint32 format;
840  int access, w, h;
841  if (!*texture || SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
842  void *pixels;
843  int pitch;
844  if (*texture)
845  SDL_DestroyTexture(*texture);
846  if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
847  return -1;
848  if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
849  return -1;
850  if (init_texture) {
851  if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
852  return -1;
853  memset(pixels, 0, pitch * new_height);
854  SDL_UnlockTexture(*texture);
855  }
856  av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n", new_width, new_height, SDL_GetPixelFormatName(new_format));
857  }
858  return 0;
859 }
860 
861 static void calculate_display_rect(SDL_Rect *rect,
862  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
863  int pic_width, int pic_height, AVRational pic_sar)
864 {
865  AVRational aspect_ratio = pic_sar;
866  int64_t width, height, x, y;
867 
868  if (av_cmp_q(aspect_ratio, av_make_q(0, 1)) <= 0)
869  aspect_ratio = av_make_q(1, 1);
870 
871  aspect_ratio = av_mul_q(aspect_ratio, av_make_q(pic_width, pic_height));
872 
873  /* XXX: we suppose the screen has a 1.0 pixel ratio */
874  height = scr_height;
875  width = av_rescale(height, aspect_ratio.num, aspect_ratio.den) & ~1;
876  if (width > scr_width) {
877  width = scr_width;
878  height = av_rescale(width, aspect_ratio.den, aspect_ratio.num) & ~1;
879  }
880  x = (scr_width - width) / 2;
881  y = (scr_height - height) / 2;
882  rect->x = scr_xleft + x;
883  rect->y = scr_ytop + y;
884  rect->w = FFMAX((int)width, 1);
885  rect->h = FFMAX((int)height, 1);
886 }
887 
888 static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
889 {
890  int i;
891  *sdl_blendmode = SDL_BLENDMODE_NONE;
892  *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN;
893  if (format == AV_PIX_FMT_RGB32 ||
897  *sdl_blendmode = SDL_BLENDMODE_BLEND;
898  for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; i++) {
900  *sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt;
901  return;
902  }
903  }
904 }
905 
906 static int upload_texture(SDL_Texture **tex, AVFrame *frame)
907 {
908  int ret = 0;
909  Uint32 sdl_pix_fmt;
910  SDL_BlendMode sdl_blendmode;
911  get_sdl_pix_fmt_and_blendmode(frame->format, &sdl_pix_fmt, &sdl_blendmode);
912  if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
913  return -1;
914  switch (sdl_pix_fmt) {
915  case SDL_PIXELFORMAT_IYUV:
916  if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
917  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0],
918  frame->data[1], frame->linesize[1],
919  frame->data[2], frame->linesize[2]);
920  } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
921  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0],
922  frame->data[1] + frame->linesize[1] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[1],
923  frame->data[2] + frame->linesize[2] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[2]);
924  } else {
925  av_log(NULL, AV_LOG_ERROR, "Mixed negative and positive linesizes are not supported.\n");
926  return -1;
927  }
928  break;
929  default:
930  if (frame->linesize[0] < 0) {
931  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
932  } else {
933  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0], frame->linesize[0]);
934  }
935  break;
936  }
937  return ret;
938 }
939 
945 };
946 
948 {
949 #if SDL_VERSION_ATLEAST(2,0,8)
950  SDL_YUV_CONVERSION_MODE mode = SDL_YUV_CONVERSION_AUTOMATIC;
951  if (frame && (frame->format == AV_PIX_FMT_YUV420P || frame->format == AV_PIX_FMT_YUYV422 || frame->format == AV_PIX_FMT_UYVY422)) {
952  if (frame->color_range == AVCOL_RANGE_JPEG)
953  mode = SDL_YUV_CONVERSION_JPEG;
954  else if (frame->colorspace == AVCOL_SPC_BT709)
955  mode = SDL_YUV_CONVERSION_BT709;
956  else if (frame->colorspace == AVCOL_SPC_BT470BG || frame->colorspace == AVCOL_SPC_SMPTE170M)
957  mode = SDL_YUV_CONVERSION_BT601;
958  }
959  SDL_SetYUVConversionMode(mode); /* FIXME: no support for linear transfer */
960 #endif
961 }
962 
964 {
965  Frame *vp;
966  Frame *sp = NULL;
967  SDL_Rect rect;
968 
969  vp = frame_queue_peek_last(&is->pictq);
970  if (vk_renderer) {
972  return;
973  }
974 
975  if (is->subtitle_st) {
976  if (frame_queue_nb_remaining(&is->subpq) > 0) {
977  sp = frame_queue_peek(&is->subpq);
978 
979  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
980  if (!sp->uploaded) {
981  uint8_t* pixels[4];
982  int pitch[4];
983  int i;
984  if (!sp->width || !sp->height) {
985  sp->width = vp->width;
986  sp->height = vp->height;
987  }
988  if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
989  return;
990 
991  for (i = 0; i < sp->sub.num_rects; i++) {
992  AVSubtitleRect *sub_rect = sp->sub.rects[i];
993 
994  sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
995  sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
996  sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x);
997  sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
998 
999  is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
1000  sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8,
1001  sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA,
1002  0, NULL, NULL, NULL);
1003  if (!is->sub_convert_ctx) {
1004  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1005  return;
1006  }
1007  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) {
1008  sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
1009  0, sub_rect->h, pixels, pitch);
1010  SDL_UnlockTexture(is->sub_texture);
1011  }
1012  }
1013  sp->uploaded = 1;
1014  }
1015  } else
1016  sp = NULL;
1017  }
1018  }
1019 
1020  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
1022 
1023  if (!vp->uploaded) {
1024  if (upload_texture(&is->vid_texture, vp->frame) < 0) {
1026  return;
1027  }
1028  vp->uploaded = 1;
1029  vp->flip_v = vp->frame->linesize[0] < 0;
1030  }
1031 
1032  SDL_RenderCopyEx(renderer, is->vid_texture, NULL, &rect, 0, NULL, vp->flip_v ? SDL_FLIP_VERTICAL : 0);
1034  if (sp) {
1035 #if USE_ONEPASS_SUBTITLE_RENDER
1036  SDL_RenderCopy(renderer, is->sub_texture, NULL, &rect);
1037 #else
1038  int i;
1039  double xratio = (double)rect.w / (double)sp->width;
1040  double yratio = (double)rect.h / (double)sp->height;
1041  for (i = 0; i < sp->sub.num_rects; i++) {
1042  SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
1043  SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
1044  .y = rect.y + sub_rect->y * yratio,
1045  .w = sub_rect->w * xratio,
1046  .h = sub_rect->h * yratio};
1047  SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
1048  }
1049 #endif
1050  }
1051 }
1052 
/* Return a modulo b, normalized into [0, b) for positive b.
 *
 * Unlike C's '%' operator, the result is never negative for negative a.
 * The previous formulation (a < 0 ? a%b + b : a%b) returned b instead of 0
 * when a was an exact negative multiple of b; since the result is used as
 * a start index into the sample array (modulo SAMPLE_ARRAY_SIZE), that
 * could yield an out-of-range index. */
static inline int compute_mod(int a, int b)
{
    int m = a % b;
    return m < 0 ? m + b : m;
}
1057 
1059 {
1060  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1061  int ch, channels, h, h2;
1062  int64_t time_diff;
1063  int rdft_bits, nb_freq;
1064 
1065  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1066  ;
1067  nb_freq = 1 << (rdft_bits - 1);
1068 
1069  /* compute display index : center on currently output samples */
1070  channels = s->audio_tgt.ch_layout.nb_channels;
1071  nb_display_channels = channels;
1072  if (!s->paused) {
1073  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1074  n = 2 * channels;
1075  delay = s->audio_write_buf_size;
1076  delay /= n;
1077 
1078  /* to be more precise, we take into account the time spent since
1079  the last buffer computation */
1080  if (audio_callback_time) {
1081  time_diff = av_gettime_relative() - audio_callback_time;
1082  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1083  }
1084 
1085  delay += 2 * data_used;
1086  if (delay < data_used)
1087  delay = data_used;
1088 
1089  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1090  if (s->show_mode == SHOW_MODE_WAVES) {
1091  h = INT_MIN;
1092  for (i = 0; i < 1000; i += channels) {
1093  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1094  int a = s->sample_array[idx];
1095  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1096  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1097  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1098  int score = a - d;
1099  if (h < score && (b ^ c) < 0) {
1100  h = score;
1101  i_start = idx;
1102  }
1103  }
1104  }
1105 
1106  s->last_i_start = i_start;
1107  } else {
1108  i_start = s->last_i_start;
1109  }
1110 
1111  if (s->show_mode == SHOW_MODE_WAVES) {
1112  SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
1113 
1114  /* total height for one channel */
1115  h = s->height / nb_display_channels;
1116  /* graph height / 2 */
1117  h2 = (h * 9) / 20;
1118  for (ch = 0; ch < nb_display_channels; ch++) {
1119  i = i_start + ch;
1120  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1121  for (x = 0; x < s->width; x++) {
1122  y = (s->sample_array[i] * h2) >> 15;
1123  if (y < 0) {
1124  y = -y;
1125  ys = y1 - y;
1126  } else {
1127  ys = y1;
1128  }
1129  fill_rectangle(s->xleft + x, ys, 1, y);
1130  i += channels;
1131  if (i >= SAMPLE_ARRAY_SIZE)
1132  i -= SAMPLE_ARRAY_SIZE;
1133  }
1134  }
1135 
1136  SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
1137 
1138  for (ch = 1; ch < nb_display_channels; ch++) {
1139  y = s->ytop + ch * h;
1140  fill_rectangle(s->xleft, y, s->width, 1);
1141  }
1142  } else {
1143  int err = 0;
1144  if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
1145  return;
1146 
1147  if (s->xpos >= s->width)
1148  s->xpos = 0;
1149  nb_display_channels= FFMIN(nb_display_channels, 2);
1150  if (rdft_bits != s->rdft_bits) {
1151  const float rdft_scale = 1.0;
1152  av_tx_uninit(&s->rdft);
1153  av_freep(&s->real_data);
1154  av_freep(&s->rdft_data);
1155  s->rdft_bits = rdft_bits;
1156  s->real_data = av_malloc_array(nb_freq, 4 *sizeof(*s->real_data));
1157  s->rdft_data = av_malloc_array(nb_freq + 1, 2 *sizeof(*s->rdft_data));
1158  err = av_tx_init(&s->rdft, &s->rdft_fn, AV_TX_FLOAT_RDFT,
1159  0, 1 << rdft_bits, &rdft_scale, 0);
1160  }
1161  if (err < 0 || !s->rdft_data) {
1162  av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1163  s->show_mode = SHOW_MODE_WAVES;
1164  } else {
1165  float *data_in[2];
1166  AVComplexFloat *data[2];
1167  SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
1168  uint32_t *pixels;
1169  int pitch;
1170  for (ch = 0; ch < nb_display_channels; ch++) {
1171  data_in[ch] = s->real_data + 2 * nb_freq * ch;
1172  data[ch] = s->rdft_data + nb_freq * ch;
1173  i = i_start + ch;
1174  for (x = 0; x < 2 * nb_freq; x++) {
1175  double w = (x-nb_freq) * (1.0 / nb_freq);
1176  data_in[ch][x] = s->sample_array[i] * (1.0 - w * w);
1177  i += channels;
1178  if (i >= SAMPLE_ARRAY_SIZE)
1179  i -= SAMPLE_ARRAY_SIZE;
1180  }
1181  s->rdft_fn(s->rdft, data[ch], data_in[ch], sizeof(float));
1182  data[ch][0].im = data[ch][nb_freq].re;
1183  data[ch][nb_freq].re = 0;
1184  }
1185  /* Least efficient way to do this, we should of course
1186  * directly access it but it is more than fast enough. */
1187  if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
1188  pitch >>= 2;
1189  pixels += pitch * s->height;
1190  for (y = 0; y < s->height; y++) {
1191  double w = 1 / sqrt(nb_freq);
1192  int a = sqrt(w * sqrt(data[0][y].re * data[0][y].re + data[0][y].im * data[0][y].im));
1193  int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][y].re, data[1][y].im))
1194  : a;
1195  a = FFMIN(a, 255);
1196  b = FFMIN(b, 255);
1197  pixels -= pitch;
1198  *pixels = (a << 16) + (b << 8) + ((a+b) >> 1);
1199  }
1200  SDL_UnlockTexture(s->vis_texture);
1201  }
1202  SDL_RenderCopy(renderer, s->vis_texture, NULL, NULL);
1203  }
1204  if (!s->paused)
1205  s->xpos++;
1206  }
1207 }
1208 
1209 static void stream_component_close(VideoState *is, int stream_index)
1210 {
1211  AVFormatContext *ic = is->ic;
1212  AVCodecParameters *codecpar;
1213 
1214  if (stream_index < 0 || stream_index >= ic->nb_streams)
1215  return;
1216  codecpar = ic->streams[stream_index]->codecpar;
1217 
1218  switch (codecpar->codec_type) {
1219  case AVMEDIA_TYPE_AUDIO:
1220  decoder_abort(&is->auddec, &is->sampq);
1221  SDL_CloseAudioDevice(audio_dev);
1222  decoder_destroy(&is->auddec);
1223  swr_free(&is->swr_ctx);
1224  av_freep(&is->audio_buf1);
1225  is->audio_buf1_size = 0;
1226  is->audio_buf = NULL;
1227 
1228  if (is->rdft) {
1229  av_tx_uninit(&is->rdft);
1230  av_freep(&is->real_data);
1231  av_freep(&is->rdft_data);
1232  is->rdft = NULL;
1233  is->rdft_bits = 0;
1234  }
1235  break;
1236  case AVMEDIA_TYPE_VIDEO:
1237  decoder_abort(&is->viddec, &is->pictq);
1238  decoder_destroy(&is->viddec);
1239  break;
1240  case AVMEDIA_TYPE_SUBTITLE:
1241  decoder_abort(&is->subdec, &is->subpq);
1242  decoder_destroy(&is->subdec);
1243  break;
1244  default:
1245  break;
1246  }
1247 
1248  ic->streams[stream_index]->discard = AVDISCARD_ALL;
1249  switch (codecpar->codec_type) {
1250  case AVMEDIA_TYPE_AUDIO:
1251  is->audio_st = NULL;
1252  is->audio_stream = -1;
1253  break;
1254  case AVMEDIA_TYPE_VIDEO:
1255  is->video_st = NULL;
1256  is->video_stream = -1;
1257  break;
1258  case AVMEDIA_TYPE_SUBTITLE:
1259  is->subtitle_st = NULL;
1260  is->subtitle_stream = -1;
1261  break;
1262  default:
1263  break;
1264  }
1265 }
1266 
1268 {
1269  /* XXX: use a special url_shutdown call to abort parse cleanly */
1270  is->abort_request = 1;
1271  SDL_WaitThread(is->read_tid, NULL);
1272 
1273  /* close each stream */
1274  if (is->audio_stream >= 0)
1275  stream_component_close(is, is->audio_stream);
1276  if (is->video_stream >= 0)
1277  stream_component_close(is, is->video_stream);
1278  if (is->subtitle_stream >= 0)
1279  stream_component_close(is, is->subtitle_stream);
1280 
1281  avformat_close_input(&is->ic);
1282 
1283  packet_queue_destroy(&is->videoq);
1284  packet_queue_destroy(&is->audioq);
1285  packet_queue_destroy(&is->subtitleq);
1286 
1287  /* free all pictures */
1288  frame_queue_destroy(&is->pictq);
1289  frame_queue_destroy(&is->sampq);
1290  frame_queue_destroy(&is->subpq);
1291  SDL_DestroyCond(is->continue_read_thread);
1292  sws_freeContext(is->sub_convert_ctx);
1293  av_free(is->filename);
1294  if (is->vis_texture)
1295  SDL_DestroyTexture(is->vis_texture);
1296  if (is->vid_texture)
1297  SDL_DestroyTexture(is->vid_texture);
1298  if (is->sub_texture)
1299  SDL_DestroyTexture(is->sub_texture);
1300  av_free(is);
1301 }
1302 
/* Tear down the whole player (stream, SDL objects, options) and exit(0). */
static void do_exit(VideoState *is)
{
    if (is) {
        stream_close(is);
    }
    if (renderer)
        SDL_DestroyRenderer(renderer);
    /* NOTE(review): the statement body of this `if` appears to be missing
     * from this copy of the source (likely a vk_renderer destroy call) —
     * as written it governs the following `if (window)` statement. */
    if (vk_renderer)
    if (window)
        SDL_DestroyWindow(window);
    uninit_opts();
    /* NOTE(review): the loop body and several cleanup lines appear to be
     * missing here (source truncated); the loop governs `if (show_status)`. */
    for (int i = 0; i < nb_vfilters; i++)
    if (show_status)
        printf("\n");
    SDL_Quit();
    av_log(NULL, AV_LOG_QUIET, "%s", "");
    exit(0);
}
1329 
/* Signal handler: terminate the process with a distinctive exit status. */
static void sigterm_handler(int sig)
{
    (void) sig; /* the signal number itself is not used */
    exit(123);
}
1334 
1336 {
1337  SDL_Rect rect;
1338  int max_width = screen_width ? screen_width : INT_MAX;
1339  int max_height = screen_height ? screen_height : INT_MAX;
1340  if (max_width == INT_MAX && max_height == INT_MAX)
1341  max_height = height;
1342  calculate_display_rect(&rect, 0, 0, max_width, max_height, width, height, sar);
1343  default_width = rect.w;
1344  default_height = rect.h;
1345 }
1346 
1348 {
1349  int w,h;
1350 
1353 
1354  if (!window_title)
1356  SDL_SetWindowTitle(window, window_title);
1357 
1358  SDL_SetWindowSize(window, w, h);
1359  SDL_SetWindowPosition(window, screen_left, screen_top);
1360  if (is_full_screen)
1361  SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP);
1362  SDL_ShowWindow(window);
1363 
1364  is->width = w;
1365  is->height = h;
1366 
1367  return 0;
1368 }
1369 
1370 /* display the current picture, if any */
1372 {
1373  if (!is->width)
1374  video_open(is);
1375 
1376  SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1377  SDL_RenderClear(renderer);
1378  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1380  else if (is->video_st)
1382  SDL_RenderPresent(renderer);
1383 }
1384 
1385 static double get_clock(Clock *c)
1386 {
1387  if (*c->queue_serial != c->serial)
1388  return NAN;
1389  if (c->paused) {
1390  return c->pts;
1391  } else {
1392  double time = av_gettime_relative() / 1000000.0;
1393  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1394  }
1395 }
1396 
1397 static void set_clock_at(Clock *c, double pts, int serial, double time)
1398 {
1399  c->pts = pts;
1400  c->last_updated = time;
1401  c->pts_drift = c->pts - time;
1402  c->serial = serial;
1403 }
1404 
1405 static void set_clock(Clock *c, double pts, int serial)
1406 {
1407  double time = av_gettime_relative() / 1000000.0;
1408  set_clock_at(c, pts, serial, time);
1409 }
1410 
/* Change the clock's rate. */
static void set_clock_speed(Clock *c, double speed)
{
    /* re-anchor the clock at its current value first so the rate change
     * does not introduce a jump */
    set_clock(c, get_clock(c), c->serial);
    c->speed = speed;
}
1416 
1417 static void init_clock(Clock *c, int *queue_serial)
1418 {
1419  c->speed = 1.0;
1420  c->paused = 0;
1421  c->queue_serial = queue_serial;
1422  set_clock(c, NAN, -1);
1423 }
1424 
1425 static void sync_clock_to_slave(Clock *c, Clock *slave)
1426 {
1427  double clock = get_clock(c);
1428  double slave_clock = get_clock(slave);
1429  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1430  set_clock(c, slave_clock, slave->serial);
1431 }
1432 
    /* Body of get_master_sync_type() (signature not visible in this
     * chunk): pick the effective sync master, falling back when the
     * requested master's stream is not present. */
    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
        if (is->video_st)
            return AV_SYNC_VIDEO_MASTER;
        else
            return AV_SYNC_AUDIO_MASTER;
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
        if (is->audio_st)
            return AV_SYNC_AUDIO_MASTER;
        else
            return AV_SYNC_EXTERNAL_CLOCK;
    } else {
        return AV_SYNC_EXTERNAL_CLOCK;
    }
}
1448 
1449 /* get the current master clock value */
1451 {
1452  double val;
1453 
1454  switch (get_master_sync_type(is)) {
1455  case AV_SYNC_VIDEO_MASTER:
1456  val = get_clock(&is->vidclk);
1457  break;
1458  case AV_SYNC_AUDIO_MASTER:
1459  val = get_clock(&is->audclk);
1460  break;
1461  default:
1462  val = get_clock(&is->extclk);
1463  break;
1464  }
1465  return val;
1466 }
1467 
   /* Body of check_external_clock_speed() (signature not visible in this
    * chunk): adapt the external clock rate to the packet queue fill level.
    * NOTE(review): the set_clock_speed() calls of the first two branches
    * appear to be missing from this copy of the source (truncated). */
   if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES ||
       is->audio_stream >= 0 && is->audioq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES) {
   } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) &&
              (is->audio_stream < 0 || is->audioq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES)) {
   } else {
       /* queues in the comfortable range: ease the speed back toward 1.0 */
       double speed = is->extclk.speed;
       if (speed != 1.0)
           set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
   }
}
1481 
1482 /* seek in the stream */
1483 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int by_bytes)
1484 {
1485  if (!is->seek_req) {
1486  is->seek_pos = pos;
1487  is->seek_rel = rel;
1488  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1489  if (by_bytes)
1490  is->seek_flags |= AVSEEK_FLAG_BYTE;
1491  is->seek_req = 1;
1492  SDL_CondSignal(is->continue_read_thread);
1493  }
1494 }
1495 
1496 /* pause or resume the video */
1498 {
1499  if (is->paused) {
1500  is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1501  if (is->read_pause_return != AVERROR(ENOSYS)) {
1502  is->vidclk.paused = 0;
1503  }
1504  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1505  }
1506  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1507  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1508 }
1509 
1511 {
1513  is->step = 0;
1514 }
1515 
1517 {
1518  is->muted = !is->muted;
1519 }
1520 
1521 static void update_volume(VideoState *is, int sign, double step)
1522 {
1523  double volume_level = is->audio_volume ? (20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)) : -1000.0;
1524  int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (volume_level + sign * step) / 20.0));
1525  is->audio_volume = av_clip(is->audio_volume == new_volume ? (is->audio_volume + sign) : new_volume, 0, SDL_MIX_MAXVOLUME);
1526 }
1527 
1529 {
1530  /* if the stream is paused unpause it, then step */
1531  if (is->paused)
1533  is->step = 1;
1534 }
1535 
/* Adjust the nominal frame delay so video follows the master clock. */
static double compute_target_delay(double delay, VideoState *is)
{
    double sync_threshold, diff = 0;

    /* update delay to follow master synchronisation source */
    /* NOTE(review): the guard `if (get_master_sync_type(is) !=
     * AV_SYNC_VIDEO_MASTER) {` appears to be missing from this copy of
     * the source — the stray `}` below would close it. */
    /* if video is slave, we try to correct big delays by
       duplicating or deleting a frame */
    diff = get_clock(&is->vidclk) - get_master_clock(is);

    /* skip or repeat frame. We take into account the
       delay to compute the threshold. I still don't know
       if it is the best guess */
    sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
    if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
        if (diff <= -sync_threshold)
            delay = FFMAX(0, delay + diff);
        else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
            delay = delay + diff;
        else if (diff >= sync_threshold)
            delay = 2 * delay;
    }
    }

    av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
           delay, -diff);

    return delay;
}
1565 
1566 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1567  if (vp->serial == nextvp->serial) {
1568  double duration = nextvp->pts - vp->pts;
1569  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1570  return vp->duration;
1571  else
1572  return duration;
1573  } else {
1574  return 0.0;
1575  }
1576 }
1577 
/* Publish a new video pts and let the external clock track it. */
static void update_video_pts(VideoState *is, double pts, int serial)
{
    /* update current video pts */
    set_clock(&is->vidclk, pts, serial);
    sync_clock_to_slave(&is->extclk, &is->vidclk);
}
1584 
1585 /* called to display each frame */
/* called to display each frame */
static void video_refresh(void *opaque, double *remaining_time)
{
    VideoState *is = opaque;
    double time;

    Frame *sp, *sp2;

    /* NOTE(review): the statement of this `if` (presumably
     * check_external_clock_speed(is)) appears to be missing from this
     * copy of the source, so the next `if` becomes its body. */
    if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)

    /* audio-only visualization: redraw at most every rdftspeed seconds */
    if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
        time = av_gettime_relative() / 1000000.0;
        if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
            video_display(is);
            is->last_vis_time = time;
        }
        *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
    }

    if (is->video_st) {
retry:
        if (frame_queue_nb_remaining(&is->pictq) == 0) {
            // nothing to do, no picture to display in the queue
        } else {
            double last_duration, duration, delay;
            Frame *vp, *lastvp;

            /* dequeue the picture */
            lastvp = frame_queue_peek_last(&is->pictq);
            vp = frame_queue_peek(&is->pictq);

            /* drop frames left over from before a seek/flush */
            if (vp->serial != is->videoq.serial) {
                frame_queue_next(&is->pictq);
                goto retry;
            }

            if (lastvp->serial != vp->serial)
                is->frame_timer = av_gettime_relative() / 1000000.0;

            if (is->paused)
                goto display;

            /* compute nominal last_duration */
            last_duration = vp_duration(is, lastvp, vp);
            delay = compute_target_delay(last_duration, is);

            time= av_gettime_relative()/1000000.0;
            /* not yet time for the next frame: keep showing the last one */
            if (time < is->frame_timer + delay) {
                *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
                goto display;
            }

            is->frame_timer += delay;
            /* resynchronize the timer if we fell too far behind */
            if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
                is->frame_timer = time;

            SDL_LockMutex(is->pictq.mutex);
            if (!isnan(vp->pts))
                update_video_pts(is, vp->pts, vp->serial);
            SDL_UnlockMutex(is->pictq.mutex);

            /* late-frame dropping when video is not the sync master */
            if (frame_queue_nb_remaining(&is->pictq) > 1) {
                Frame *nextvp = frame_queue_peek_next(&is->pictq);
                duration = vp_duration(is, vp, nextvp);
                if(!is->step && (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
                    is->frame_drops_late++;
                    frame_queue_next(&is->pictq);
                    goto retry;
                }
            }

            /* retire subtitles whose display window has passed */
            if (is->subtitle_st) {
                while (frame_queue_nb_remaining(&is->subpq) > 0) {
                    sp = frame_queue_peek(&is->subpq);

                    if (frame_queue_nb_remaining(&is->subpq) > 1)
                        sp2 = frame_queue_peek_next(&is->subpq);
                    else
                        sp2 = NULL;

                    if (sp->serial != is->subtitleq.serial
                            || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                            || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                    {
                        if (sp->uploaded) {
                            int i;
                            /* clear the texture areas the subtitle occupied */
                            for (i = 0; i < sp->sub.num_rects; i++) {
                                AVSubtitleRect *sub_rect = sp->sub.rects[i];
                                uint8_t *pixels;
                                int pitch, j;

                                if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
                                    for (j = 0; j < sub_rect->h; j++, pixels += pitch)
                                        memset(pixels, 0, sub_rect->w << 2);
                                    SDL_UnlockTexture(is->sub_texture);
                                }
                            }
                        }
                        frame_queue_next(&is->subpq);
                    } else {
                        break;
                    }
                }
            }

            frame_queue_next(&is->pictq);
            is->force_refresh = 1;

            /* NOTE(review): the statement of this `if` (presumably
             * stream_toggle_pause(is)) appears to be missing from this
             * copy of the source. */
            if (is->step && !is->paused)
        }
display:
        /* display picture */
        if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
            video_display(is);
    }
    is->force_refresh = 0;
    /* periodic status line (at most every 30ms) */
    if (show_status) {
        AVBPrint buf;
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime_relative();
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
            else if (is->video_st)
                av_diff = get_master_clock(is) - get_clock(&is->vidclk);
            else if (is->audio_st)
                av_diff = get_master_clock(is) - get_clock(&is->audclk);

            /* NOTE(review): av_bprint_init(&buf, ...) and the leading
             * get_master_clock(is) argument appear to be missing from
             * this copy of the source (truncated lines). */
            av_bprintf(&buf,
                       "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB \r",
                       (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
                       av_diff,
                       is->frame_drops_early + is->frame_drops_late,
                       aqsize / 1024,
                       vqsize / 1024,
                       sqsize);

            if (show_status == 1 && AV_LOG_INFO > av_log_get_level())
                fprintf(stderr, "%s", buf.str);
            else
                av_log(NULL, AV_LOG_INFO, "%s", buf.str);

            fflush(stderr);
            av_bprint_finalize(&buf, NULL);

            last_time = cur_time;
        }
    }
}
1752 
1753 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1754 {
1755  Frame *vp;
1756 
1757 #if defined(DEBUG_SYNC)
1758  printf("frame_type=%c pts=%0.3f\n",
1759  av_get_picture_type_char(src_frame->pict_type), pts);
1760 #endif
1761 
1762  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1763  return -1;
1764 
1765  vp->sar = src_frame->sample_aspect_ratio;
1766  vp->uploaded = 0;
1767 
1768  vp->width = src_frame->width;
1769  vp->height = src_frame->height;
1770  vp->format = src_frame->format;
1771 
1772  vp->pts = pts;
1773  vp->duration = duration;
1774  vp->pos = pos;
1775  vp->serial = serial;
1776 
1777  set_default_window_size(vp->width, vp->height, vp->sar);
1778 
1779  av_frame_move_ref(vp->frame, src_frame);
1780  frame_queue_push(&is->pictq);
1781  return 0;
1782 }
1783 
1785 {
1786  int got_picture;
1787 
1788  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1789  return -1;
1790 
1791  if (got_picture) {
1792  double dpts = NAN;
1793 
1794  if (frame->pts != AV_NOPTS_VALUE)
1795  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1796 
1797  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1798 
1800  if (frame->pts != AV_NOPTS_VALUE) {
1801  double diff = dpts - get_master_clock(is);
1802  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1803  diff - is->frame_last_filter_delay < 0 &&
1804  is->viddec.pkt_serial == is->vidclk.serial &&
1805  is->videoq.nb_packets) {
1806  is->frame_drops_early++;
1808  got_picture = 0;
1809  }
1810  }
1811  }
1812  }
1813 
1814  return got_picture;
1815 }
1816 
/* Link source_ctx to sink_ctx, optionally through a user filtergraph
 * string, then configure the graph. Returns 0 or a negative AVERROR. */
static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
                                 AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
{
    int ret, i;
    int nb_filters = graph->nb_filters;

    if (filtergraph) {
        /* NOTE(review): the declarations/allocations of `outputs` and
         * `inputs` (AVFilterInOut) appear to be missing from this copy
         * of the source (truncated lines). */
        if (!outputs || !inputs) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        /* "in" feeds the user graph from our source filter */
        outputs->name = av_strdup("in");
        outputs->filter_ctx = source_ctx;
        outputs->pad_idx = 0;
        outputs->next = NULL;

        /* "out" drains the user graph into our sink filter */
        inputs->name = av_strdup("out");
        inputs->filter_ctx = sink_ctx;
        inputs->pad_idx = 0;
        inputs->next = NULL;

        if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
            goto fail;
    } else {
        if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
            goto fail;
    }

    /* Reorder the filters to ensure that inputs of the custom filters are merged first */
    for (i = 0; i < graph->nb_filters - nb_filters; i++)
        FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);

    ret = avfilter_graph_config(graph, NULL);
fail:
    /* NOTE(review): the avfilter_inout_free() cleanup calls appear to be
     * missing from this copy of the source (truncated lines). */
    return ret;
}
1859 
/* Build the video filter chain: buffer source -> (autorotate filters) ->
 * user filters -> buffersink constrained to SDL-displayable pixel formats. */
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
{
    /* NOTE(review): the declaration of `pix_fmts` appears to be missing
     * from this copy of the source (truncated line). */
    char sws_flags_str[512] = "";
    char buffersrc_args[256];
    int ret;
    AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
    AVCodecParameters *codecpar = is->video_st->codecpar;
    AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
    const AVDictionaryEntry *e = NULL;
    int nb_pix_fmts = 0;
    int i, j;
    /* NOTE(review): the allocation of `par` (buffersrc parameters) appears
     * to be missing from this copy of the source (truncated line). */

    if (!par)
        return AVERROR(ENOMEM);

    /* collect the pixel formats the SDL renderer can display directly */
    for (i = 0; i < renderer_info.num_texture_formats; i++) {
        for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; j++) {
            if (renderer_info.texture_formats[i] == sdl_texture_format_map[j].texture_fmt) {
                pix_fmts[nb_pix_fmts++] = sdl_texture_format_map[j].format;
                break;
            }
        }
    }
    pix_fmts[nb_pix_fmts] = AV_PIX_FMT_NONE;

    /* forward the user's sws options to the auto-inserted scale filters */
    while ((e = av_dict_iterate(sws_dict, e))) {
        if (!strcmp(e->key, "sws_flags")) {
            av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
        } else
            av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value);
    }
    if (strlen(sws_flags_str))
        sws_flags_str[strlen(sws_flags_str)-1] = '\0';

    graph->scale_sws_opts = av_strdup(sws_flags_str);

    snprintf(buffersrc_args, sizeof(buffersrc_args),
             "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d:"
             "colorspace=%d:range=%d",
             frame->width, frame->height, frame->format,
             is->video_st->time_base.num, is->video_st->time_base.den,
             codecpar->sample_aspect_ratio.num, FFMAX(codecpar->sample_aspect_ratio.den, 1),
             frame->colorspace, frame->color_range);
    if (fr.num && fr.den)
        av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);

    if ((ret = avfilter_graph_create_filter(&filt_src,
                                            avfilter_get_by_name("buffer"),
                                            "ffplay_buffer", buffersrc_args, NULL,
                                            graph)) < 0)
        goto fail;
    par->hw_frames_ctx = frame->hw_frames_ctx;
    ret = av_buffersrc_parameters_set(filt_src, par);
    if (ret < 0)
        goto fail;

    ret = avfilter_graph_create_filter(&filt_out,
                                       avfilter_get_by_name("buffersink"),
                                       "ffplay_buffersink", NULL, NULL, graph);
    if (ret < 0)
        goto fail;

    if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
        goto fail;
    /* NOTE(review): the second operand of this `&&` appears to be missing
     * from this copy of the source (truncated line). */
    if (!vk_renderer &&
        goto fail;

    last_filter = filt_out;

/* Note: this macro adds a filter before the lastly added filter, so the
 * processing order of the filters is in reverse */
#define INSERT_FILT(name, arg) do {                                          \
    AVFilterContext *filt_ctx;                                               \
                                                                             \
    ret = avfilter_graph_create_filter(&filt_ctx,                            \
                                       avfilter_get_by_name(name),           \
                                       "ffplay_" name, arg, NULL, graph);    \
    if (ret < 0)                                                             \
        goto fail;                                                           \
                                                                             \
    ret = avfilter_link(filt_ctx, 0, last_filter, 0);                        \
    if (ret < 0)                                                             \
        goto fail;                                                           \
                                                                             \
    last_filter = filt_ctx;                                                  \
} while (0)

    if (autorotate) {
        double theta = 0.0;
        int32_t *displaymatrix = NULL;
        /* NOTE(review): the assignment of `sd` (frame side data lookup)
         * appears to be missing from this copy of the source. */
        if (sd)
            displaymatrix = (int32_t *)sd->data;
        if (!displaymatrix) {
            /* NOTE(review): the final argument of this call (the side-data
             * type constant) appears to be missing (truncated line). */
            const AVPacketSideData *psd = av_packet_side_data_get(is->video_st->codecpar->coded_side_data,
                                                                  is->video_st->codecpar->nb_coded_side_data,
            if (psd)
                displaymatrix = (int32_t *)psd->data;
        }
        theta = get_rotation(displaymatrix);

        if (fabs(theta - 90) < 1.0) {
            INSERT_FILT("transpose", displaymatrix[3] > 0 ? "cclock_flip" : "clock");
        } else if (fabs(theta - 180) < 1.0) {
            if (displaymatrix[0] < 0)
                INSERT_FILT("hflip", NULL);
            if (displaymatrix[4] < 0)
                INSERT_FILT("vflip", NULL);
        } else if (fabs(theta - 270) < 1.0) {
            INSERT_FILT("transpose", displaymatrix[3] < 0 ? "clock_flip" : "cclock");
        } else if (fabs(theta) > 1.0) {
            char rotate_buf[64];
            snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
            INSERT_FILT("rotate", rotate_buf);
        } else {
            if (displaymatrix && displaymatrix[4] < 0)
                INSERT_FILT("vflip", NULL);
        }
    }

    if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
        goto fail;

    is->in_video_filter = filt_src;
    is->out_video_filter = filt_out;

fail:
    av_freep(&par);
    return ret;
}
1994 
/* (Re)build the audio filter chain: abuffer source -> user filters ->
 * abuffersink, optionally forcing the device's output format. */
static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
{
    /* NOTE(review): the declaration of `sample_fmts` appears to be missing
     * from this copy of the source (truncated line). */
    int sample_rates[2] = { 0, -1 };
    AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
    char aresample_swr_opts[512] = "";
    const AVDictionaryEntry *e = NULL;
    AVBPrint bp;
    char asrc_args[256];
    int ret;

    avfilter_graph_free(&is->agraph);
    if (!(is->agraph = avfilter_graph_alloc()))
        return AVERROR(ENOMEM);
    is->agraph->nb_threads = filter_nbthreads;

    /* NOTE(review): av_bprint_init(&bp, ...) appears to be missing from
     * this copy of the source (truncated line). */

    /* forward the user's swresample options to auto-inserted aresample */
    while ((e = av_dict_iterate(swr_opts, e)))
        av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
    if (strlen(aresample_swr_opts))
        aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
    av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);

    av_channel_layout_describe_bprint(&is->audio_filter_src.ch_layout, &bp);

    ret = snprintf(asrc_args, sizeof(asrc_args),
                   "sample_rate=%d:sample_fmt=%s:time_base=%d/%d:channel_layout=%s",
                   is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
                   1, is->audio_filter_src.freq, bp.str);

    ret = avfilter_graph_create_filter(&filt_asrc,
                                       avfilter_get_by_name("abuffer"), "ffplay_abuffer",
                                       asrc_args, NULL, is->agraph);
    if (ret < 0)
        goto end;


    ret = avfilter_graph_create_filter(&filt_asink,
                                       avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
                                       NULL, NULL, is->agraph);
    if (ret < 0)
        goto end;

    if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
        goto end;
    if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
        goto end;

    /* pin the sink to the audio device's layout and rate when requested */
    if (force_output_format) {
        av_bprint_clear(&bp);
        av_channel_layout_describe_bprint(&is->audio_tgt.ch_layout, &bp);
        sample_rates[0] = is->audio_tgt.freq;
        if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
            goto end;
        if ((ret = av_opt_set(filt_asink, "ch_layouts", bp.str, AV_OPT_SEARCH_CHILDREN)) < 0)
            goto end;
        if ((ret = av_opt_set_int_list(filt_asink, "sample_rates", sample_rates, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
            goto end;
    }


    if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
        goto end;

    is->in_audio_filter = filt_asrc;
    is->out_audio_filter = filt_asink;

end:
    if (ret < 0)
        avfilter_graph_free(&is->agraph);
    av_bprint_finalize(&bp, NULL);

    return ret;
}
2070 
/* Audio decoder thread: decode frames, push them through the audio
 * filter chain, and queue the filtered frames for the audio callback. */
static int audio_thread(void *arg)
{
    VideoState *is = arg;
    /* NOTE(review): the allocation of `frame` (av_frame_alloc) appears to
     * be missing from this copy of the source (truncated line). */
    Frame *af;
    int last_serial = -1;
    int reconfigure;
    int got_frame = 0;
    AVRational tb;
    int ret = 0;

    if (!frame)
        return AVERROR(ENOMEM);

    do {
        if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
            goto the_end;

        if (got_frame) {
            tb = (AVRational){1, frame->sample_rate};

            /* rebuild the filter chain when the input format changes */
            reconfigure =
                cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.ch_layout.nb_channels,
                               frame->format, frame->ch_layout.nb_channels) ||
                av_channel_layout_compare(&is->audio_filter_src.ch_layout, &frame->ch_layout) ||
                is->audio_filter_src.freq != frame->sample_rate ||
                is->auddec.pkt_serial != last_serial;

            if (reconfigure) {
                char buf1[1024], buf2[1024];
                av_channel_layout_describe(&is->audio_filter_src.ch_layout, buf1, sizeof(buf1));
                av_channel_layout_describe(&frame->ch_layout, buf2, sizeof(buf2));
                /* NOTE(review): the av_log(NULL, AV_LOG_DEBUG, opening of
                 * this call appears to be missing (truncated line). */
                       "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
                       is->audio_filter_src.freq, is->audio_filter_src.ch_layout.nb_channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
                       frame->sample_rate, frame->ch_layout.nb_channels, av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);

                is->audio_filter_src.fmt = frame->format;
                ret = av_channel_layout_copy(&is->audio_filter_src.ch_layout, &frame->ch_layout);
                if (ret < 0)
                    goto the_end;
                is->audio_filter_src.freq = frame->sample_rate;
                last_serial = is->auddec.pkt_serial;

                if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
                    goto the_end;
            }

            if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
                goto the_end;

            /* drain every frame the filter chain produces */
            while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
                FrameData *fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL;
                tb = av_buffersink_get_time_base(is->out_audio_filter);
                if (!(af = frame_queue_peek_writable(&is->sampq)))
                    goto the_end;

                af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
                af->pos = fd ? fd->pkt_pos : -1;
                af->serial = is->auddec.pkt_serial;
                af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});

                /* NOTE(review): an av_frame_move_ref(af->frame, frame) call
                 * appears to be missing here (truncated line). */
                frame_queue_push(&is->sampq);

                /* stop draining stale output after a seek/flush */
                if (is->audioq.serial != is->auddec.pkt_serial)
                    break;
            }
            if (ret == AVERROR_EOF)
                is->auddec.finished = is->auddec.pkt_serial;
        }
    } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
 the_end:
    avfilter_graph_free(&is->agraph);
    av_frame_free(&frame);
    return ret;
}
2148 
/* Spawn the decoder thread `fn` for decoder d. Returns 0 or AVERROR(ENOMEM). */
static int decoder_start(Decoder *d, int (*fn)(void *), const char *thread_name, void* arg)
{
    /* NOTE(review): a packet_queue_start(d->queue) call appears to be
     * missing here (source truncated). */
    d->decoder_tid = SDL_CreateThread(fn, thread_name, arg);
    if (!d->decoder_tid) {
        av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
        return AVERROR(ENOMEM);
    }
    return 0;
}
2159 
/* Video decoder thread: decode frames, (re)build the video filter chain
 * when the input changes, filter, and queue pictures for display. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    /* NOTE(review): the allocation of `frame` (av_frame_alloc) appears to
     * be missing from this copy of the source (truncated line). */
    double pts;
    double duration;
    int ret;
    AVRational tb = is->video_st->time_base;
    AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);

    AVFilterGraph *graph = NULL;
    AVFilterContext *filt_out = NULL, *filt_in = NULL;
    int last_w = 0;
    int last_h = 0;
    enum AVPixelFormat last_format = -2;
    int last_serial = -1;
    int last_vfilter_idx = 0;

    if (!frame)
        return AVERROR(ENOMEM);

    for (;;) {
        /* NOTE(review): the `ret = get_video_frame(is, frame);` call
         * appears to be missing here (source truncated). */
        if (ret < 0)
            goto the_end;
        if (!ret)
            continue;

        /* reconfigure the filter graph when the frame geometry/format,
         * packet serial or selected vfilter changes */
        if (   last_w != frame->width
            || last_h != frame->height
            || last_format != frame->format
            || last_serial != is->viddec.pkt_serial
            || last_vfilter_idx != is->vfilter_idx) {
            /* NOTE(review): the av_log(NULL, AV_LOG_DEBUG, opening of this
             * call appears to be missing (truncated line). */
               "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
               last_w, last_h,
               (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
               frame->width, frame->height,
               (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
            avfilter_graph_free(&graph);
            graph = avfilter_graph_alloc();
            if (!graph) {
                ret = AVERROR(ENOMEM);
                goto the_end;
            }
            graph->nb_threads = filter_nbthreads;
            if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
                /* a broken filter chain is fatal: ask the main loop to quit */
                SDL_Event event;
                event.type = FF_QUIT_EVENT;
                event.user.data1 = is;
                SDL_PushEvent(&event);
                goto the_end;
            }
            filt_in = is->in_video_filter;
            filt_out = is->out_video_filter;
            last_w = frame->width;
            last_h = frame->height;
            last_format = frame->format;
            last_serial = is->viddec.pkt_serial;
            last_vfilter_idx = is->vfilter_idx;
            frame_rate = av_buffersink_get_frame_rate(filt_out);
        }

        ret = av_buffersrc_add_frame(filt_in, frame);
        if (ret < 0)
            goto the_end;

        /* drain every frame the filter chain produces */
        while (ret >= 0) {
            FrameData *fd;

            is->frame_last_returned_time = av_gettime_relative() / 1000000.0;

            ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
            if (ret < 0) {
                if (ret == AVERROR_EOF)
                    is->viddec.finished = is->viddec.pkt_serial;
                ret = 0;
                break;
            }

            fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL;

            is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
            if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
                is->frame_last_filter_delay = 0;
            tb = av_buffersink_get_time_base(filt_out);
            duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
            pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
            ret = queue_picture(is, frame, pts, duration, fd ? fd->pkt_pos : -1, is->viddec.pkt_serial);
            /* stop draining stale output after a seek/flush */
            if (is->videoq.serial != is->viddec.pkt_serial)
                break;
        }

        if (ret < 0)
            goto the_end;
    }
 the_end:
    avfilter_graph_free(&graph);
    av_frame_free(&frame);
    return 0;
}
2262 
2263 static int subtitle_thread(void *arg)
2264 {
2265  VideoState *is = arg;
2266  Frame *sp;
2267  int got_subtitle;
2268  double pts;
2269 
2270  for (;;) {
2271  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2272  return 0;
2273 
2274  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2275  break;
2276 
2277  pts = 0;
2278 
2279  if (got_subtitle && sp->sub.format == 0) {
2280  if (sp->sub.pts != AV_NOPTS_VALUE)
2281  pts = sp->sub.pts / (double)AV_TIME_BASE;
2282  sp->pts = pts;
2283  sp->serial = is->subdec.pkt_serial;
2284  sp->width = is->subdec.avctx->width;
2285  sp->height = is->subdec.avctx->height;
2286  sp->uploaded = 0;
2287 
2288  /* now we can update the picture count */
2289  frame_queue_push(&is->subpq);
2290  } else if (got_subtitle) {
2291  avsubtitle_free(&sp->sub);
2292  }
2293  }
2294  return 0;
2295 }
2296 
2297 /* copy samples for viewing in editor window */
2298 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2299 {
2300  int size, len;
2301 
2302  size = samples_size / sizeof(short);
2303  while (size > 0) {
2304  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2305  if (len > size)
2306  len = size;
2307  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2308  samples += len;
2309  is->sample_array_index += len;
2310  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2311  is->sample_array_index = 0;
2312  size -= len;
2313  }
2314 }
2315 
2316 /* return the wanted number of samples to get better sync if sync_type is video
2317  * or external master clock */
2318 static int synchronize_audio(VideoState *is, int nb_samples)
2319 {
2320  int wanted_nb_samples = nb_samples;
2321 
2322  /* if not master, then we try to remove or add samples to correct the clock */
2324  double diff, avg_diff;
2325  int min_nb_samples, max_nb_samples;
2326 
2327  diff = get_clock(&is->audclk) - get_master_clock(is);
2328 
2329  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2330  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2331  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2332  /* not enough measures to have a correct estimate */
2333  is->audio_diff_avg_count++;
2334  } else {
2335  /* estimate the A-V difference */
2336  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2337 
2338  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2339  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2340  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2341  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2342  wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2343  }
2344  av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2345  diff, avg_diff, wanted_nb_samples - nb_samples,
2346  is->audio_clock, is->audio_diff_threshold);
2347  }
2348  } else {
2349  /* too big difference : may be initial PTS errors, so
2350  reset A-V filter */
2351  is->audio_diff_avg_count = 0;
2352  is->audio_diff_cum = 0;
2353  }
2354  }
2355 
2356  return wanted_nb_samples;
2357 }
2358 
2359 /**
2360  * Decode one audio frame and return its uncompressed size.
2361  *
2362  * The processed audio frame is decoded, converted if required, and
2363  * stored in is->audio_buf, with size in bytes given by the return
2364  * value.
2365  */
2367 {
2368  int data_size, resampled_data_size;
2369  av_unused double audio_clock0;
2370  int wanted_nb_samples;
2371  Frame *af;
2372 
2373  if (is->paused)
2374  return -1;
2375 
2376  do {
2377 #if defined(_WIN32)
2378  while (frame_queue_nb_remaining(&is->sampq) == 0) {
2379  if ((av_gettime_relative() - audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
2380  return -1;
2381  av_usleep (1000);
2382  }
2383 #endif
2384  if (!(af = frame_queue_peek_readable(&is->sampq)))
2385  return -1;
2386  frame_queue_next(&is->sampq);
2387  } while (af->serial != is->audioq.serial);
2388 
2390  af->frame->nb_samples,
2391  af->frame->format, 1);
2392 
2393  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2394 
2395  if (af->frame->format != is->audio_src.fmt ||
2396  av_channel_layout_compare(&af->frame->ch_layout, &is->audio_src.ch_layout) ||
2397  af->frame->sample_rate != is->audio_src.freq ||
2398  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2399  int ret;
2400  swr_free(&is->swr_ctx);
2401  ret = swr_alloc_set_opts2(&is->swr_ctx,
2402  &is->audio_tgt.ch_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2403  &af->frame->ch_layout, af->frame->format, af->frame->sample_rate,
2404  0, NULL);
2405  if (ret < 0 || swr_init(is->swr_ctx) < 0) {
2407  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2409  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.ch_layout.nb_channels);
2410  swr_free(&is->swr_ctx);
2411  return -1;
2412  }
2413  if (av_channel_layout_copy(&is->audio_src.ch_layout, &af->frame->ch_layout) < 0)
2414  return -1;
2415  is->audio_src.freq = af->frame->sample_rate;
2416  is->audio_src.fmt = af->frame->format;
2417  }
2418 
2419  if (is->swr_ctx) {
2420  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2421  uint8_t **out = &is->audio_buf1;
2422  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2423  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.ch_layout.nb_channels, out_count, is->audio_tgt.fmt, 0);
2424  int len2;
2425  if (out_size < 0) {
2426  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2427  return -1;
2428  }
2429  if (wanted_nb_samples != af->frame->nb_samples) {
2430  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2431  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2432  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2433  return -1;
2434  }
2435  }
2436  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2437  if (!is->audio_buf1)
2438  return AVERROR(ENOMEM);
2439  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2440  if (len2 < 0) {
2441  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2442  return -1;
2443  }
2444  if (len2 == out_count) {
2445  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2446  if (swr_init(is->swr_ctx) < 0)
2447  swr_free(&is->swr_ctx);
2448  }
2449  is->audio_buf = is->audio_buf1;
2450  resampled_data_size = len2 * is->audio_tgt.ch_layout.nb_channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2451  } else {
2452  is->audio_buf = af->frame->data[0];
2453  resampled_data_size = data_size;
2454  }
2455 
2456  audio_clock0 = is->audio_clock;
2457  /* update the audio clock with the pts */
2458  if (!isnan(af->pts))
2459  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2460  else
2461  is->audio_clock = NAN;
2462  is->audio_clock_serial = af->serial;
2463 #ifdef DEBUG
2464  {
2465  static double last_clock;
2466  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2467  is->audio_clock - last_clock,
2468  is->audio_clock, audio_clock0);
2469  last_clock = is->audio_clock;
2470  }
2471 #endif
2472  return resampled_data_size;
2473 }
2474 
2475 /* prepare a new audio buffer */
2476 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2477 {
2478  VideoState *is = opaque;
2479  int audio_size, len1;
2480 
2482 
2483  while (len > 0) {
2484  if (is->audio_buf_index >= is->audio_buf_size) {
2485  audio_size = audio_decode_frame(is);
2486  if (audio_size < 0) {
2487  /* if error, just output silence */
2488  is->audio_buf = NULL;
2489  is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2490  } else {
2491  if (is->show_mode != SHOW_MODE_VIDEO)
2492  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2493  is->audio_buf_size = audio_size;
2494  }
2495  is->audio_buf_index = 0;
2496  }
2497  len1 = is->audio_buf_size - is->audio_buf_index;
2498  if (len1 > len)
2499  len1 = len;
2500  if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
2501  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2502  else {
2503  memset(stream, 0, len1);
2504  if (!is->muted && is->audio_buf)
2505  SDL_MixAudioFormat(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, is->audio_volume);
2506  }
2507  len -= len1;
2508  stream += len1;
2509  is->audio_buf_index += len1;
2510  }
2511  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2512  /* Let's assume the audio driver that is used by SDL has two periods. */
2513  if (!isnan(is->audio_clock)) {
2514  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2515  sync_clock_to_slave(&is->extclk, &is->audclk);
2516  }
2517 }
2518 
2519 static int audio_open(void *opaque, AVChannelLayout *wanted_channel_layout, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2520 {
2521  SDL_AudioSpec wanted_spec, spec;
2522  const char *env;
2523  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2524  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2525  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2526  int wanted_nb_channels = wanted_channel_layout->nb_channels;
2527 
2528  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2529  if (env) {
2530  wanted_nb_channels = atoi(env);
2531  av_channel_layout_uninit(wanted_channel_layout);
2532  av_channel_layout_default(wanted_channel_layout, wanted_nb_channels);
2533  }
2534  if (wanted_channel_layout->order != AV_CHANNEL_ORDER_NATIVE) {
2535  av_channel_layout_uninit(wanted_channel_layout);
2536  av_channel_layout_default(wanted_channel_layout, wanted_nb_channels);
2537  }
2538  wanted_nb_channels = wanted_channel_layout->nb_channels;
2539  wanted_spec.channels = wanted_nb_channels;
2540  wanted_spec.freq = wanted_sample_rate;
2541  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2542  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2543  return -1;
2544  }
2545  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2546  next_sample_rate_idx--;
2547  wanted_spec.format = AUDIO_S16SYS;
2548  wanted_spec.silence = 0;
2549  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
2550  wanted_spec.callback = sdl_audio_callback;
2551  wanted_spec.userdata = opaque;
2552  while (!(audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE))) {
2553  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2554  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2555  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2556  if (!wanted_spec.channels) {
2557  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2558  wanted_spec.channels = wanted_nb_channels;
2559  if (!wanted_spec.freq) {
2561  "No more combinations to try, audio open failed\n");
2562  return -1;
2563  }
2564  }
2565  av_channel_layout_default(wanted_channel_layout, wanted_spec.channels);
2566  }
2567  if (spec.format != AUDIO_S16SYS) {
2569  "SDL advised audio format %d is not supported!\n", spec.format);
2570  return -1;
2571  }
2572  if (spec.channels != wanted_spec.channels) {
2573  av_channel_layout_uninit(wanted_channel_layout);
2574  av_channel_layout_default(wanted_channel_layout, spec.channels);
2575  if (wanted_channel_layout->order != AV_CHANNEL_ORDER_NATIVE) {
2577  "SDL advised channel count %d is not supported!\n", spec.channels);
2578  return -1;
2579  }
2580  }
2581 
2582  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2583  audio_hw_params->freq = spec.freq;
2584  if (av_channel_layout_copy(&audio_hw_params->ch_layout, wanted_channel_layout) < 0)
2585  return -1;
2586  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->ch_layout.nb_channels, 1, audio_hw_params->fmt, 1);
2587  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->ch_layout.nb_channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2588  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2589  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2590  return -1;
2591  }
2592  return spec.size;
2593 }
2594 
2595 static int create_hwaccel(AVBufferRef **device_ctx)
2596 {
2597  enum AVHWDeviceType type;
2598  int ret;
2599  AVBufferRef *vk_dev;
2600 
2601  *device_ctx = NULL;
2602 
2603  if (!hwaccel)
2604  return 0;
2605 
2607  if (type == AV_HWDEVICE_TYPE_NONE)
2608  return AVERROR(ENOTSUP);
2609 
2610  if (!vk_renderer) {
2611  av_log(NULL, AV_LOG_ERROR, "Vulkan renderer is not available\n");
2612  return AVERROR(ENOTSUP);
2613  }
2614 
2616  if (ret < 0)
2617  return ret;
2618 
2619  ret = av_hwdevice_ctx_create_derived(device_ctx, type, vk_dev, 0);
2620  if (!ret)
2621  return 0;
2622 
2623  if (ret != AVERROR(ENOSYS))
2624  return ret;
2625 
2626  av_log(NULL, AV_LOG_WARNING, "Derive %s from vulkan not supported.\n", hwaccel);
2627  ret = av_hwdevice_ctx_create(device_ctx, type, NULL, NULL, 0);
2628  return ret;
2629 }
2630 
/* open a given stream. Return 0 if OK */
/* Allocates a decoder context for stream `stream_index` of is->ic, applies
 * user options (forced codec name, lowres, threads, hwaccel, copy_opaque),
 * opens the codec, and starts the matching decoder thread. For audio it also
 * builds the audio filter chain and opens the SDL audio device.
 * NOTE(review): this text comes from a doxygen dump; one source line was
 * dropped by the extraction (marked below) — verify against upstream
 * ffplay.c before relying on the exact control flow. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    const AVCodec *codec;
    const char *forced_codec_name = NULL;
    AVDictionary *opts = NULL;
    int sample_rate;
    AVChannelLayout ch_layout = { 0 };
    int ret = 0;
    int stream_lowres = lowres;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;

    avctx = avcodec_alloc_context3(NULL);
    if (!avctx)
        return AVERROR(ENOMEM);

    ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
    if (ret < 0)
        goto fail;
    /* packet timestamps are interpreted in the stream's time base */
    avctx->pkt_timebase = ic->streams[stream_index]->time_base;

    codec = avcodec_find_decoder(avctx->codec_id);

    /* remember the last-used stream per media type; honor a forced decoder */
    switch(avctx->codec_type){
        case AVMEDIA_TYPE_AUDIO   : is->last_audio_stream    = stream_index; forced_codec_name =    audio_codec_name; break;
        case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
        case AVMEDIA_TYPE_VIDEO   : is->last_video_stream    = stream_index; forced_codec_name =    video_codec_name; break;
    }
    if (forced_codec_name)
        codec = avcodec_find_decoder_by_name(forced_codec_name);
    if (!codec) {
        if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
                                      "No codec could be found with name '%s'\n", forced_codec_name);
        else                   av_log(NULL, AV_LOG_WARNING,
                                      "No decoder could be found for codec %s\n", avcodec_get_name(avctx->codec_id));
        ret = AVERROR(EINVAL);
        goto fail;
    }

    avctx->codec_id = codec->id;
    /* clamp the requested lowres to what the decoder supports */
    if (stream_lowres > codec->max_lowres) {
        av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
                codec->max_lowres);
        stream_lowres = codec->max_lowres;
    }
    avctx->lowres = stream_lowres;

    if (fast)
        avctx->flags2 |= AV_CODEC_FLAG2_FAST;

    /* per-stream codec options filtered from the global option set */
    ret = filter_codec_opts(codec_opts, avctx->codec_id, ic,
                            ic->streams[stream_index], codec, &opts, NULL);
    if (ret < 0)
        goto fail;

    if (!av_dict_get(opts, "threads", NULL, 0))
        av_dict_set(&opts, "threads", "auto", 0);
    if (stream_lowres)
        av_dict_set_int(&opts, "lowres", stream_lowres, 0);

    /* keep packet opaque data so frames can carry pkt_pos via opaque_ref */
    av_dict_set(&opts, "flags", "+copy_opaque", AV_DICT_MULTIKEY);

    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        ret = create_hwaccel(&avctx->hw_device_ctx);
        if (ret < 0)
            goto fail;
    }

    if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
        goto fail;
    }
    /* NOTE(review): the extraction dropped a line here (likely
     * `ret = check_avoptions(opts);`) that sets `ret` for the check below —
     * confirm against upstream ffplay.c */
    if (ret < 0)
        goto fail;

    is->eof = 0;
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        {
            AVFilterContext *sink;

            /* seed the audio filter source with the decoder's output format */
            is->audio_filter_src.freq = avctx->sample_rate;
            ret = av_channel_layout_copy(&is->audio_filter_src.ch_layout, &avctx->ch_layout);
            if (ret < 0)
                goto fail;
            is->audio_filter_src.fmt = avctx->sample_fmt;
            if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
                goto fail;
            sink = is->out_audio_filter;
            /* the filtergraph sink decides the final output rate/layout */
            sample_rate = av_buffersink_get_sample_rate(sink);
            ret = av_buffersink_get_ch_layout(sink, &ch_layout);
            if (ret < 0)
                goto fail;
        }

        /* prepare audio output */
        if ((ret = audio_open(is, &ch_layout, sample_rate, &is->audio_tgt)) < 0)
            goto fail;
        is->audio_hw_buf_size = ret;
        is->audio_src = is->audio_tgt;
        is->audio_buf_size  = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise enough audio FIFO fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;

        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];

        if ((ret = decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread)) < 0)
            goto fail;
        /* formats without timestamps need an explicit starting PTS */
        if (is->ic->iformat->flags & AVFMT_NOTIMESTAMPS) {
            is->auddec.start_pts = is->audio_st->start_time;
            is->auddec.start_pts_tb = is->audio_st->time_base;
        }
        if ((ret = decoder_start(&is->auddec, audio_thread, "audio_decoder", is)) < 0)
            goto out;
        SDL_PauseAudioDevice(audio_dev, 0);
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        if ((ret = decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread)) < 0)
            goto fail;
        if ((ret = decoder_start(&is->viddec, video_thread, "video_decoder", is)) < 0)
            goto out;
        /* ask the read thread to queue the attached picture (cover art) once */
        is->queue_attachments_req = 1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];

        if ((ret = decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread)) < 0)
            goto fail;
        if ((ret = decoder_start(&is->subdec, subtitle_thread, "subtitle_decoder", is)) < 0)
            goto out;
        break;
    default:
        break;
    }
    goto out;

fail:
    avcodec_free_context(&avctx);
out:
    av_channel_layout_uninit(&ch_layout);
    av_dict_free(&opts);

    return ret;
}
2791 
2792 static int decode_interrupt_cb(void *ctx)
2793 {
2794  VideoState *is = ctx;
2795  return is->abort_request;
2796 }
2797 
2798 static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue) {
2799  return stream_id < 0 ||
2800  queue->abort_request ||
2802  queue->nb_packets > MIN_FRAMES && (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0);
2803 }
2804 
2806 {
2807  if( !strcmp(s->iformat->name, "rtp")
2808  || !strcmp(s->iformat->name, "rtsp")
2809  || !strcmp(s->iformat->name, "sdp")
2810  )
2811  return 1;
2812 
2813  if(s->pb && ( !strncmp(s->url, "rtp:", 4)
2814  || !strncmp(s->url, "udp:", 4)
2815  )
2816  )
2817  return 1;
2818  return 0;
2819 }
2820 
/* this thread gets the stream from the disk or the network */
/* Demuxer thread: opens the input, selects streams, starts the per-stream
 * decoder components, then loops reading packets into the audio/video/
 * subtitle packet queues, handling pause, seek, EOF, looping and queue
 * backpressure.
 * NOTE(review): this text is a doxygen dump; the extraction dropped a number
 * of hyperlinked source lines. Each gap is marked inline below — reconcile
 * with upstream ffplay.c before editing the logic. */
static int read_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic = NULL;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];
    AVPacket *pkt = NULL;
    int64_t stream_start_time;
    int pkt_in_play_range = 0;
    const AVDictionaryEntry *t;
    SDL_mutex *wait_mutex = SDL_CreateMutex();
    int scan_all_pmts_set = 0;
    int64_t pkt_ts;

    if (!wait_mutex) {
        av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    memset(st_index, -1, sizeof(st_index));
    is->eof = 0;

    pkt = av_packet_alloc();
    if (!pkt) {
        av_log(NULL, AV_LOG_FATAL, "Could not allocate packet.\n");
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    ic = avformat_alloc_context();
    if (!ic) {
        av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    /* NOTE(review): gap in extraction here — upstream installs the interrupt
     * callback (`ic->interrupt_callback.callback = decode_interrupt_cb;
     * ic->interrupt_callback.opaque = is;`); confirm */
    if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
        av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
        scan_all_pmts_set = 1;
    }
    err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    if (scan_all_pmts_set)
        av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
    /* NOTE(review): gap in extraction — upstream validates leftover options
     * here (remove_avoptions/check_avoptions setting `ret`); confirm */

    if (ret < 0)
        goto fail;
    is->ic = ic;

    if (genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    if (find_stream_info) {
        AVDictionary **opts;
        int orig_nb_streams = ic->nb_streams;

        /* NOTE(review): gap — upstream calls
         * `err = setup_find_stream_info_opts(ic, codec_opts, &opts);` and the
         * av_log opener for the message below was dropped; confirm */
        if (err < 0) {
            "Error setting up avformat_find_stream_info() options\n");
            ret = err;
            goto fail;
        }

        err = avformat_find_stream_info(ic, opts);

        for (i = 0; i < orig_nb_streams; i++)
            av_dict_free(&opts[i]);
        av_freep(&opts);

        if (err < 0) {
            /* NOTE(review): gap — av_log opener dropped by extraction */
            "%s: could not find codec parameters\n", is->filename);
            ret = -1;
            goto fail;
        }
    }

    if (ic->pb)
        ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end

    if (seek_by_bytes < 0)
        /* NOTE(review): gap — the assignment head (`seek_by_bytes = !(...NO_BYTE_SEEK) &&`) was dropped */
        !!(ic->iformat->flags & AVFMT_TS_DISCONT) &&
        strcmp("ogg", ic->iformat->name);

    is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;

    if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
        window_title = av_asprintf("%s - %s", t->value, input_filename);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    is->realtime = is_realtime(ic);

    if (show_status)
        av_dump_format(ic, 0, is->filename, 0);

    /* map user-supplied stream specifiers to concrete stream indices */
    for (i = 0; i < ic->nb_streams; i++) {
        AVStream *st = ic->streams[i];
        enum AVMediaType type = st->codecpar->codec_type;
        st->discard = AVDISCARD_ALL;
        if (type >= 0 && wanted_stream_spec[type] && st_index[type] == -1)
            /* NOTE(review): gap — upstream guards this with
             * `if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)` */
            st_index[type] = i;
    }
    for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
        if (wanted_stream_spec[i] && st_index[i] == -1) {
            av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
            st_index[i] = INT_MAX;
        }
    }

    /* NOTE(review): the three av_find_best_stream(...) call heads below were
     * dropped by the extraction, as was the
     * `if (!video_disable && !subtitle_disable)` guard for subtitles; confirm */
    if (!video_disable)
        st_index[AVMEDIA_TYPE_VIDEO] =
            st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
    if (!audio_disable)
        st_index[AVMEDIA_TYPE_AUDIO] =
            st_index[AVMEDIA_TYPE_AUDIO],
            st_index[AVMEDIA_TYPE_VIDEO],
            NULL, 0);
        st_index[AVMEDIA_TYPE_SUBTITLE] =
            st_index[AVMEDIA_TYPE_SUBTITLE],
            (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
             st_index[AVMEDIA_TYPE_AUDIO] :
             st_index[AVMEDIA_TYPE_VIDEO]),
            NULL, 0);

    is->show_mode = show_mode;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
        AVCodecParameters *codecpar = st->codecpar;
        /* NOTE(review): gap — `AVRational sar = av_guess_sample_aspect_ratio(...)` dropped */
        if (codecpar->width)
            set_default_window_size(codecpar->width, codecpar->height, sar);
    }

    /* open the streams */
    /* NOTE(review): the stream_component_open(...) calls inside the three
     * blocks below were dropped by the extraction; confirm against upstream */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
    }

    ret = -1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
    }
    if (is->show_mode == SHOW_MODE_NONE)
        is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
               is->filename);
        ret = -1;
        goto fail;
    }

    if (infinite_buffer < 0 && is->realtime)
        infinite_buffer = 1;

    /* main demux loop */
    for (;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return = av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
        if (is->paused &&
                (!strcmp(ic->iformat->name, "rtsp") ||
                 (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target = is->seek_pos;
            int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
// FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                /* NOTE(review): gap — av_log opener dropped by extraction */
                "%s: error while seeking\n", is->ic->url);
            } else {
                /* seek succeeded: discard all queued packets of the old serial */
                if (is->audio_stream >= 0)
                    packet_queue_flush(&is->audioq);
                if (is->subtitle_stream >= 0)
                    packet_queue_flush(&is->subtitleq);
                if (is->video_stream >= 0)
                    packet_queue_flush(&is->videoq);
                if (is->seek_flags & AVSEEK_FLAG_BYTE) {
                    set_clock(&is->extclk, NAN, 0);
                } else {
                    set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
                }
            }
            is->seek_req = 0;
            is->queue_attachments_req = 1;
            is->eof = 0;
            if (is->paused)
                /* NOTE(review): gap — body dropped; upstream calls step_to_next_frame(is) */
        }
        if (is->queue_attachments_req) {
            /* cover-art streams carry a single attached picture packet */
            if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
                if ((ret = av_packet_ref(pkt, &is->video_st->attached_pic)) < 0)
                    goto fail;
                packet_queue_put(&is->videoq, pkt);
                packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
            }
            is->queue_attachments_req = 0;
        }

        /* if the queue are full, no need to read more */
        if (infinite_buffer<1 &&
              (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq) &&
                stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq) &&
                stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq)))) {
            /* wait 10 ms */
            SDL_LockMutex(wait_mutex);
            SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
            SDL_UnlockMutex(wait_mutex);
            continue;
        }
        /* playback fully drained: loop or auto-exit */
        if (!is->paused &&
            (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
            (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
            if (loop != 1 && (!loop || --loop)) {
                /* NOTE(review): gap — upstream restarts playback via
                 * stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0) */
            } else if (autoexit) {
                ret = AVERROR_EOF;
                goto fail;
            }
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
                /* flush the decoders with null packets exactly once at EOF */
                if (is->video_stream >= 0)
                    packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
                if (is->audio_stream >= 0)
                    packet_queue_put_nullpacket(&is->audioq, pkt, is->audio_stream);
                if (is->subtitle_stream >= 0)
                    packet_queue_put_nullpacket(&is->subtitleq, pkt, is->subtitle_stream);
                is->eof = 1;
            }
            if (ic->pb && ic->pb->error) {
                if (autoexit)
                    goto fail;
                else
                    break;
            }
            SDL_LockMutex(wait_mutex);
            SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
            SDL_UnlockMutex(wait_mutex);
            continue;
        } else {
            is->eof = 0;
        }
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        stream_start_time = ic->streams[pkt->stream_index]->start_time;
        pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
        /* NOTE(review): gap — a line of this expression
         * (`av_q2d(...->time_base) -`) was dropped by the extraction */
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
                <= ((double)duration / 1000000);
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
                   && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            /* NOTE(review): gap — upstream drops the packet via av_packet_unref(pkt) */
        }
    }

    ret = 0;
 fail:
    if (ic && !is->ic)
        avformat_close_input(&ic);

    av_packet_free(&pkt);
    if (ret != 0) {
        /* signal the main loop that this input failed/finished */
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    SDL_DestroyMutex(wait_mutex);
    return 0;
}
3151 
3152 static VideoState *stream_open(const char *filename,
3153  const AVInputFormat *iformat)
3154 {
3155  VideoState *is;
3156 
3157  is = av_mallocz(sizeof(VideoState));
3158  if (!is)
3159  return NULL;
3160  is->last_video_stream = is->video_stream = -1;
3161  is->last_audio_stream = is->audio_stream = -1;
3162  is->last_subtitle_stream = is->subtitle_stream = -1;
3163  is->filename = av_strdup(filename);
3164  if (!is->filename)
3165  goto fail;
3166  is->iformat = iformat;
3167  is->ytop = 0;
3168  is->xleft = 0;
3169 
3170  /* start video display */
3171  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3172  goto fail;
3173  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3174  goto fail;
3175  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3176  goto fail;
3177 
3178  if (packet_queue_init(&is->videoq) < 0 ||
3179  packet_queue_init(&is->audioq) < 0 ||
3180  packet_queue_init(&is->subtitleq) < 0)
3181  goto fail;
3182 
3183  if (!(is->continue_read_thread = SDL_CreateCond())) {
3184  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
3185  goto fail;
3186  }
3187 
3188  init_clock(&is->vidclk, &is->videoq.serial);
3189  init_clock(&is->audclk, &is->audioq.serial);
3190  init_clock(&is->extclk, &is->extclk.serial);
3191  is->audio_clock_serial = -1;
3192  if (startup_volume < 0)
3193  av_log(NULL, AV_LOG_WARNING, "-volume=%d < 0, setting to 0\n", startup_volume);
3194  if (startup_volume > 100)
3195  av_log(NULL, AV_LOG_WARNING, "-volume=%d > 100, setting to 100\n", startup_volume);
3197  startup_volume = av_clip(SDL_MIX_MAXVOLUME * startup_volume / 100, 0, SDL_MIX_MAXVOLUME);
3198  is->audio_volume = startup_volume;
3199  is->muted = 0;
3200  is->av_sync_type = av_sync_type;
3201  is->read_tid = SDL_CreateThread(read_thread, "read_thread", is);
3202  if (!is->read_tid) {
3203  av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
3204 fail:
3205  stream_close(is);
3206  return NULL;
3207  }
3208  return is;
3209 }
3210 
3212 {
3213  AVFormatContext *ic = is->ic;
3214  int start_index, stream_index;
3215  int old_index;
3216  AVStream *st;
3217  AVProgram *p = NULL;
3218  int nb_streams = is->ic->nb_streams;
3219 
3220  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3221  start_index = is->last_video_stream;
3222  old_index = is->video_stream;
3223  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3224  start_index = is->last_audio_stream;
3225  old_index = is->audio_stream;
3226  } else {
3227  start_index = is->last_subtitle_stream;
3228  old_index = is->subtitle_stream;
3229  }
3230  stream_index = start_index;
3231 
3232  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3233  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3234  if (p) {
3236  for (start_index = 0; start_index < nb_streams; start_index++)
3237  if (p->stream_index[start_index] == stream_index)
3238  break;
3239  if (start_index == nb_streams)
3240  start_index = -1;
3241  stream_index = start_index;
3242  }
3243  }
3244 
3245  for (;;) {
3246  if (++stream_index >= nb_streams)
3247  {
3249  {
3250  stream_index = -1;
3251  is->last_subtitle_stream = -1;
3252  goto the_end;
3253  }
3254  if (start_index == -1)
3255  return;
3256  stream_index = 0;
3257  }
3258  if (stream_index == start_index)
3259  return;
3260  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3261  if (st->codecpar->codec_type == codec_type) {
3262  /* check that parameters are OK */
3263  switch (codec_type) {
3264  case AVMEDIA_TYPE_AUDIO:
3265  if (st->codecpar->sample_rate != 0 &&
3266  st->codecpar->ch_layout.nb_channels != 0)
3267  goto the_end;
3268  break;
3269  case AVMEDIA_TYPE_VIDEO:
3270  case AVMEDIA_TYPE_SUBTITLE:
3271  goto the_end;
3272  default:
3273  break;
3274  }
3275  }
3276  }
3277  the_end:
3278  if (p && stream_index != -1)
3279  stream_index = p->stream_index[stream_index];
3280  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3282  old_index,
3283  stream_index);
3284 
3285  stream_component_close(is, old_index);
3286  stream_component_open(is, stream_index);
3287 }
3288 
3289 
3291 {
3293  SDL_SetWindowFullscreen(window, is_full_screen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0);
3294 }
3295 
3297 {
3298  int next = is->show_mode;
3299  do {
3300  next = (next + 1) % SHOW_MODE_NB;
3301  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3302  if (is->show_mode != next) {
3303  is->force_refresh = 1;
3304  is->show_mode = next;
3305  }
3306 }
3307 
3308 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3309  double remaining_time = 0.0;
3310  SDL_PumpEvents();
3311  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
3313  SDL_ShowCursor(0);
3314  cursor_hidden = 1;
3315  }
3316  if (remaining_time > 0.0)
3317  av_usleep((int64_t)(remaining_time * 1000000.0));
3318  remaining_time = REFRESH_RATE;
3319  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3320  video_refresh(is, &remaining_time);
3321  SDL_PumpEvents();
3322  }
3323 }
3324 
3325 static void seek_chapter(VideoState *is, int incr)
3326 {
3328  int i;
3329 
3330  if (!is->ic->nb_chapters)
3331  return;
3332 
3333  /* find the current chapter */
3334  for (i = 0; i < is->ic->nb_chapters; i++) {
3335  AVChapter *ch = is->ic->chapters[i];
3336  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3337  i--;
3338  break;
3339  }
3340  }
3341 
3342  i += incr;
3343  i = FFMAX(i, 0);
3344  if (i >= is->ic->nb_chapters)
3345  return;
3346 
3347  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3348  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3349  AV_TIME_BASE_Q), 0, 0);
3350 }
3351 
3352 /* handle an event sent by the GUI */
3353 static void event_loop(VideoState *cur_stream)
3354 {
3355  SDL_Event event;
3356  double incr, pos, frac;
3357 
3358  for (;;) {
3359  double x;
3360  refresh_loop_wait_event(cur_stream, &event);
3361  switch (event.type) {
3362  case SDL_KEYDOWN:
3363  if (exit_on_keydown || event.key.keysym.sym == SDLK_ESCAPE || event.key.keysym.sym == SDLK_q) {
3364  do_exit(cur_stream);
3365  break;
3366  }
3367  // If we don't yet have a window, skip all key events, because read_thread might still be initializing...
3368  if (!cur_stream->width)
3369  continue;
3370  switch (event.key.keysym.sym) {
3371  case SDLK_f:
3372  toggle_full_screen(cur_stream);
3373  cur_stream->force_refresh = 1;
3374  break;
3375  case SDLK_p:
3376  case SDLK_SPACE:
3377  toggle_pause(cur_stream);
3378  break;
3379  case SDLK_m:
3380  toggle_mute(cur_stream);
3381  break;
3382  case SDLK_KP_MULTIPLY:
3383  case SDLK_0:
3384  update_volume(cur_stream, 1, SDL_VOLUME_STEP);
3385  break;
3386  case SDLK_KP_DIVIDE:
3387  case SDLK_9:
3388  update_volume(cur_stream, -1, SDL_VOLUME_STEP);
3389  break;
3390  case SDLK_s: // S: Step to next frame
3391  step_to_next_frame(cur_stream);
3392  break;
3393  case SDLK_a:
3395  break;
3396  case SDLK_v:
3398  break;
3399  case SDLK_c:
3403  break;
3404  case SDLK_t:
3406  break;
3407  case SDLK_w:
3408  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3409  if (++cur_stream->vfilter_idx >= nb_vfilters)
3410  cur_stream->vfilter_idx = 0;
3411  } else {
3412  cur_stream->vfilter_idx = 0;
3413  toggle_audio_display(cur_stream);
3414  }
3415  break;
3416  case SDLK_PAGEUP:
3417  if (cur_stream->ic->nb_chapters <= 1) {
3418  incr = 600.0;
3419  goto do_seek;
3420  }
3421  seek_chapter(cur_stream, 1);
3422  break;
3423  case SDLK_PAGEDOWN:
3424  if (cur_stream->ic->nb_chapters <= 1) {
3425  incr = -600.0;
3426  goto do_seek;
3427  }
3428  seek_chapter(cur_stream, -1);
3429  break;
3430  case SDLK_LEFT:
3431  incr = seek_interval ? -seek_interval : -10.0;
3432  goto do_seek;
3433  case SDLK_RIGHT:
3434  incr = seek_interval ? seek_interval : 10.0;
3435  goto do_seek;
3436  case SDLK_UP:
3437  incr = 60.0;
3438  goto do_seek;
3439  case SDLK_DOWN:
3440  incr = -60.0;
3441  do_seek:
3442  if (seek_by_bytes) {
3443  pos = -1;
3444  if (pos < 0 && cur_stream->video_stream >= 0)
3445  pos = frame_queue_last_pos(&cur_stream->pictq);
3446  if (pos < 0 && cur_stream->audio_stream >= 0)
3447  pos = frame_queue_last_pos(&cur_stream->sampq);
3448  if (pos < 0)
3449  pos = avio_tell(cur_stream->ic->pb);
3450  if (cur_stream->ic->bit_rate)
3451  incr *= cur_stream->ic->bit_rate / 8.0;
3452  else
3453  incr *= 180000.0;
3454  pos += incr;
3455  stream_seek(cur_stream, pos, incr, 1);
3456  } else {
3457  pos = get_master_clock(cur_stream);
3458  if (isnan(pos))
3459  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3460  pos += incr;
3461  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3462  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3463  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3464  }
3465  break;
3466  default:
3467  break;
3468  }
3469  break;
3470  case SDL_MOUSEBUTTONDOWN:
3471  if (exit_on_mousedown) {
3472  do_exit(cur_stream);
3473  break;
3474  }
3475  if (event.button.button == SDL_BUTTON_LEFT) {
3476  static int64_t last_mouse_left_click = 0;
3477  if (av_gettime_relative() - last_mouse_left_click <= 500000) {
3478  toggle_full_screen(cur_stream);
3479  cur_stream->force_refresh = 1;
3480  last_mouse_left_click = 0;
3481  } else {
3482  last_mouse_left_click = av_gettime_relative();
3483  }
3484  }
3485  case SDL_MOUSEMOTION:
3486  if (cursor_hidden) {
3487  SDL_ShowCursor(1);
3488  cursor_hidden = 0;
3489  }
3491  if (event.type == SDL_MOUSEBUTTONDOWN) {
3492  if (event.button.button != SDL_BUTTON_RIGHT)
3493  break;
3494  x = event.button.x;
3495  } else {
3496  if (!(event.motion.state & SDL_BUTTON_RMASK))
3497  break;
3498  x = event.motion.x;
3499  }
3500  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3501  uint64_t size = avio_size(cur_stream->ic->pb);
3502  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3503  } else {
3504  int64_t ts;
3505  int ns, hh, mm, ss;
3506  int tns, thh, tmm, tss;
3507  tns = cur_stream->ic->duration / 1000000LL;
3508  thh = tns / 3600;
3509  tmm = (tns % 3600) / 60;
3510  tss = (tns % 60);
3511  frac = x / cur_stream->width;
3512  ns = frac * tns;
3513  hh = ns / 3600;
3514  mm = (ns % 3600) / 60;
3515  ss = (ns % 60);
3517  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3518  hh, mm, ss, thh, tmm, tss);
3519  ts = frac * cur_stream->ic->duration;
3520  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3521  ts += cur_stream->ic->start_time;
3522  stream_seek(cur_stream, ts, 0, 0);
3523  }
3524  break;
3525  case SDL_WINDOWEVENT:
3526  switch (event.window.event) {
3527  case SDL_WINDOWEVENT_SIZE_CHANGED:
3528  screen_width = cur_stream->width = event.window.data1;
3529  screen_height = cur_stream->height = event.window.data2;
3530  if (cur_stream->vis_texture) {
3531  SDL_DestroyTexture(cur_stream->vis_texture);
3532  cur_stream->vis_texture = NULL;
3533  }
3534  if (vk_renderer)
3536  case SDL_WINDOWEVENT_EXPOSED:
3537  cur_stream->force_refresh = 1;
3538  }
3539  break;
3540  case SDL_QUIT:
3541  case FF_QUIT_EVENT:
3542  do_exit(cur_stream);
3543  break;
3544  default:
3545  break;
3546  }
3547  }
3548 }
3549 
3550 static int opt_width(void *optctx, const char *opt, const char *arg)
3551 {
3552  double num;
3553  int ret = parse_number(opt, arg, OPT_TYPE_INT64, 1, INT_MAX, &num);
3554  if (ret < 0)
3555  return ret;
3556 
3557  screen_width = num;
3558  return 0;
3559 }
3560 
3561 static int opt_height(void *optctx, const char *opt, const char *arg)
3562 {
3563  double num;
3564  int ret = parse_number(opt, arg, OPT_TYPE_INT64, 1, INT_MAX, &num);
3565  if (ret < 0)
3566  return ret;
3567 
3568  screen_height = num;
3569  return 0;
3570 }
3571 
3572 static int opt_format(void *optctx, const char *opt, const char *arg)
3573 {
3575  if (!file_iformat) {
3576  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3577  return AVERROR(EINVAL);
3578  }
3579  return 0;
3580 }
3581 
3582 static int opt_sync(void *optctx, const char *opt, const char *arg)
3583 {
3584  if (!strcmp(arg, "audio"))
3586  else if (!strcmp(arg, "video"))
3588  else if (!strcmp(arg, "ext"))
3590  else {
3591  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3592  exit(1);
3593  }
3594  return 0;
3595 }
3596 
3597 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3598 {
3599  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3600  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3601  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT : SHOW_MODE_NONE;
3602 
3603  if (show_mode == SHOW_MODE_NONE) {
3604  double num;
3605  int ret = parse_number(opt, arg, OPT_TYPE_INT, 0, SHOW_MODE_NB-1, &num);
3606  if (ret < 0)
3607  return ret;
3608  show_mode = num;
3609  }
3610  return 0;
3611 }
3612 
3613 static int opt_input_file(void *optctx, const char *filename)
3614 {
3615  if (input_filename) {
3617  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3618  filename, input_filename);
3619  return AVERROR(EINVAL);
3620  }
3621  if (!strcmp(filename, "-"))
3622  filename = "fd:";
3623  input_filename = av_strdup(filename);
3624  if (!input_filename)
3625  return AVERROR(ENOMEM);
3626 
3627  return 0;
3628 }
3629 
3630 static int opt_codec(void *optctx, const char *opt, const char *arg)
3631 {
3632  const char *spec = strchr(opt, ':');
3633  const char **name;
3634  if (!spec) {
3636  "No media specifier was specified in '%s' in option '%s'\n",
3637  arg, opt);
3638  return AVERROR(EINVAL);
3639  }
3640  spec++;
3641 
3642  switch (spec[0]) {
3643  case 'a' : name = &audio_codec_name; break;
3644  case 's' : name = &subtitle_codec_name; break;
3645  case 'v' : name = &video_codec_name; break;
3646  default:
3648  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3649  return AVERROR(EINVAL);
3650  }
3651 
3652  av_freep(name);
3653  *name = av_strdup(arg);
3654  return *name ? 0 : AVERROR(ENOMEM);
3655 }
3656 
3657 static int dummy;
3658 
3659 static const OptionDef options[] = {
3661  { "x", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3662  { "y", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3663  { "fs", OPT_TYPE_BOOL, 0, { &is_full_screen }, "force full screen" },
3664  { "an", OPT_TYPE_BOOL, 0, { &audio_disable }, "disable audio" },
3665  { "vn", OPT_TYPE_BOOL, 0, { &video_disable }, "disable video" },
3666  { "sn", OPT_TYPE_BOOL, 0, { &subtitle_disable }, "disable subtitling" },
3667  { "ast", OPT_TYPE_STRING, OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3668  { "vst", OPT_TYPE_STRING, OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3669  { "sst", OPT_TYPE_STRING, OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3670  { "ss", OPT_TYPE_TIME, 0, { &start_time }, "seek to a given position in seconds", "pos" },
3671  { "t", OPT_TYPE_TIME, 0, { &duration }, "play \"duration\" seconds of audio/video", "duration" },
3672  { "bytes", OPT_TYPE_INT, 0, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3673  { "seek_interval", OPT_TYPE_FLOAT, 0, { &seek_interval }, "set seek interval for left/right keys, in seconds", "seconds" },
3674  { "nodisp", OPT_TYPE_BOOL, 0, { &display_disable }, "disable graphical display" },
3675  { "noborder", OPT_TYPE_BOOL, 0, { &borderless }, "borderless window" },
3676  { "alwaysontop", OPT_TYPE_BOOL, 0, { &alwaysontop }, "window always on top" },
3677  { "volume", OPT_TYPE_INT, 0, { &startup_volume}, "set startup volume 0=min 100=max", "volume" },
3678  { "f", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3679  { "stats", OPT_TYPE_BOOL, OPT_EXPERT, { &show_status }, "show status", "" },
3680  { "fast", OPT_TYPE_BOOL, OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3681  { "genpts", OPT_TYPE_BOOL, OPT_EXPERT, { &genpts }, "generate pts", "" },
3682  { "drp", OPT_TYPE_INT, OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3683  { "lowres", OPT_TYPE_INT, OPT_EXPERT, { &lowres }, "", "" },
3684  { "sync", OPT_TYPE_FUNC, OPT_FUNC_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3685  { "autoexit", OPT_TYPE_BOOL, OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3686  { "exitonkeydown", OPT_TYPE_BOOL, OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3687  { "exitonmousedown", OPT_TYPE_BOOL, OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3688  { "loop", OPT_TYPE_INT, OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3689  { "framedrop", OPT_TYPE_BOOL, OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3690  { "infbuf", OPT_TYPE_BOOL, OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3691  { "window_title", OPT_TYPE_STRING, 0, { &window_title }, "set window title", "window title" },
3692  { "left", OPT_TYPE_INT, OPT_EXPERT, { &screen_left }, "set the x position for the left of the window", "x pos" },
3693  { "top", OPT_TYPE_INT, OPT_EXPERT, { &screen_top }, "set the y position for the top of the window", "y pos" },
3694  { "vf", OPT_TYPE_FUNC, OPT_FUNC_ARG | OPT_EXPERT, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3695  { "af", OPT_TYPE_STRING, 0, { &afilters }, "set audio filters", "filter_graph" },
3696  { "rdftspeed", OPT_TYPE_INT, OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3697  { "showmode", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3698  { "i", OPT_TYPE_BOOL, 0, { &dummy}, "read specified file", "input_file"},
3699  { "codec", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3700  { "acodec", OPT_TYPE_STRING, OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3701  { "scodec", OPT_TYPE_STRING, OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3702  { "vcodec", OPT_TYPE_STRING, OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3703  { "autorotate", OPT_TYPE_BOOL, 0, { &autorotate }, "automatically rotate video", "" },
3704  { "find_stream_info", OPT_TYPE_BOOL, OPT_INPUT | OPT_EXPERT, { &find_stream_info },
3705  "read and decode the streams to fill missing information with heuristics" },
3706  { "filter_threads", OPT_TYPE_INT, OPT_EXPERT, { &filter_nbthreads }, "number of filter threads per graph" },
3707  { "enable_vulkan", OPT_TYPE_BOOL, 0, { &enable_vulkan }, "enable vulkan renderer" },
3708  { "vulkan_params", OPT_TYPE_STRING, OPT_EXPERT, { &vulkan_params }, "vulkan configuration using a list of key=value pairs separated by ':'" },
3709  { "hwaccel", OPT_TYPE_STRING, OPT_EXPERT, { &hwaccel }, "use HW accelerated decoding" },
3710  { NULL, },
3711 };
3712 
3713 static void show_usage(void)
3714 {
3715  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3716  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3717  av_log(NULL, AV_LOG_INFO, "\n");
3718 }
3719 
3720 void show_help_default(const char *opt, const char *arg)
3721 {
3723  show_usage();
3724  show_help_options(options, "Main options:", 0, OPT_EXPERT);
3725  show_help_options(options, "Advanced options:", OPT_EXPERT, 0);
3726  printf("\n");
3730  printf("\nWhile playing:\n"
3731  "q, ESC quit\n"
3732  "f toggle full screen\n"
3733  "p, SPC pause\n"
3734  "m toggle mute\n"
3735  "9, 0 decrease and increase volume respectively\n"
3736  "/, * decrease and increase volume respectively\n"
3737  "a cycle audio channel in the current program\n"
3738  "v cycle video channel\n"
3739  "t cycle subtitle channel in the current program\n"
3740  "c cycle program\n"
3741  "w cycle video filters or show modes\n"
3742  "s activate frame-step mode\n"
3743  "left/right seek backward/forward 10 seconds or to custom interval if -seek_interval is set\n"
3744  "down/up seek backward/forward 1 minute\n"
3745  "page down/page up seek backward/forward 10 minutes\n"
3746  "right mouse click seek to percentage in file corresponding to fraction of width\n"
3747  "left double-click toggle full screen\n"
3748  );
3749 }
3750 
3751 /* Called from the main */
3752 int main(int argc, char **argv)
3753 {
3754  int flags, ret;
3755  VideoState *is;
3756 
3757  init_dynload();
3758 
3760  parse_loglevel(argc, argv, options);
3761 
3762  /* register all codecs, demux and protocols */
3763 #if CONFIG_AVDEVICE
3765 #endif
3767 
3768  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3769  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3770 
3771  show_banner(argc, argv, options);
3772 
3773  ret = parse_options(NULL, argc, argv, options, opt_input_file);
3774  if (ret < 0)
3775  exit(ret == AVERROR_EXIT ? 0 : 1);
3776 
3777  if (!input_filename) {
3778  show_usage();
3779  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3781  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3782  exit(1);
3783  }
3784 
3785  if (display_disable) {
3786  video_disable = 1;
3787  }
3788  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3789  if (audio_disable)
3790  flags &= ~SDL_INIT_AUDIO;
3791  else {
3792  /* Try to work around an occasional ALSA buffer underflow issue when the
3793  * period size is NPOT due to ALSA resampling by forcing the buffer size. */
3794  if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
3795  SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1);
3796  }
3797  if (display_disable)
3798  flags &= ~SDL_INIT_VIDEO;
3799  if (SDL_Init (flags)) {
3800  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3801  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3802  exit(1);
3803  }
3804 
3805  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3806  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3807 
3808  if (!display_disable) {
3809  int flags = SDL_WINDOW_HIDDEN;
3810  if (alwaysontop)
3811 #if SDL_VERSION_ATLEAST(2,0,5)
3812  flags |= SDL_WINDOW_ALWAYS_ON_TOP;
3813 #else
3814  av_log(NULL, AV_LOG_WARNING, "Your SDL version doesn't support SDL_WINDOW_ALWAYS_ON_TOP. Feature will be inactive.\n");
3815 #endif
3816  if (borderless)
3817  flags |= SDL_WINDOW_BORDERLESS;
3818  else
3819  flags |= SDL_WINDOW_RESIZABLE;
3820 
3821 #ifdef SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR
3822  SDL_SetHint(SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR, "0");
3823 #endif
3824  if (hwaccel && !enable_vulkan) {
3825  av_log(NULL, AV_LOG_INFO, "Enable vulkan renderer to support hwaccel %s\n", hwaccel);
3826  enable_vulkan = 1;
3827  }
3828  if (enable_vulkan) {
3830  if (vk_renderer) {
3831 #if SDL_VERSION_ATLEAST(2, 0, 6)
3832  flags |= SDL_WINDOW_VULKAN;
3833 #endif
3834  } else {
3835  av_log(NULL, AV_LOG_WARNING, "Doesn't support vulkan renderer, fallback to SDL renderer\n");
3836  enable_vulkan = 0;
3837  }
3838  }
3839  window = SDL_CreateWindow(program_name, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, default_width, default_height, flags);
3840  SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
3841  if (!window) {
3842  av_log(NULL, AV_LOG_FATAL, "Failed to create window: %s", SDL_GetError());
3843  do_exit(NULL);
3844  }
3845 
3846  if (vk_renderer) {
3847  AVDictionary *dict = NULL;
3848 
3849  if (vulkan_params) {
3850  int ret = av_dict_parse_string(&dict, vulkan_params, "=", ":", 0);
3851  if (ret < 0) {
3852  av_log(NULL, AV_LOG_FATAL, "Failed to parse, %s\n", vulkan_params);
3853  do_exit(NULL);
3854  }
3855  }
3857  av_dict_free(&dict);
3858  if (ret < 0) {
3859  av_log(NULL, AV_LOG_FATAL, "Failed to create vulkan renderer, %s\n", av_err2str(ret));
3860  do_exit(NULL);
3861  }
3862  } else {
3863  renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
3864  if (!renderer) {
3865  av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
3866  renderer = SDL_CreateRenderer(window, -1, 0);
3867  }
3868  if (renderer) {
3869  if (!SDL_GetRendererInfo(renderer, &renderer_info))
3870  av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", renderer_info.name);
3871  }
3872  if (!renderer || !renderer_info.num_texture_formats) {
3873  av_log(NULL, AV_LOG_FATAL, "Failed to create window or renderer: %s", SDL_GetError());
3874  do_exit(NULL);
3875  }
3876  }
3877  }
3878 
3880  if (!is) {
3881  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3882  do_exit(NULL);
3883  }
3884 
3885  event_loop(is);
3886 
3887  /* never returns */
3888 
3889  return 0;
3890 }
AV_OPT_SEARCH_CHILDREN
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:605
AVSubtitle
Definition: avcodec.h:2238
rect::w
int w
Definition: f_ebur128.c:76
sws_getCachedContext
struct SwsContext * sws_getCachedContext(struct SwsContext *context, int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Check if context can be reused, otherwise reallocate a new one.
Definition: utils.c:2507
do_exit
static void do_exit(VideoState *is)
Definition: ffplay.c:1303
VideoState::seek_rel
int64_t seek_rel
Definition: ffplay.c:212
AV_PKT_DATA_DISPLAYMATRIX
@ AV_PKT_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: packet.h:105
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:429
show_help_options
void show_help_options(const OptionDef *options, const char *msg, int req_flags, int rej_flags)
Print help for all options matching specified flags.
Definition: cmdutils.c:107
AVCodec
AVCodec.
Definition: codec.h:187
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:204
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
VideoState::video_st
AVStream * video_st
Definition: ffplay.c:282
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
av_buffersink_get_ch_layout
int av_buffersink_get_ch_layout(const AVFilterContext *ctx, AVChannelLayout *out)
Definition: buffersink.c:210
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
Frame::width
int width
Definition: ffplay.c:159
AVFMT_NO_BYTE_SEEK
#define AVFMT_NO_BYTE_SEEK
Format does not allow seeking by bytes.
Definition: avformat.h:487
av_clip
#define av_clip
Definition: common.h:100
VideoState::rdft
AVTXContext * rdft
Definition: ffplay.c:263
AudioParams::fmt
enum AVSampleFormat fmt
Definition: ffplay.c:132
av_sync_type
static int av_sync_type
Definition: ffplay.c:325
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
AVSubtitle::rects
AVSubtitleRect ** rects
Definition: avcodec.h:2243
opt.h
renderer_info
static SDL_RendererInfo renderer_info
Definition: ffplay.c:363
FrameData::pkt_pos
int64_t pkt_pos
Definition: ffplay.c:148
get_rotation
double get_rotation(const int32_t *displaymatrix)
Definition: cmdutils.c:1475
frame_queue_nb_remaining
static int frame_queue_nb_remaining(FrameQueue *f)
Definition: ffplay.c:802
VideoState::agraph
AVFilterGraph * agraph
Definition: ffplay.c:297
configure_audio_filters
static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
Definition: ffplay.c:1995
opt_add_vfilter
static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:394
frame_queue_next
static void frame_queue_next(FrameQueue *f)
Definition: ffplay.c:786
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:51
Decoder::finished
int finished
Definition: ffplay.c:191
AVFilterGraph::nb_threads
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:786
av_compare_ts
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
Definition: mathematics.c:147
FrameData
Definition: ffmpeg.h:673
AVProgram::nb_stream_indexes
unsigned int nb_stream_indexes
Definition: avformat.h:1216
check_avoptions
int check_avoptions(AVDictionary *m)
Definition: cmdutils.c:1527
frame_queue_last_pos
static int64_t frame_queue_last_pos(FrameQueue *f)
Definition: ffplay.c:808
av_find_best_stream
int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type, int wanted_stream_nb, int related_stream, const AVCodec **decoder_ret, int flags)
Definition: avformat.c:448
out
FILE * out
Definition: movenc.c:55
VideoState::rdft_fn
av_tx_fn rdft_fn
Definition: ffplay.c:264
AVCodecContext::sample_rate
int sample_rate
samples per second
Definition: avcodec.h:1056
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
video_thread
static int video_thread(void *arg)
Definition: ffplay.c:2160
VideoState::av_sync_type
int av_sync_type
Definition: ffplay.c:231
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:951
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
set_default_window_size
static void set_default_window_size(int width, int height, AVRational sar)
Definition: ffplay.c:1335
AV_PIX_FMT_BGR32
#define AV_PIX_FMT_BGR32
Definition: pixfmt.h:453
AV_NOSYNC_THRESHOLD
#define AV_NOSYNC_THRESHOLD
Definition: ffplay.c:84
AVFormatContext::nb_chapters
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1387
AVCodecParameters
This struct describes the properties of an encoded stream.
Definition: codec_par.h:47
AV_LOG_QUIET
#define AV_LOG_QUIET
Print no output.
Definition: log.h:162
FrameQueue::keep_last
int keep_last
Definition: ffplay.c:173
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:948
VideoState::audio_hw_buf_size
int audio_hw_buf_size
Definition: ffplay.c:241
decoder_decode_frame
static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub)
Definition: ffplay.c:579
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
AV_DISPOSITION_ATTACHED_PIC
#define AV_DISPOSITION_ATTACHED_PIC
The stream is stored in the file as an attached picture/"cover art" (e.g.
Definition: avformat.h:674
FrameQueue::cond
SDL_cond * cond
Definition: ffplay.c:176
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:120
AVStream::discard
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:819
av_find_program_from_stream
AVProgram * av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
Find the programs which belong to a given stream.
Definition: avformat.c:397
display_disable
static int display_disable
Definition: ffplay.c:320
screen_width
static int screen_width
Definition: ffplay.c:310
ffplay_renderer.h
DEBUG
#define DEBUG
Definition: vf_framerate.c:29
sws_dict
AVDictionary * sws_dict
Definition: cmdutils.c:56
swr_set_compensation
int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance)
Activate resampling compensation ("soft" compensation).
Definition: swresample.c:904
SAMPLE_ARRAY_SIZE
#define SAMPLE_ARRAY_SIZE
Definition: ffplay.c:102
AVTXContext
Definition: tx_priv.h:235
rect
Definition: f_ebur128.c:76
update_volume
static void update_volume(VideoState *is, int sign, double step)
Definition: ffplay.c:1521
AVFMT_NOTIMESTAMPS
#define AVFMT_NOTIMESTAMPS
Format does not need / have any timestamps.
Definition: avformat.h:479
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:264
VideoState::auddec
Decoder auddec
Definition: ffplay.c:225
int64_t
long long int64_t
Definition: coverity.c:34
screen_left
static int screen_left
Definition: ffplay.c:312
av_opt_set_int_list
#define av_opt_set_int_list(obj, name, val, term, flags)
Set a binary option to an integer list.
Definition: opt.h:899
AudioParams::frame_size
int frame_size
Definition: ffplay.c:133
AVSubtitleRect
Definition: avcodec.h:2211
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:115
AVSubtitle::num_rects
unsigned num_rects
Definition: avcodec.h:2242
Decoder::next_pts
int64_t next_pts
Definition: ffplay.c:196
decoder_start
static int decoder_start(Decoder *d, int(*fn)(void *), const char *thread_name, void *arg)
Definition: ffplay.c:2149
rect::y
int y
Definition: f_ebur128.c:76
FrameQueue::size
int size
Definition: ffplay.c:171
avformat_get_class
const AVClass * avformat_get_class(void)
Get the AVClass for AVFormatContext.
Definition: options.c:197
av_unused
#define av_unused
Definition: attributes.h:131
normalize.log
log
Definition: normalize.py:21
Frame::sar
AVRational sar
Definition: ffplay.c:162
out_size
int out_size
Definition: movenc.c:56
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:162
VideoState::vis_texture
SDL_Texture * vis_texture
Definition: ffplay.c:270
queue_picture
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
Definition: ffplay.c:1753
sample_rates
static const int sample_rates[]
Definition: dcaenc.h:34
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:389
AudioParams
Definition: ffplay.c:129
VideoState::subtitle_st
AVStream * subtitle_st
Definition: ffplay.c:275
VideoState::audio_filter_src
struct AudioParams audio_filter_src
Definition: ffplay.c:251
pixdesc.h
AVFormatContext::streams
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1355
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
VideoState::frame_last_filter_delay
double frame_last_filter_delay
Definition: ffplay.c:280
AVFrame::width
int width
Definition: frame.h:461
VideoState::xleft
int xleft
Definition: ffplay.c:289
AVPacketSideData
This structure stores auxiliary information for decoding, presenting, or otherwise processing the cod...
Definition: packet.h:390
Frame::pts
double pts
Definition: ffplay.c:156
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:686
OPT_INPUT
#define OPT_INPUT
Definition: cmdutils.h:233
frame_queue_init
static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
Definition: ffplay.c:689
subtitle_codec_name
static const char * subtitle_codec_name
Definition: ffplay.c:340
parse_number
int parse_number(const char *context, const char *numstr, enum OptionType type, double min, double max, double *dst)
Parse a string and return its corresponding value as a double.
Definition: cmdutils.c:84
AV_HWDEVICE_TYPE_NONE
@ AV_HWDEVICE_TYPE_NONE
Definition: hwcontext.h:28
AVPacket::data
uint8_t * data
Definition: packet.h:539
AV_SYNC_VIDEO_MASTER
@ AV_SYNC_VIDEO_MASTER
Definition: ffplay.c:182
EXTERNAL_CLOCK_MIN_FRAMES
#define EXTERNAL_CLOCK_MIN_FRAMES
Definition: ffplay.c:66
b
#define b
Definition: input.c:41
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:173
vk_renderer_create
int vk_renderer_create(VkRenderer *renderer, SDL_Window *window, AVDictionary *opt)
Definition: ffplay_renderer.c:812
AVChapter::start
int64_t start
Definition: avformat.h:1249
Clock
Definition: ffplay.c:137
data
const char data[16]
Definition: mxf.c:148
SAMPLE_QUEUE_SIZE
#define SAMPLE_QUEUE_SIZE
Definition: ffplay.c:126
program_name
const char program_name[]
program name, defined by the program for show_version().
Definition: ffplay.c:61
Decoder::queue
PacketQueue * queue
Definition: ffplay.c:188
format_opts
AVDictionary * format_opts
Definition: cmdutils.c:58
AVSEEK_FLAG_BYTE
#define AVSEEK_FLAG_BYTE
seeking based on position in bytes
Definition: avformat.h:2478
AVIOContext::error
int error
contains the error code or 0 if no error happened
Definition: avio.h:239
AV_PIX_FMT_RGB32_1
#define AV_PIX_FMT_RGB32_1
Definition: pixfmt.h:452
VideoState::audio_diff_avg_coef
double audio_diff_avg_coef
Definition: ffplay.c:236
av_hwdevice_find_type_by_name
enum AVHWDeviceType av_hwdevice_find_type_by_name(const char *name)
Look up an AVHWDeviceType by name.
Definition: hwcontext.c:102
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
CURSOR_HIDE_DELAY
#define CURSOR_HIDE_DELAY
Definition: ffplay.c:104
SDL_VOLUME_STEP
#define SDL_VOLUME_STEP
Definition: ffplay.c:75
AVComplexFloat
Definition: tx.h:27
show_help_children
void show_help_children(const AVClass *class, int flags)
Show help for all options with given flags in class and all its children.
Definition: cmdutils.c:140
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:76
AV_PIX_FMT_BGRA
@ AV_PIX_FMT_BGRA
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:102
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
autorotate
static int autorotate
Definition: ffplay.c:348
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:557
TextureFormatEntry::texture_fmt
int texture_fmt
Definition: ffplay.c:370
video_disable
static int video_disable
Definition: ffplay.c:315
Frame::uploaded
int uploaded
Definition: ffplay.c:163
mathematics.h
sws_scale
int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
Definition: swscale.c:1208
AVDictionary
Definition: dict.c:34
AVChannelLayout::order
enum AVChannelOrder order
Channel order used in this layout.
Definition: channel_layout.h:316
compute_target_delay
static double compute_target_delay(double delay, VideoState *is)
Definition: ffplay.c:1536
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
Frame
Definition: ffplay.c:152
opt_input_file
static int opt_input_file(void *optctx, const char *filename)
Definition: ffplay.c:3613
stream_close
static void stream_close(VideoState *is)
Definition: ffplay.c:1267
av_read_frame
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: demux.c:1538
SDL_AUDIO_MAX_CALLBACKS_PER_SEC
#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC
Definition: ffplay.c:72
vk_renderer_destroy
void vk_renderer_destroy(VkRenderer *renderer)
Definition: ffplay_renderer.c:833
VideoState::paused
int paused
Definition: ffplay.c:206
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:321
init_clock
static void init_clock(Clock *c, int *queue_serial)
Definition: ffplay.c:1417
codec_type
enum AVMediaType codec_type
Definition: rtp.c:37
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:323
av_strlcatf
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:103
get_master_clock
static double get_master_clock(VideoState *is)
Definition: ffplay.c:1450
VideoState::width
int width
Definition: ffplay.c:289
file_iformat
static const AVInputFormat * file_iformat
Definition: ffplay.c:305
dummy
static int dummy
Definition: ffplay.c:3657
FF_QUIT_EVENT
#define FF_QUIT_EVENT
Definition: ffplay.c:359
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: packet.c:74
PacketQueue
Definition: ffplay.c:113
av_tx_init
av_cold int av_tx_init(AVTXContext **ctx, av_tx_fn *tx, enum AVTXType type, int inv, int len, const void *scale, uint64_t flags)
Initialize a transform context with the given configuration (i)MDCTs with an odd length are currently...
Definition: tx.c:903
subtitle_thread
static int subtitle_thread(void *arg)
Definition: ffplay.c:2263
av_channel_layout_describe_bprint
int av_channel_layout_describe_bprint(const AVChannelLayout *channel_layout, AVBPrint *bp)
bprint variant of av_channel_layout_describe().
Definition: channel_layout.c:594
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:410
VideoState::last_subtitle_stream
int last_subtitle_stream
Definition: ffplay.c:299
VideoState::SHOW_MODE_NONE
@ SHOW_MODE_NONE
Definition: ffplay.c:258
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:117
OptionDef
Definition: cmdutils.h:191
audio_decode_frame
static int audio_decode_frame(VideoState *is)
Decode one audio frame and return its uncompressed size.
Definition: ffplay.c:2366
subtitle_disable
static int subtitle_disable
Definition: ffplay.c:316
VideoState::pictq
FrameQueue pictq
Definition: ffplay.c:221
genpts
static int genpts
Definition: ffplay.c:329
VideoState::swr_ctx
struct SwrContext * swr_ctx
Definition: ffplay.c:253
opt_sync
static int opt_sync(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3582
step_to_next_frame
static void step_to_next_frame(VideoState *is)
Definition: ffplay.c:1528
upload_texture
static int upload_texture(SDL_Texture **tex, AVFrame *frame)
Definition: ffplay.c:906
VideoState::sampq
FrameQueue sampq
Definition: ffplay.c:223
TextureFormatEntry::format
enum AVPixelFormat format
Definition: ffplay.c:369
FrameQueue::rindex
int rindex
Definition: ffplay.c:169
video_display
static void video_display(VideoState *is)
Definition: ffplay.c:1371
AVCodec::max_lowres
uint8_t max_lowres
maximum value for lowres supported by the decoder
Definition: codec.h:207
avformat_close_input
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: demux.c:363
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:615
AVFormatContext::interrupt_callback
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1560
SDL_AUDIO_MIN_BUFFER_SIZE
#define SDL_AUDIO_MIN_BUFFER_SIZE
Definition: ffplay.c:70
startup_volume
static int startup_volume
Definition: ffplay.c:323
window
static SDL_Window * window
Definition: ffplay.c:361
avfilter_graph_create_filter
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
Definition: avfiltergraph.c:138
AV_SYNC_EXTERNAL_CLOCK
@ AV_SYNC_EXTERNAL_CLOCK
Definition: ffplay.c:183
fifo.h
toggle_full_screen
static void toggle_full_screen(VideoState *is)
Definition: ffplay.c:3290
Clock::queue_serial
int * queue_serial
Definition: ffplay.c:144
VideoState::extclk
Clock extclk
Definition: ffplay.c:219
VideoState::seek_flags
int seek_flags
Definition: ffplay.c:210
alwaysontop
static int alwaysontop
Definition: ffplay.c:322
VideoState::audio_st
AVStream * audio_st
Definition: ffplay.c:239
packet_queue_init
static int packet_queue_init(PacketQueue *q)
Definition: ffplay.c:470
AVPacket::opaque_ref
AVBufferRef * opaque_ref
AVBufferRef for free use by the API user.
Definition: packet.h:575
AUDIO_DIFF_AVG_NB
#define AUDIO_DIFF_AVG_NB
Definition: ffplay.c:95
AVCodecContext::ch_layout
AVChannelLayout ch_layout
Audio channel layout.
Definition: avcodec.h:1071
fail
#define fail()
Definition: checkasm.h:188
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
avfilter_graph_alloc
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:83
FrameQueue
Definition: ffplay.c:167
packet_queue_put
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:441
samplefmt.h
AVSubtitleRect::x
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:2212
VideoState::video_stream
int video_stream
Definition: ffplay.c:281
autoexit
static int autoexit
Definition: ffplay.c:332
avio_tell
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:494
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
AVChapter
Definition: avformat.h:1246
video_image_display
static void video_image_display(VideoState *is)
Definition: ffplay.c:963
val
static double val(void *priv, double ch)
Definition: aeval.c:77
AVFrame::ch_layout
AVChannelLayout ch_layout
Channel layout of the audio data.
Definition: frame.h:790
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
opt_show_mode
static int opt_show_mode(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3597
Decoder::empty_queue_cond
SDL_cond * empty_queue_cond
Definition: ffplay.c:193
pts
static int64_t pts
Definition: transcode_aac.c:644
set_clock_speed
static void set_clock_speed(Clock *c, double speed)
Definition: ffplay.c:1411
VideoState::audio_diff_threshold
double audio_diff_threshold
Definition: ffplay.c:237
OPT_TYPE_FLOAT
@ OPT_TYPE_FLOAT
Definition: cmdutils.h:86
av_opt_set
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:748
VideoState::audio_diff_cum
double audio_diff_cum
Definition: ffplay.c:235
VideoState::last_video_stream
int last_video_stream
Definition: ffplay.c:299
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:202
fast
static int fast
Definition: ffplay.c:328
loop
static int loop
Definition: ffplay.c:335
AVRational::num
int num
Numerator.
Definition: rational.h:59
avformat_network_init
int avformat_network_init(void)
Do global initialization of network libraries.
Definition: utils.c:551
VideoState::rdft_bits
int rdft_bits
Definition: ffplay.c:265
swr_convert
int attribute_align_arg swr_convert(struct SwrContext *s, uint8_t *const *out_arg, int out_count, const uint8_t *const *in_arg, int in_count)
Convert audio.
Definition: swresample.c:719
opt_height
static int opt_height(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3561
avsubtitle_free
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: avcodec.c:406
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:150
sdl_texture_format_map
static const struct TextureFormatEntry sdl_texture_format_map[]
AVFormatContext::bit_rate
int64_t bit_rate
Total stream bitrate in bit/s, 0 if not available.
Definition: avformat.h:1429
is_full_screen
static int is_full_screen
Definition: ffplay.c:356
avcodec_decode_subtitle2
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, const AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:982
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:76
set_sdl_yuv_conversion_mode
static void set_sdl_yuv_conversion_mode(AVFrame *frame)
Definition: ffplay.c:947
lrint
#define lrint
Definition: tablegen.h:53
Frame::flip_v
int flip_v
Definition: ffplay.c:164
AV_LOG_TRACE
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
Definition: log.h:206
pkt
AVPacket * pkt
Definition: movenc.c:60
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
AVFormatContext::metadata
AVDictionary * metadata
Metadata that applies to the whole file.
Definition: avformat.h:1522
vk_get_renderer
VkRenderer * vk_get_renderer(void)
Definition: ffplay_renderer.c:805
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
AVInputFormat
Definition: avformat.h:548
audio_thread
static int audio_thread(void *arg)
Definition: ffplay.c:2071
set_clock
static void set_clock(Clock *c, double pts, int serial)
Definition: ffplay.c:1405
av_dump_format
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate,...
Definition: dump.c:824
VideoState
Definition: ffplay.c:201
frame_queue_peek_next
static Frame * frame_queue_peek_next(FrameQueue *f)
Definition: ffplay.c:734
sdl_audio_callback
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
Definition: ffplay.c:2476
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
sync_clock_to_slave
static void sync_clock_to_slave(Clock *c, Clock *slave)
Definition: ffplay.c:1425
av_tx_fn
void(* av_tx_fn)(AVTXContext *s, void *out, void *in, ptrdiff_t stride)
Function pointer to a function to perform the transform.
Definition: tx.h:151
swr_init
av_cold int swr_init(struct SwrContext *s)
Initialize context after user parameters have been set.
Definition: swresample.c:140
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
Definition: buffersink.c:190
avformat_open_input
int avformat_open_input(AVFormatContext **ps, const char *url, const AVInputFormat *fmt, AVDictionary **options)
Open an input stream and read the header.
Definition: demux.c:215
frame_queue_signal
static void frame_queue_signal(FrameQueue *f)
Definition: ffplay.c:722
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:62
av_channel_layout_describe
int av_channel_layout_describe(const AVChannelLayout *channel_layout, char *buf, size_t buf_size)
Get a human-readable string describing the channel layout properties.
Definition: channel_layout.c:648
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:149
VideoState::ShowMode
ShowMode
Definition: ffplay.c:257
Decoder::avctx
AVCodecContext * avctx
Definition: ffplay.c:189
s
#define s(width, name)
Definition: cbs_vp9.c:198
show_help_default
void show_help_default(const char *opt, const char *arg)
Per-fftool specific help handler.
Definition: ffplay.c:3720
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
default_height
static int default_height
Definition: ffplay.c:309
AVFormatContext::flags
int flags
Flags modifying the (de)muxer behaviour.
Definition: avformat.h:1438
format
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
AVCodecParameters::sample_aspect_ratio
AVRational sample_aspect_ratio
Video only.
Definition: codec_par.h:144
AVInputFormat::name
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:553
AVFormatContext::iformat
const struct AVInputFormat * iformat
The input container format.
Definition: avformat.h:1299
AV_PIX_FMT_0BGR32
#define AV_PIX_FMT_0BGR32
Definition: pixfmt.h:456
AVCOL_SPC_SMPTE170M
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
Definition: pixfmt.h:616
AVDictionaryEntry::key
char * key
Definition: dict.h:90
Clock::last_updated
double last_updated
Definition: ffplay.c:140
PacketQueue::duration
int64_t duration
Definition: ffplay.c:117
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
AVSubtitleRect::y
int y
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:2213
AVCodecParameters::width
int width
Video only.
Definition: codec_par.h:134
video_stream
static AVStream * video_stream
Definition: demux_decode.c:42
calculate_display_rect
static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, int pic_width, int pic_height, AVRational pic_sar)
Definition: ffplay.c:861
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
screen_height
static int screen_height
Definition: ffplay.c:311
EXTERNAL_CLOCK_SPEED_STEP
#define EXTERNAL_CLOCK_SPEED_STEP
Definition: ffplay.c:92
Decoder::pkt_serial
int pkt_serial
Definition: ffplay.c:190
configure_video_filters
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
Definition: ffplay.c:1860
avcodec_receive_frame
int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder or encoder (when the AV_CODEC_FLAG_RECON_FRAME flag is used...
Definition: avcodec.c:713
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
AVHWDeviceType
AVHWDeviceType
Definition: hwcontext.h:27
AVMEDIA_TYPE_NB
@ AVMEDIA_TYPE_NB
Definition: avutil.h:206
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
av_read_play
int av_read_play(AVFormatContext *s)
Start playing a network-based stream (e.g.
Definition: demux_utils.c:182
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
opt_codec
static int opt_codec(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3630
AVPacketSideData::data
uint8_t * data
Definition: packet.h:391
Clock::pts_drift
double pts_drift
Definition: ffplay.c:139
VideoState::videoq
PacketQueue videoq
Definition: ffplay.c:283
ctx
AVFormatContext * ctx
Definition: movenc.c:49
av_guess_sample_aspect_ratio
AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
Guess the sample aspect ratio of a frame, based on both the stream and the frame aspect ratio.
Definition: avformat.c:732
channels
channels
Definition: aptx.h:31
limits.h
REFRESH_RATE
#define REFRESH_RATE
Definition: ffplay.c:98
FrameQueue::rindex_shown
int rindex_shown
Definition: ffplay.c:174
nb_streams
static int nb_streams
Definition: ffprobe.c:384
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
vk_renderer_get_hw_dev
int vk_renderer_get_hw_dev(VkRenderer *renderer, AVBufferRef **dev)
Definition: ffplay_renderer.c:818
AVSubtitle::pts
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2244
VideoState::force_refresh
int force_refresh
Definition: ffplay.c:205
get_clock
static double get_clock(Clock *c)
Definition: ffplay.c:1385
vk_renderer_display
int vk_renderer_display(VkRenderer *renderer, AVFrame *frame)
Definition: ffplay_renderer.c:823
screen_top
static int screen_top
Definition: ffplay.c:313
VideoState::audio_diff_avg_count
int audio_diff_avg_count
Definition: ffplay.c:238
EXTERNAL_CLOCK_SPEED_MIN
#define EXTERNAL_CLOCK_SPEED_MIN
Definition: ffplay.c:90
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
av_get_sample_fmt_name
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:51
renderer
static SDL_Renderer * renderer
Definition: ffplay.c:362
av_usleep
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
SwrContext
The libswresample context.
Definition: swresample_internal.h:95
vp_duration
static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp)
Definition: ffplay.c:1566
NAN
#define NAN
Definition: mathematics.h:115
AV_PIX_FMT_BGR32_1
#define AV_PIX_FMT_BGR32_1
Definition: pixfmt.h:454
VideoState::step
int step
Definition: ffplay.c:290
synchronize_audio
static int synchronize_audio(VideoState *is, int nb_samples)
Definition: ffplay.c:2318
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:461
arg
const char * arg
Definition: jacosubdec.c:67
if
if(ret)
Definition: filter_design.txt:179
window_title
static const char * window_title
Definition: ffplay.c:307
Clock::speed
double speed
Definition: ffplay.c:141
VideoState::SHOW_MODE_VIDEO
@ SHOW_MODE_VIDEO
Definition: ffplay.c:258
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:221
AVFormatContext
Format I/O context.
Definition: avformat.h:1287
av_log_get_level
int av_log_get_level(void)
Get the current log level.
Definition: log.c:442
avfilter_get_by_name
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:634
init_dynload
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:75
opts
AVDictionary * opts
Definition: movenc.c:51
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:771
AVPacket::buf
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
Definition: packet.h:522
avcodec_parameters_to_context
int avcodec_parameters_to_context(AVCodecContext *codec, const struct AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
AVSubtitleRect::w
int w
width of pict, undefined when pict is not set
Definition: avcodec.h:2214
seek_chapter
static void seek_chapter(VideoState *is, int incr)
Definition: ffplay.c:3325
VkRenderer
Definition: ffplay_renderer.c:49
get_master_sync_type
static int get_master_sync_type(VideoState *is)
Definition: ffplay.c:1433
avcodec_get_class
const AVClass * avcodec_get_class(void)
Get the AVClass for AVCodecContext.
Definition: options.c:184
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
frame_queue_destroy
static void frame_queue_destroy(FrameQueue *f)
Definition: ffplay.c:710
avfilter_graph_config
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
Definition: avfiltergraph.c:1294
AVStream::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avformat.h:787
NULL
#define NULL
Definition: coverity.c:32
avcodec_find_decoder_by_name
const AVCodec * avcodec_find_decoder_by_name(const char *name)
Find a registered decoder with the specified name.
Definition: allcodecs.c:1035
FrameQueue::max_size
int max_size
Definition: ffplay.c:172
AV_DICT_MULTIKEY
#define AV_DICT_MULTIKEY
Allow to store several equal keys in the dictionary.
Definition: dict.h:84
OPT_EXPERT
#define OPT_EXPERT
Definition: cmdutils.h:207
AV_PIX_FMT_YUYV422
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:74
Decoder
Definition: ffmpeg.h:418
AudioParams::freq
int freq
Definition: ffplay.c:130
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:164
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
isnan
#define isnan(x)
Definition: libm.h:340
AudioParams::ch_layout
AVChannelLayout ch_layout
Definition: ffplay.c:131
audio_open
static int audio_open(void *opaque, AVChannelLayout *wanted_channel_layout, int wanted_sample_rate, struct AudioParams *audio_hw_params)
Definition: ffplay.c:2519
AVFilterGraph::filters
AVFilterContext ** filters
Definition: avfilter.h:762
filter_codec_opts
int filter_codec_opts(const AVDictionary *opts, enum AVCodecID codec_id, AVFormatContext *s, AVStream *st, const AVCodec *codec, AVDictionary **dst, AVDictionary **opts_used)
Filter out options for given codec.
Definition: cmdutils.c:1348
stream_cycle_channel
static void stream_cycle_channel(VideoState *is, int codec_type)
Definition: ffplay.c:3211
VideoState::frame_drops_late
int frame_drops_late
Definition: ffplay.c:255
AV_CODEC_FLAG2_FAST
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:357
AVFormatContext::pb
AVIOContext * pb
I/O context.
Definition: avformat.h:1329
av_log_set_flags
void av_log_set_flags(int arg)
Definition: log.c:452
frame_queue_unref_item
static void frame_queue_unref_item(Frame *vp)
Definition: ffplay.c:683
FrameQueue::queue
Frame queue[FRAME_QUEUE_SIZE]
Definition: ffplay.c:168
VideoState::last_i_start
int last_i_start
Definition: ffplay.c:262
Decoder::packet_pending
int packet_pending
Definition: ffplay.c:192
cursor_last_shown
static int64_t cursor_last_shown
Definition: ffplay.c:343
AVProgram::stream_index
unsigned int * stream_index
Definition: avformat.h:1215
frame_queue_peek
static Frame * frame_queue_peek(FrameQueue *f)
Definition: ffplay.c:729
avfilter_inout_alloc
AVFilterInOut * avfilter_inout_alloc(void)
Allocate a single AVFilterInOut entry.
Definition: graphparser.c:71
Frame::duration
double duration
Definition: ffplay.c:157
lowres
static int lowres
Definition: ffplay.c:330
double
double
Definition: af_crystalizer.c:132
AV_DICT_DONT_OVERWRITE
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
Definition: dict.h:81
AV_PIX_FMT_RGB8
@ AV_PIX_FMT_RGB8
packed RGB 3:3:2, 8bpp, (msb)3R 3G 2B(lsb)
Definition: pixfmt.h:93
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:143
time.h
Frame::pos
int64_t pos
Definition: ffplay.c:158
VideoState::frame_last_returned_time
double frame_last_returned_time
Definition: ffplay.c:279
set_clock_at
static void set_clock_at(Clock *c, double pts, int serial, double time)
Definition: ffplay.c:1397
toggle_pause
static void toggle_pause(VideoState *is)
Definition: ffplay.c:1510
TextureFormatEntry
Definition: ffplay.c:368
AVFilterGraph
Definition: avfilter.h:760
AV_SYNC_AUDIO_MASTER
@ AV_SYNC_AUDIO_MASTER
Definition: ffplay.c:181
stream_component_open
static int stream_component_open(VideoState *is, int stream_index)
Definition: ffplay.c:2632
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
av_packet_ref
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: packet.c:437
AVCodecParameters::ch_layout
AVChannelLayout ch_layout
Audio only.
Definition: codec_par.h:180
VideoState::rdft_data
AVComplexFloat * rdft_data
Definition: ffplay.c:267
av_packet_move_ref
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
Definition: packet.c:486
AV_PIX_FMT_NE
#define AV_PIX_FMT_NE(be, le)
Definition: pixfmt.h:448
exp
int8_t exp
Definition: eval.c:73
VideoState::seek_req
int seek_req
Definition: ffplay.c:209
VideoState::SHOW_MODE_WAVES
@ SHOW_MODE_WAVES
Definition: ffplay.c:258
VideoState::audio_clock
double audio_clock
Definition: ffplay.c:233
VideoState::read_pause_return
int read_pause_return
Definition: ffplay.c:213
event_loop
static void event_loop(VideoState *cur_stream)
Definition: ffplay.c:3353
swresample.h
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
VideoState::ytop
int ytop
Definition: ffplay.c:289
AVCodecParameters::sample_rate
int sample_rate
Audio only.
Definition: codec_par.h:184
av_opt_set_int
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:800
av_packet_side_data_get
const AVPacketSideData * av_packet_side_data_get(const AVPacketSideData *sd, int nb_sd, enum AVPacketSideDataType type)
Get side information from a side data array.
Definition: packet.c:656
avcodec_find_decoder
const AVCodec * avcodec_find_decoder(enum AVCodecID id)
Find a registered decoder with a matching codec ID.
Definition: allcodecs.c:1007
VideoState::sample_array
int16_t sample_array[SAMPLE_ARRAY_SIZE]
Definition: ffplay.c:260
AVFormatContext::nb_streams
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1343
exit_on_mousedown
static int exit_on_mousedown
Definition: ffplay.c:334
AVFilterGraph::scale_sws_opts
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:765
VideoState::iformat
const AVInputFormat * iformat
Definition: ffplay.c:203
Decoder::next_pts_tb
AVRational next_pts_tb
Definition: ffplay.c:197
AVCodecContext::lowres
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1865
VideoState::audioq
PacketQueue audioq
Definition: ffplay.c:240
codec_opts
AVDictionary * codec_opts
Definition: cmdutils.c:58
audio_callback_time
static int64_t audio_callback_time
Definition: ffplay.c:357
AV_OPT_FLAG_FILTERING_PARAM
#define AV_OPT_FLAG_FILTERING_PARAM
A generic parameter which can be set by the user for filtering.
Definition: opt.h:381
avformat_find_stream_info
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
Read packets of a media file to get stream information.
Definition: demux.c:2516
Frame::format
int format
Definition: ffplay.c:161
INSERT_FILT
#define INSERT_FILT(name, arg)
f
f
Definition: af_crystalizer.c:122
swr_alloc_set_opts2
int swr_alloc_set_opts2(struct SwrContext **ps, const AVChannelLayout *out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate, const AVChannelLayout *in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate, int log_offset, void *log_ctx)
Allocate SwrContext if needed and set/reset common parameters.
Definition: swresample.c:40
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:515
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:491
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
OPT_TYPE_INT
@ OPT_TYPE_INT
Definition: cmdutils.h:84
AVMediaType
AVMediaType
Definition: avutil.h:199
av_log_set_callback
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:462
AVPacket::size
int size
Definition: packet.h:540
VideoState::in_audio_filter
AVFilterContext * in_audio_filter
Definition: ffplay.c:295
AVFifo
Definition: fifo.c:35
avformat_match_stream_specifier
int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the stream st contained in s is matched by the stream specifier spec.
Definition: avformat.c:686
VideoState::audio_write_buf_size
int audio_write_buf_size
Definition: ffplay.c:247
avformat_alloc_context
AVFormatContext * avformat_alloc_context(void)
Allocate an AVFormatContext.
Definition: options.c:161
AVDISCARD_DEFAULT
@ AVDISCARD_DEFAULT
discard useless packets like 0 size packets in avi
Definition: defs.h:216
height
#define height
Definition: dsp.h:85
FrameQueue::mutex
SDL_mutex * mutex
Definition: ffplay.c:175
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:240
AVChannelLayout
An AVChannelLayout holds information about the channel layout of audio data.
Definition: channel_layout.h:311
frame_queue_peek_writable
static Frame * frame_queue_peek_writable(FrameQueue *f)
Definition: ffplay.c:744
OPT_AUDIO
#define OPT_AUDIO
Definition: cmdutils.h:209
av_buffersrc_parameters_alloc
AVBufferSrcParameters * av_buffersrc_parameters_alloc(void)
Allocate a new AVBufferSrcParameters instance.
Definition: buffersrc.c:104
Frame::sub
AVSubtitle sub
Definition: ffplay.c:154
VideoState::last_audio_stream
int last_audio_stream
Definition: ffplay.c:299
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:122
vfilters_list
static const char ** vfilters_list
Definition: ffplay.c:345
AVFrame::sample_rate
int sample_rate
Sample rate of the audio data.
Definition: frame.h:588
create_hwaccel
static int create_hwaccel(AVBufferRef **device_ctx)
Definition: ffplay.c:2595
decoder_init
static int decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond)
Definition: ffplay.c:566
avfilter_link
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:148
AVBufferSrcParameters::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Video with a hwaccel pixel format only.
Definition: buffersrc.h:106
sdl_supported_color_spaces
static enum AVColorSpace sdl_supported_color_spaces[]
Definition: ffplay.c:940
start_time
static int64_t start_time
Definition: ffplay.c:326
audio_stream
static AVStream * audio_stream
Definition: demux_decode.c:42
VideoState::SHOW_MODE_NB
@ SHOW_MODE_NB
Definition: ffplay.c:258
AVCodecContext::sample_fmt
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1063
Frame::serial
int serial
Definition: ffplay.c:155
AVCodecContext::pkt_timebase
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are expressed.
Definition: avcodec.h:557
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
hypot
static av_const double hypot(double x, double y)
Definition: libm.h:366
uninit_opts
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents.
Definition: cmdutils.c:62
size
int size
Definition: twinvq_data.h:10344
VideoState::xpos
int xpos
Definition: ffplay.c:268
av_make_q
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
read_thread
static int read_thread(void *arg)
Definition: ffplay.c:2822
AV_PIX_FMT_BGR555
#define AV_PIX_FMT_BGR555
Definition: pixfmt.h:471
avformat_seek_file
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
Seek to timestamp ts.
Definition: seek.c:663
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
Clock::paused
int paused
Definition: ffplay.c:143
rect::h
int h
Definition: f_ebur128.c:76
VideoState::sub_texture
SDL_Texture * sub_texture
Definition: ffplay.c:271
swr_free
av_cold void swr_free(SwrContext **ss)
Free the given SwrContext and set the pointer to NULL.
Definition: swresample.c:121
AVFrameSideData::data
uint8_t * data
Definition: frame.h:267
VideoState::vid_texture
SDL_Texture * vid_texture
Definition: ffplay.c:272
OPT_TYPE_INT64
@ OPT_TYPE_INT64
Definition: cmdutils.h:85
printf
printf("static const uint8_t my_array[100] = {\n")
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:476
VideoState::sample_array_index
int sample_array_index
Definition: ffplay.c:261
fn
#define fn(a)
Definition: aap_template.c:37
wanted_stream_spec
static const char * wanted_stream_spec[AVMEDIA_TYPE_NB]
Definition: ffplay.c:317
infinite_buffer
static int infinite_buffer
Definition: ffplay.c:337
AVSubtitle::end_display_time
uint32_t end_display_time
Definition: avcodec.h:2241
VideoState::max_frame_duration
double max_frame_duration
Definition: ffplay.c:284
avdevice.h
show_banner
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: opt_common.c:237
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:164
packet_queue_destroy
static void packet_queue_destroy(PacketQueue *q)
Definition: ffplay.c:504
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:538
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:451
VideoState::frame_drops_early
int frame_drops_early
Definition: ffplay.c:254
update_video_pts
static void update_video_pts(VideoState *is, double pts, int serial)
Definition: ffplay.c:1578
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
toggle_mute
static void toggle_mute(VideoState *is)
Definition: ffplay.c:1516
decoder_abort
static void decoder_abort(Decoder *d, FrameQueue *fq)
Definition: ffplay.c:817
video_refresh
static void video_refresh(void *opaque, double *remaining_time)
Definition: ffplay.c:1586
AV_CHANNEL_ORDER_NATIVE
@ AV_CHANNEL_ORDER_NATIVE
The native channel order, i.e.
Definition: channel_layout.h:122
ns
#define ns(max_value, name, subs,...)
Definition: cbs_av1.c:616
seek_interval
static float seek_interval
Definition: ffplay.c:319
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: packet.c:63
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:223
rect::x
int x
Definition: f_ebur128.c:76
VideoState::seek_pos
int64_t seek_pos
Definition: ffplay.c:211
OPT_TYPE_FUNC
@ OPT_TYPE_FUNC
Definition: cmdutils.h:81
frame_queue_push
static void frame_queue_push(FrameQueue *f)
Definition: ffplay.c:776
audio_dev
static SDL_AudioDeviceID audio_dev
Definition: ffplay.c:364
OPT_TYPE_BOOL
@ OPT_TYPE_BOOL
Definition: cmdutils.h:82
av_tx_uninit
av_cold void av_tx_uninit(AVTXContext **ctx)
Frees a context and sets *ctx to NULL, does nothing when *ctx == NULL.
Definition: tx.c:295
sigterm_handler
static void sigterm_handler(int sig)
Definition: ffplay.c:1330
av_channel_layout_compare
int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1)
Check whether two channel layouts are semantically the same, i.e.
Definition: channel_layout.c:804
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
packet_queue_abort
static void packet_queue_abort(PacketQueue *q)
Definition: ffplay.c:512
video_codec_name
static const char * video_codec_name
Definition: ffplay.c:341
buffersink.h
AVCodec::id
enum AVCodecID id
Definition: codec.h:201
iformat
static const AVInputFormat * iformat
Definition: ffprobe.c:360
packet_queue_flush
static void packet_queue_flush(PacketQueue *q)
Definition: ffplay.c:490
av_channel_layout_default
void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels)
Get the default channel layout for a given number of channels.
Definition: channel_layout.c:834
packet_queue_get
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
Definition: ffplay.c:532
av_get_picture_type_char
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:40
MAX_QUEUE_SIZE
#define MAX_QUEUE_SIZE
Definition: ffplay.c:64
avcodec_get_name
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:409
MIN_FRAMES
#define MIN_FRAMES
Definition: ffplay.c:65
nb_vfilters
static int nb_vfilters
Definition: ffplay.c:346
VideoState::queue_attachments_req
int queue_attachments_req
Definition: ffplay.c:208
avcodec_send_packet
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:727
FrameQueue::windex
int windex
Definition: ffplay.c:170
VideoState::filename
char * filename
Definition: ffplay.c:288
VideoState::muted
int muted
Definition: ffplay.c:249
Decoder::start_pts
int64_t start_pts
Definition: ffplay.c:194
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:469
bprint.h
Clock::pts
double pts
Definition: ffplay.c:138
av_buffersrc_parameters_set
int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param)
Initialize the buffersrc or abuffersrc filter with the provided parameters.
Definition: buffersrc.c:117
av_hwdevice_ctx_create_derived
int av_hwdevice_ctx_create_derived(AVBufferRef **dst_ref_ptr, enum AVHWDeviceType type, AVBufferRef *src_ref, int flags)
Create a new device of the specified type from an existing device.
Definition: hwcontext.c:703
VIDEO_PICTURE_QUEUE_SIZE
#define VIDEO_PICTURE_QUEUE_SIZE
Definition: ffplay.c:124
PacketQueue::serial
int serial
Definition: ffplay.c:119
AVSubtitle::format
uint16_t format
Definition: avcodec.h:2239
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:532
VideoState::show_mode
enum VideoState::ShowMode show_mode
VideoState::audio_src
struct AudioParams audio_src
Definition: ffplay.c:250
program_birth_year
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffplay.c:62
VideoState::audio_buf1
uint8_t * audio_buf1
Definition: ffplay.c:243
OPT_TYPE_TIME
@ OPT_TYPE_TIME
Definition: cmdutils.h:88
av_get_bytes_per_sample
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:108
avfilter_graph_parse_ptr
int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters, AVFilterInOut **inputs, AVFilterInOut **outputs, void *log_ctx)
Add a graph described by a string to a graph.
Definition: graphparser.c:919
setup_find_stream_info_opts
int setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *local_codec_opts, AVDictionary ***dst)
Setup AVCodecContext options for avformat_find_stream_info().
Definition: cmdutils.c:1416
swr_opts
AVDictionary * swr_opts
Definition: cmdutils.c:57
compute_mod
static int compute_mod(int a, int b)
Definition: ffplay.c:1053
Decoder::start_pts_tb
AVRational start_pts_tb
Definition: ffplay.c:195
AVCodecParameters::height
int height
Definition: codec_par.h:135
AV_TIME_BASE
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:254
AVFrame::extended_data
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:450
Decoder::pkt
AVPacket * pkt
Definition: ffplay.c:187
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
video_open
static int video_open(VideoState *is)
Definition: ffplay.c:1347
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:609
get_sdl_pix_fmt_and_blendmode
static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
Definition: ffplay.c:888
show_status
static int show_status
Definition: ffplay.c:324
opt_format
static int opt_format(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3572
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
parse_options
int parse_options(void *optctx, int argc, char **argv, const OptionDef *options, int(*parse_arg_function)(void *, const char *))
Definition: cmdutils.c:417
AV_PIX_FMT_RGB555
#define AV_PIX_FMT_RGB555
Definition: pixfmt.h:466
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:637
vk_renderer_resize
int vk_renderer_resize(VkRenderer *renderer, int width, int height)
Definition: ffplay_renderer.c:828
borderless
static int borderless
Definition: ffplay.c:321
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:610
update_sample_display
static void update_sample_display(VideoState *is, short *samples, int samples_size)
Definition: ffplay.c:2298
MyAVPacketList
Definition: ffplay.c:108
OPT_FUNC_ARG
#define OPT_FUNC_ARG
Definition: cmdutils.h:201
AV_SAMPLE_FMT_S16
@ AV_SAMPLE_FMT_S16
signed 16 bits
Definition: samplefmt.h:58
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
AVProgram
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1211
AVCodecContext::hw_device_ctx
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/d...
Definition: avcodec.h:1507
av_read_pause
int av_read_pause(AVFormatContext *s)
Pause a network-based stream (e.g.
Definition: demux_utils.c:191
len
int len
Definition: vorbis_enc_data.h:426
Frame::frame
AVFrame * frame
Definition: ffplay.c:153
AV_PIX_FMT_BGR565
#define AV_PIX_FMT_BGR565
Definition: pixfmt.h:470
av_samples_get_buffer_size
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
Definition: samplefmt.c:121
AVFilterPad::name
const char * name
Pad name.
Definition: filters.h:44
vk_renderer
static VkRenderer * vk_renderer
Definition: ffplay.c:366
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:612
PacketQueue::nb_packets
int nb_packets
Definition: ffplay.c:115
av_rescale
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:129
FRAME_QUEUE_SIZE
#define FRAME_QUEUE_SIZE
Definition: ffplay.c:127
frame_queue_peek_readable
static Frame * frame_queue_peek_readable(FrameQueue *f)
Definition: ffplay.c:760
outputs
static const AVFilterPad outputs[]
Definition: af_aap.c:310
AVIOInterruptCB::opaque
void * opaque
Definition: avio.h:61
AV_PIX_FMT_RGB565
#define AV_PIX_FMT_RGB565
Definition: pixfmt.h:465
parse_loglevel
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:552
av_cmp_q
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
Clock::serial
int serial
Definition: ffplay.c:142
VideoState::height
int height
Definition: ffplay.c:289
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:84
AVStream::disposition
int disposition
Stream disposition - a combination of AV_DISPOSITION_* flags.
Definition: avformat.h:817
AVFMT_FLAG_GENPTS
#define AVFMT_FLAG_GENPTS
Generate missing pts even if it requires parsing future frames.
Definition: avformat.h:1439
VideoState::subpq
FrameQueue subpq
Definition: ffplay.c:222
av_buffer_allocz
AVBufferRef * av_buffer_allocz(size_t size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:93
seek_by_bytes
static int seek_by_bytes
Definition: ffplay.c:318
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:748
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:174
EXTERNAL_CLOCK_MAX_FRAMES
#define EXTERNAL_CLOCK_MAX_FRAMES
Definition: ffplay.c:67
av_guess_frame_rate
AVRational av_guess_frame_rate(AVFormatContext *format, AVStream *st, AVFrame *frame)
Guess the frame rate, based on both the container and codec information.
Definition: avformat.c:755
AVSubtitleRect::h
int h
height of pict, undefined when pict is not set
Definition: avcodec.h:2215
stream_open
static VideoState * stream_open(const char *filename, const AVInputFormat *iformat)
Definition: ffplay.c:3152
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
avcodec_flush_buffers
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal codec state / flush internal buffers.
Definition: avcodec.c:374
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
AV_PIX_FMT_0RGB32
#define AV_PIX_FMT_0RGB32
Definition: pixfmt.h:455
VideoState::vfilter_idx
int vfilter_idx
Definition: ffplay.c:292
filter_nbthreads
static int filter_nbthreads
Definition: ffplay.c:350
log_callback_help
void log_callback_help(void *ptr, int level, const char *fmt, va_list vl)
Trivial log callback.
Definition: cmdutils.c:70
cursor_hidden
static int cursor_hidden
Definition: ffplay.c:344
VideoState::SHOW_MODE_RDFT
@ SHOW_MODE_RDFT
Definition: ffplay.c:258
av_hwdevice_ctx_create
int av_hwdevice_ctx_create(AVBufferRef **pdevice_ref, enum AVHWDeviceType type, const char *device, AVDictionary *opts, int flags)
Open a device of the specified type and create an AVHWDeviceContext for it.
Definition: hwcontext.c:600
find_stream_info
static int find_stream_info
Definition: ffplay.c:349
packet_queue_put_private
static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:418
hwaccel
static const char * hwaccel
Definition: ffplay.c:353
pos
unsigned int pos
Definition: spdifenc.c:414
VideoState::audio_buf_index
int audio_buf_index
Definition: ffplay.c:246
avformat.h
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:99
VideoState::out_video_filter
AVFilterContext * out_video_filter
Definition: ffplay.c:294
dict.h
av_fifo_alloc2
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
Definition: fifo.c:47
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:496
VideoState::last_paused
int last_paused
Definition: ffplay.c:207
AV_LOG_SKIP_REPEATED
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:370
remove_avoptions
void remove_avoptions(AVDictionary **a, AVDictionary *b)
Definition: cmdutils.c:1518
AV_PIX_FMT_UYVY422
@ AV_PIX_FMT_UYVY422
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
Definition: pixfmt.h:88
CMDUTILS_COMMON_OPTIONS
#define CMDUTILS_COMMON_OPTIONS
Definition: opt_common.h:199
AV_DICT_MATCH_CASE
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:74
rdftspeed
double rdftspeed
Definition: ffplay.c:342
AV_TX_FLOAT_RDFT
@ AV_TX_FLOAT_RDFT
Real to complex and complex to real DFTs.
Definition: tx.h:90
MyAVPacketList::serial
int serial
Definition: ffplay.c:110
opt_width
static int opt_width(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3550
enable_vulkan
static int enable_vulkan
Definition: ffplay.c:351
main
int main(int argc, char **argv)
Definition: ffplay.c:3752
avformat_network_deinit
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:563
show_usage
static void show_usage(void)
Definition: ffplay.c:3713
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:28
AVCodecContext
main external API structure.
Definition: avcodec.h:451
AVFrame::height
int height
Definition: frame.h:461
PacketQueue::mutex
SDL_mutex * mutex
Definition: ffplay.c:120
packet_queue_start
static void packet_queue_start(PacketQueue *q)
Definition: ffplay.c:523
VideoState::vidclk
Clock vidclk
Definition: ffplay.c:218
audio_codec_name
static const char * audio_codec_name
Definition: ffplay.c:339
channel_layout.h
AVBufferSrcParameters
This structure contains the parameters describing the frames that will be passed to this filter.
Definition: buffersrc.h:73
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
AV_SYNC_FRAMEDUP_THRESHOLD
#define AV_SYNC_FRAMEDUP_THRESHOLD
Definition: ffplay.c:82
show_mode
static enum ShowMode show_mode
Definition: ffplay.c:338
PacketQueue::cond
SDL_cond * cond
Definition: ffplay.c:121
sws_freeContext
void sws_freeContext(struct SwsContext *swsContext)
Free the swscaler context swsContext.
Definition: utils.c:2432
PacketQueue::size
int size
Definition: ffplay.c:116
options
static const OptionDef options[]
Definition: ffplay.c:3659
opt_common.h
AVInputFormat::flags
int flags
Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, AVFMT_NOTIMESTAMPS,...
Definition: avformat.h:567
AVRational::den
int den
Denominator.
Definition: rational.h:60
mode
mode
Definition: ebur128.h:83
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
VideoState::in_video_filter
AVFilterContext * in_video_filter
Definition: ffplay.c:293
VideoState::subtitle_stream
int subtitle_stream
Definition: ffplay.c:274
avfilter.h
VideoState::abort_request
int abort_request
Definition: ffplay.c:204
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:826
av_bprint_clear
void av_bprint_clear(AVBPrint *buf)
Reset the string to "" but keep internal allocated data.
Definition: bprint.c:232
av_channel_layout_uninit
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
Definition: channel_layout.c:437
VideoState::audio_buf1_size
unsigned int audio_buf1_size
Definition: ffplay.c:245
VideoState::eof
int eof
Definition: ffplay.c:286
av_dict_parse_string
int av_dict_parse_string(AVDictionary **pm, const char *str, const char *key_val_sep, const char *pairs_sep, int flags)
Parse the key/value pairs list and add the parsed entries to a dictionary.
Definition: dict.c:200
AV_SYNC_THRESHOLD_MAX
#define AV_SYNC_THRESHOLD_MAX
Definition: ffplay.c:80
decoder_destroy
static void decoder_destroy(Decoder *d)
Definition: ffplay.c:678
av_get_packed_sample_fmt
enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt)
Get the packed alternative form of the given sample format.
Definition: samplefmt.c:77
VideoState::read_tid
SDL_Thread * read_tid
Definition: ffplay.c:202
VideoState::audio_volume
int audio_volume
Definition: ffplay.c:248
VideoState::subdec
Decoder subdec
Definition: ffplay.c:227
AVIOContext::eof_reached
int eof_reached
true if was unable to read due to error or eof
Definition: avio.h:238
stream_has_enough_packets
static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue)
Definition: ffplay.c:2798
samples
Filter the word “frame” indicates either a video frame or a group of audio samples
Definition: filter_design.txt:8
VideoState::out_audio_filter
AVFilterContext * out_audio_filter
Definition: ffplay.c:296
av_find_input_format
const AVInputFormat * av_find_input_format(const char *short_name)
Find AVInputFormat based on the short name of the input format.
Definition: format.c:144
AVFormatContext::duration
int64_t duration
Duration of the stream, in AV_TIME_BASE fractional seconds.
Definition: avformat.h:1422
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
AVPacket::stream_index
int stream_index
Definition: packet.h:541
GROW_ARRAY
#define GROW_ARRAY(array, nb_elems)
Definition: cmdutils.h:532
SUBPICTURE_QUEUE_SIZE
#define SUBPICTURE_QUEUE_SIZE
Definition: ffplay.c:125
AVFilterContext
An instance of a filter.
Definition: avfilter.h:457
input_filename
static const char * input_filename
Definition: ffplay.c:306
av_channel_layout_copy
int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
Make a copy of a channel layout.
Definition: channel_layout.c:444
stream_toggle_pause
static void stream_toggle_pause(VideoState *is)
Definition: ffplay.c:1497
VideoState::continue_read_thread
SDL_cond * continue_read_thread
Definition: ffplay.c:301
vulkan_params
static char * vulkan_params
Definition: ffplay.c:352
av_dict_set_int
int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags)
Convenience wrapper for av_dict_set() that converts the value to a string and stores it.
Definition: dict.c:167
toggle_audio_display
static void toggle_audio_display(VideoState *is)
Definition: ffplay.c:3296
AVCodecContext::codec_type
enum AVMediaType codec_type
Definition: avcodec.h:459
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:272
AV_OPT_FLAG_DECODING_PARAM
#define AV_OPT_FLAG_DECODING_PARAM
A generic parameter which can be set by the user for demuxing or decoding.
Definition: opt.h:356
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AVFMT_TS_DISCONT
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:481
VideoState::real_data
float * real_data
Definition: ffplay.c:266
mem.h
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
get_video_frame
static int get_video_frame(VideoState *is, AVFrame *frame)
Definition: ffplay.c:1784
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:265
default_width
static int default_width
Definition: ffplay.c:308
configure_filtergraph
static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph, AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
Definition: ffplay.c:1817
AVIOInterruptCB::callback
int(* callback)(void *)
Definition: avio.h:60
VideoState::realtime
int realtime
Definition: ffplay.c:215
VideoState::sub_convert_ctx
struct SwsContext * sub_convert_ctx
Definition: ffplay.c:285
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
AVDictionaryEntry
Definition: dict.h:89
OPT_TYPE_STRING
@ OPT_TYPE_STRING
Definition: cmdutils.h:83
AVPacket
This structure stores compressed data.
Definition: packet.h:516
audio_disable
static int audio_disable
Definition: ffplay.c:314
refresh_loop_wait_event
static void refresh_loop_wait_event(VideoState *is, SDL_Event *event)
Definition: ffplay.c:3308
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:88
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:557
stream_component_close
static void stream_component_close(VideoState *is, int stream_index)
Definition: ffplay.c:1209
VideoState::subtitleq
PacketQueue subtitleq
Definition: ffplay.c:276
cmdutils.h
AVPacket::pos
int64_t pos
byte position in stream, -1 if unknown
Definition: packet.h:559
cmp_audio_fmts
static int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1, enum AVSampleFormat fmt2, int64_t channel_count2)
Definition: ffplay.c:408
Decoder::decoder_tid
SDL_Thread * decoder_tid
Definition: ffplay.c:198
int32_t
int32_t
Definition: audioconvert.c:56
framedrop
static int framedrop
Definition: ffplay.c:336
VideoState::audio_stream
int audio_stream
Definition: ffplay.c:229
VideoState::audio_buf_size
unsigned int audio_buf_size
Definition: ffplay.c:244
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:482
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:434
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVFormatContext::start_time
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds.
Definition: avformat.h:1412
PacketQueue::abort_request
int abort_request
Definition: ffplay.c:118
av_fifo_freep2
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
Definition: fifo.c:286
VideoState::ic
AVFormatContext * ic
Definition: ffplay.c:214
VideoState::viddec
Decoder viddec
Definition: ffplay.c:226
h
h
Definition: vp9dsp_template.c:2070
AVERROR_EXIT
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
Definition: error.h:58
AVDictionaryEntry::value
char * value
Definition: dict.h:91
AVStream::start_time
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base.
Definition: avformat.h:797
VideoState::audio_buf
uint8_t * audio_buf
Definition: ffplay.c:242
AVFilterGraph::nb_filters
unsigned nb_filters
Definition: avfilter.h:763
avstring.h
frame_queue_peek_last
static Frame * frame_queue_peek_last(FrameQueue *f)
Definition: ffplay.c:739
VideoState::last_vis_time
double last_vis_time
Definition: ffplay.c:269
width
#define width
Definition: dsp.h:85
stream_seek
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int by_bytes)
Definition: ffplay.c:1483
decoder_reorder_pts
static int decoder_reorder_pts
Definition: ffplay.c:331
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
Definition: pixfmt.h:611
VideoState::audio_tgt
struct AudioParams audio_tgt
Definition: ffplay.c:252
afilters
static char * afilters
Definition: ffplay.c:347
AVChapter::time_base
AVRational time_base
time base in which the start/end timestamps are specified
Definition: avformat.h:1248
SwsContext
Definition: swscale_internal.h:299
VideoState::audclk
Clock audclk
Definition: ffplay.c:217
avfilter_get_class
const AVClass * avfilter_get_class(void)
Definition: avfilter.c:1618
AVFilterInOut
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:903
print_error
static void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.h:468
FrameQueue::pktq
PacketQueue * pktq
Definition: ffplay.c:177
short
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option keep it simple and lowercase description are short
Definition: writing_filters.txt:89
snprintf
#define snprintf
Definition: snprintf.h:34
video_audio_display
static void video_audio_display(VideoState *s)
Definition: ffplay.c:1058
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
AV_SYNC_THRESHOLD_MIN
#define AV_SYNC_THRESHOLD_MIN
Definition: ffplay.c:78
buffersrc.h
AudioParams::bytes_per_sec
int bytes_per_sec
Definition: ffplay.c:134
check_external_clock_speed
static void check_external_clock_speed(VideoState *is)
Definition: ffplay.c:1468
av_dict_iterate
const AVDictionaryEntry * av_dict_iterate(const AVDictionary *m, const AVDictionaryEntry *prev)
Iterate over a dictionary.
Definition: dict.c:44
AVSubtitle::start_display_time
uint32_t start_display_time
Definition: avcodec.h:2240
SAMPLE_CORRECTION_PERCENT_MAX
#define SAMPLE_CORRECTION_PERCENT_MAX
Definition: ffplay.c:87
EXTERNAL_CLOCK_SPEED_MAX
#define EXTERNAL_CLOCK_SPEED_MAX
Definition: ffplay.c:91
packet_queue_put_nullpacket
static int packet_queue_put_nullpacket(PacketQueue *q, AVPacket *pkt, int stream_index)
Definition: ffplay.c:463
duration
static int64_t duration
Definition: ffplay.c:327
AV_FIFO_FLAG_AUTO_GROW
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
Definition: fifo.h:63
MyAVPacketList::pkt
AVPacket * pkt
Definition: ffplay.c:109
swscale.h
is_realtime
static int is_realtime(AVFormatContext *s)
Definition: ffplay.c:2805
av_x_if_null
static void * av_x_if_null(const void *p, const void *x)
Return x default pointer in case p is NULL.
Definition: avutil.h:312
PacketQueue::pkt_list
AVFifo * pkt_list
Definition: ffplay.c:114
Frame::height
int height
Definition: ffplay.c:160
decode_interrupt_cb
static int decode_interrupt_cb(void *ctx)
Definition: ffplay.c:2792
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2885
VideoState::frame_timer
double frame_timer
Definition: ffplay.c:278
tx.h
VideoState::audio_clock_serial
int audio_clock_serial
Definition: ffplay.c:234
avdevice_register_all
FF_VISIBILITY_POP_HIDDEN av_cold void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:70
avio_feof
int avio_feof(AVIOContext *s)
Similar to feof() but also returns nonzero on read errors.
Definition: aviobuf.c:346
realloc_texture
static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
Definition: ffplay.c:837
AV_PIX_FMT_RGB444
#define AV_PIX_FMT_RGB444
Definition: pixfmt.h:467
exit_on_keydown
static int exit_on_keydown
Definition: ffplay.c:333