FFmpeg
ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include "config_components.h"
28 #include <inttypes.h>
29 #include <math.h>
30 #include <limits.h>
31 #include <signal.h>
32 #include <stdint.h>
33 
34 #include "libavutil/avstring.h"
35 #include "libavutil/channel_layout.h"
36 #include "libavutil/eval.h"
37 #include "libavutil/mathematics.h"
38 #include "libavutil/pixdesc.h"
39 #include "libavutil/imgutils.h"
40 #include "libavutil/dict.h"
41 #include "libavutil/fifo.h"
42 #include "libavutil/parseutils.h"
43 #include "libavutil/samplefmt.h"
44 #include "libavutil/time.h"
45 #include "libavutil/bprint.h"
46 #include "libavformat/avformat.h"
47 #include "libavdevice/avdevice.h"
48 #include "libswscale/swscale.h"
49 #include "libavutil/opt.h"
50 #include "libavcodec/avfft.h"
51 #include "libswresample/swresample.h"
52
53 #include "libavfilter/avfilter.h"
54 #include "libavfilter/buffersink.h"
55 #include "libavfilter/buffersrc.h"
56 
57 #include <SDL.h>
58 #include <SDL_thread.h>
59 
60 #include "cmdutils.h"
61 #include "opt_common.h"
62 
63 const char program_name[] = "ffplay";
64 const int program_birth_year = 2003;
65 
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_FRAMES 25
68 #define EXTERNAL_CLOCK_MIN_FRAMES 2
69 #define EXTERNAL_CLOCK_MAX_FRAMES 10
70 
71 /* Minimum SDL audio buffer size, in samples. */
72 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
73 /* Calculate actual buffer size keeping in mind not to cause too frequent audio callbacks */
74 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
75 
76 /* Step size for volume control in dB */
77 #define SDL_VOLUME_STEP (0.75)
78 
79 /* no AV sync correction is done if below the minimum AV sync threshold */
80 #define AV_SYNC_THRESHOLD_MIN 0.04
81 /* AV sync correction is done if above the maximum AV sync threshold */
82 #define AV_SYNC_THRESHOLD_MAX 0.1
83 /* If a frame duration is longer than this, it will not be duplicated to compensate AV sync */
84 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
85 /* no AV correction is done if too big error */
86 #define AV_NOSYNC_THRESHOLD 10.0
87 
88 /* maximum audio speed change to get correct sync */
89 #define SAMPLE_CORRECTION_PERCENT_MAX 10
90 
91 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
92 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
93 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
94 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
95 
96 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
97 #define AUDIO_DIFF_AVG_NB 20
98 
99 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
100 #define REFRESH_RATE 0.01
101 
102 /* NOTE: the size must be big enough to compensate the hardware audio buffer size */
103 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
104 #define SAMPLE_ARRAY_SIZE (8 * 65536)
105 
106 #define CURSOR_HIDE_DELAY 1000000
107 
108 #define USE_ONEPASS_SUBTITLE_RENDER 1
109 
110 typedef struct MyAVPacketList {
111  AVPacket *pkt;
112  int serial;
113 } MyAVPacketList;
114
115 typedef struct PacketQueue {
116  AVFifo *pkt_list;
117  int nb_packets;
118  int size;
119  int64_t duration;
120  int abort_request;
121  int serial;
122  SDL_mutex *mutex;
123  SDL_cond *cond;
124 } PacketQueue;
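/* A queue's serial is incremented on every flush (seek, stream switch); packets
 * queued afterwards carry the new serial, so decoders and clocks can detect and
 * drop data that belongs to an obsolete playback position. */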
125 
126 #define VIDEO_PICTURE_QUEUE_SIZE 3
127 #define SUBPICTURE_QUEUE_SIZE 16
128 #define SAMPLE_QUEUE_SIZE 9
129 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
130 
131 typedef struct AudioParams {
132  int freq;
133  AVChannelLayout ch_layout;
134  enum AVSampleFormat fmt;
135  int frame_size;
136  int bytes_per_sec;
137 } AudioParams;
138 
139 typedef struct Clock {
140  double pts; /* clock base */
141  double pts_drift; /* clock base minus time at which we updated the clock */
142  double last_updated;
143  double speed;
144  int serial; /* clock is based on a packet with this serial */
145  int paused;
146  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
147 } Clock;
148 
149 typedef struct FrameData {
150  int64_t pkt_pos;
151 } FrameData;
152 
153 /* Common struct for handling all types of decoded data and allocated render buffers. */
154 typedef struct Frame {
155  AVFrame *frame;
156  AVSubtitle sub;
157  int serial;
158  double pts; /* presentation timestamp for the frame */
159  double duration; /* estimated duration of the frame */
160  int64_t pos; /* byte position of the frame in the input file */
161  int width;
162  int height;
163  int format;
164  AVRational sar;
165  int uploaded;
166  int flip_v;
167 } Frame;
168 
169 typedef struct FrameQueue {
170  Frame queue[FRAME_QUEUE_SIZE];
171  int rindex;
172  int windex;
173  int size;
174  int max_size;
175  int keep_last;
176  int rindex_shown;
177  SDL_mutex *mutex;
178  SDL_cond *cond;
179  PacketQueue *pktq;
180 } FrameQueue;
181 
182 enum {
183  AV_SYNC_AUDIO_MASTER, /* default choice */
184  AV_SYNC_VIDEO_MASTER, /* synchronize to the video clock */
185  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
186 };
187 
188 typedef struct Decoder {
189  AVPacket *pkt;
190  PacketQueue *queue;
191  AVCodecContext *avctx;
192  int pkt_serial;
193  int finished;
194  int packet_pending;
195  SDL_cond *empty_queue_cond;
196  int64_t start_pts;
197  AVRational start_pts_tb;
198  int64_t next_pts;
199  AVRational next_pts_tb;
200  SDL_Thread *decoder_tid;
201 } Decoder;
202 
203 typedef struct VideoState {
204  SDL_Thread *read_tid;
208  int paused;
211  int seek_req;
213  int64_t seek_pos;
214  int64_t seek_rel;
217  int realtime;
218 
222 
226 
230 
232 
234 
235  double audio_clock;
237  double audio_diff_cum; /* used for AV difference average computation */
244  uint8_t *audio_buf;
245  uint8_t *audio_buf1;
246  unsigned int audio_buf_size; /* in bytes */
247  unsigned int audio_buf1_size;
248  int audio_buf_index; /* in bytes */
251  int muted;
258 
259  enum ShowMode {
260  SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
261  } show_mode;
268  int xpos;
270  SDL_Texture *vis_texture;
271  SDL_Texture *sub_texture;
272  SDL_Texture *vid_texture;
273 
277 
278  double frame_timer;
284  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
286  int eof;
287 
288  char *filename;
290  int step;
291 
293  AVFilterContext *in_video_filter; // the first filter in the video chain
294  AVFilterContext *out_video_filter; // the last filter in the video chain
295  AVFilterContext *in_audio_filter; // the first filter in the audio chain
296  AVFilterContext *out_audio_filter; // the last filter in the audio chain
297  AVFilterGraph *agraph; // audio filter graph
298 
300 
302 } VideoState;
303 
304 /* options specified by the user */
305 static const AVInputFormat *file_iformat;
306 static const char *input_filename;
307 static const char *window_title;
308 static int default_width = 640;
309 static int default_height = 480;
310 static int screen_width = 0;
311 static int screen_height = 0;
312 static int screen_left = SDL_WINDOWPOS_CENTERED;
313 static int screen_top = SDL_WINDOWPOS_CENTERED;
314 static int audio_disable;
315 static int video_disable;
316 static int subtitle_disable;
317 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
318 static int seek_by_bytes = -1;
319 static float seek_interval = 10;
320 static int display_disable;
321 static int borderless;
322 static int alwaysontop;
323 static int startup_volume = 100;
324 static int show_status = -1;
325 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
326 static int64_t start_time = AV_NOPTS_VALUE;
327 static int64_t duration = AV_NOPTS_VALUE;
328 static int fast = 0;
329 static int genpts = 0;
330 static int lowres = 0;
331 static int decoder_reorder_pts = -1;
332 static int autoexit;
333 static int exit_on_keydown;
334 static int exit_on_mousedown;
335 static int loop = 1;
336 static int framedrop = -1;
337 static int infinite_buffer = -1;
338 static enum ShowMode show_mode = SHOW_MODE_NONE;
339 static const char *audio_codec_name;
340 static const char *subtitle_codec_name;
341 static const char *video_codec_name;
342 double rdftspeed = 0.02;
343 static int64_t cursor_last_shown;
344 static int cursor_hidden = 0;
345 static const char **vfilters_list = NULL;
346 static int nb_vfilters = 0;
347 static char *afilters = NULL;
348 static int autorotate = 1;
349 static int find_stream_info = 1;
350 static int filter_nbthreads = 0;
351 
352 /* current context */
353 static int is_full_screen;
354 static int64_t audio_callback_time;
355 
356 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
357 
358 static SDL_Window *window;
359 static SDL_Renderer *renderer;
360 static SDL_RendererInfo renderer_info = {0};
361 static SDL_AudioDeviceID audio_dev;
362 
363 static const struct TextureFormatEntry {
364  enum AVPixelFormat format;
365  int texture_fmt;
366 } sdl_texture_format_map[] = {
367  { AV_PIX_FMT_RGB8, SDL_PIXELFORMAT_RGB332 },
368  { AV_PIX_FMT_RGB444, SDL_PIXELFORMAT_RGB444 },
369  { AV_PIX_FMT_RGB555, SDL_PIXELFORMAT_RGB555 },
370  { AV_PIX_FMT_BGR555, SDL_PIXELFORMAT_BGR555 },
371  { AV_PIX_FMT_RGB565, SDL_PIXELFORMAT_RGB565 },
372  { AV_PIX_FMT_BGR565, SDL_PIXELFORMAT_BGR565 },
373  { AV_PIX_FMT_RGB24, SDL_PIXELFORMAT_RGB24 },
374  { AV_PIX_FMT_BGR24, SDL_PIXELFORMAT_BGR24 },
375  { AV_PIX_FMT_0RGB32, SDL_PIXELFORMAT_RGB888 },
376  { AV_PIX_FMT_0BGR32, SDL_PIXELFORMAT_BGR888 },
377  { AV_PIX_FMT_NE(RGB0, 0BGR), SDL_PIXELFORMAT_RGBX8888 },
378  { AV_PIX_FMT_NE(BGR0, 0RGB), SDL_PIXELFORMAT_BGRX8888 },
379  { AV_PIX_FMT_RGB32, SDL_PIXELFORMAT_ARGB8888 },
380  { AV_PIX_FMT_RGB32_1, SDL_PIXELFORMAT_RGBA8888 },
381  { AV_PIX_FMT_BGR32, SDL_PIXELFORMAT_ABGR8888 },
382  { AV_PIX_FMT_BGR32_1, SDL_PIXELFORMAT_BGRA8888 },
383  { AV_PIX_FMT_YUV420P, SDL_PIXELFORMAT_IYUV },
384  { AV_PIX_FMT_YUYV422, SDL_PIXELFORMAT_YUY2 },
385  { AV_PIX_FMT_UYVY422, SDL_PIXELFORMAT_UYVY },
386  { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_UNKNOWN },
387 };
388 
389 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
390 {
391  GROW_ARRAY(vfilters_list, nb_vfilters);
392  vfilters_list[nb_vfilters - 1] = arg;
393  return 0;
394 }
395 
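/* With a single channel, the planar and packed variants of a sample format have
 * an identical memory layout, so only the packed base format is compared below;
 * otherwise any change in channel count or sample format counts as different. */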
396 static inline
397 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
398  enum AVSampleFormat fmt2, int64_t channel_count2)
399 {
400  /* If channel count == 1, planar and non-planar formats are the same */
401  if (channel_count1 == 1 && channel_count2 == 1)
402  return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
403  else
404  return channel_count1 != channel_count2 || fmt1 != fmt2;
405 }
406 
407 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
408 {
409  MyAVPacketList pkt1;
410  int ret;
411 
412  if (q->abort_request)
413  return -1;
414 
415 
416  pkt1.pkt = pkt;
417  pkt1.serial = q->serial;
418 
419  ret = av_fifo_write(q->pkt_list, &pkt1, 1);
420  if (ret < 0)
421  return ret;
422  q->nb_packets++;
423  q->size += pkt1.pkt->size + sizeof(pkt1);
424  q->duration += pkt1.pkt->duration;
425  /* XXX: should duplicate packet data in DV case */
426  SDL_CondSignal(q->cond);
427  return 0;
428 }
429 
430 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
431 {
432  AVPacket *pkt1;
433  int ret;
434 
435  pkt1 = av_packet_alloc();
436  if (!pkt1) {
437  av_packet_unref(pkt);
438  return -1;
439  }
440  av_packet_move_ref(pkt1, pkt);
441 
442  SDL_LockMutex(q->mutex);
443  ret = packet_queue_put_private(q, pkt1);
444  SDL_UnlockMutex(q->mutex);
445 
446  if (ret < 0)
447  av_packet_free(&pkt1);
448 
449  return ret;
450 }
451 
452 static int packet_queue_put_nullpacket(PacketQueue *q, AVPacket *pkt, int stream_index)
453 {
454  pkt->stream_index = stream_index;
455  return packet_queue_put(q, pkt);
456 }
457 
458 /* packet queue handling */
459 static int packet_queue_init(PacketQueue *q)
460 {
461  memset(q, 0, sizeof(PacketQueue));
462  q->pkt_list = av_fifo_alloc2(1, sizeof(MyAVPacketList), AV_FIFO_FLAG_AUTO_GROW);
463  if (!q->pkt_list)
464  return AVERROR(ENOMEM);
465  q->mutex = SDL_CreateMutex();
466  if (!q->mutex) {
467  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
468  return AVERROR(ENOMEM);
469  }
470  q->cond = SDL_CreateCond();
471  if (!q->cond) {
472  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
473  return AVERROR(ENOMEM);
474  }
475  q->abort_request = 1;
476  return 0;
477 }
478 
479 static void packet_queue_flush(PacketQueue *q)
480 {
481  MyAVPacketList pkt1;
482 
483  SDL_LockMutex(q->mutex);
484  while (av_fifo_read(q->pkt_list, &pkt1, 1) >= 0)
485  av_packet_free(&pkt1.pkt);
486  q->nb_packets = 0;
487  q->size = 0;
488  q->duration = 0;
489  q->serial++;
490  SDL_UnlockMutex(q->mutex);
491 }
492 
493 static void packet_queue_destroy(PacketQueue *q)
494 {
495  packet_queue_flush(q);
496  av_fifo_freep2(&q->pkt_list);
497  SDL_DestroyMutex(q->mutex);
498  SDL_DestroyCond(q->cond);
499 }
500 
501 static void packet_queue_abort(PacketQueue *q)
502 {
503  SDL_LockMutex(q->mutex);
504 
505  q->abort_request = 1;
506 
507  SDL_CondSignal(q->cond);
508 
509  SDL_UnlockMutex(q->mutex);
510 }
511 
512 static void packet_queue_start(PacketQueue *q)
513 {
514  SDL_LockMutex(q->mutex);
515  q->abort_request = 0;
516  q->serial++;
517  SDL_UnlockMutex(q->mutex);
518 }
519 
520 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
521 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
522 {
523  MyAVPacketList pkt1;
524  int ret;
525 
526  SDL_LockMutex(q->mutex);
527 
528  for (;;) {
529  if (q->abort_request) {
530  ret = -1;
531  break;
532  }
533 
534  if (av_fifo_read(q->pkt_list, &pkt1, 1) >= 0) {
535  q->nb_packets--;
536  q->size -= pkt1.pkt->size + sizeof(pkt1);
537  q->duration -= pkt1.pkt->duration;
538  av_packet_move_ref(pkt, pkt1.pkt);
539  if (serial)
540  *serial = pkt1.serial;
541  av_packet_free(&pkt1.pkt);
542  ret = 1;
543  break;
544  } else if (!block) {
545  ret = 0;
546  break;
547  } else {
548  SDL_CondWait(q->cond, q->mutex);
549  }
550  }
551  SDL_UnlockMutex(q->mutex);
552  return ret;
553 }
554 
555 static int decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
556  memset(d, 0, sizeof(Decoder));
557  d->pkt = av_packet_alloc();
558  if (!d->pkt)
559  return AVERROR(ENOMEM);
560  d->avctx = avctx;
561  d->queue = queue;
562  d->empty_queue_cond = empty_queue_cond;
563  d->start_pts = AV_NOPTS_VALUE;
564  d->pkt_serial = -1;
565  return 0;
566 }
567 
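/* Pulls decoded frames (or subtitles) out of the codec. When the packet queue
 * serial changes (e.g. after a seek), the codec is flushed and pending output
 * is discarded, so only data belonging to the current playback position is
 * returned: 1 for a frame, 0 on end of stream for this serial, -1 on abort. */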
568 static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
569  int ret = AVERROR(EAGAIN);
570 
571  for (;;) {
572  if (d->queue->serial == d->pkt_serial) {
573  do {
574  if (d->queue->abort_request)
575  return -1;
576 
577  switch (d->avctx->codec_type) {
578  case AVMEDIA_TYPE_VIDEO:
579  ret = avcodec_receive_frame(d->avctx, frame);
580  if (ret >= 0) {
581  if (decoder_reorder_pts == -1) {
582  frame->pts = frame->best_effort_timestamp;
583  } else if (!decoder_reorder_pts) {
584  frame->pts = frame->pkt_dts;
585  }
586  }
587  break;
588  case AVMEDIA_TYPE_AUDIO:
589  ret = avcodec_receive_frame(d->avctx, frame);
590  if (ret >= 0) {
591  AVRational tb = (AVRational){1, frame->sample_rate};
592  if (frame->pts != AV_NOPTS_VALUE)
593  frame->pts = av_rescale_q(frame->pts, d->avctx->pkt_timebase, tb);
594  else if (d->next_pts != AV_NOPTS_VALUE)
595  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
596  if (frame->pts != AV_NOPTS_VALUE) {
597  d->next_pts = frame->pts + frame->nb_samples;
598  d->next_pts_tb = tb;
599  }
600  }
601  break;
602  }
603  if (ret == AVERROR_EOF) {
604  d->finished = d->pkt_serial;
605  avcodec_flush_buffers(d->avctx);
606  return 0;
607  }
608  if (ret >= 0)
609  return 1;
610  } while (ret != AVERROR(EAGAIN));
611  }
612 
613  do {
614  if (d->queue->nb_packets == 0)
615  SDL_CondSignal(d->empty_queue_cond);
616  if (d->packet_pending) {
617  d->packet_pending = 0;
618  } else {
619  int old_serial = d->pkt_serial;
620  if (packet_queue_get(d->queue, d->pkt, 1, &d->pkt_serial) < 0)
621  return -1;
622  if (old_serial != d->pkt_serial) {
623  avcodec_flush_buffers(d->avctx);
624  d->finished = 0;
625  d->next_pts = d->start_pts;
626  d->next_pts_tb = d->start_pts_tb;
627  }
628  }
629  if (d->queue->serial == d->pkt_serial)
630  break;
631  av_packet_unref(d->pkt);
632  } while (1);
633 
634  if (d->avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
635  int got_frame = 0;
636  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, d->pkt);
637  if (ret < 0) {
638  ret = AVERROR(EAGAIN);
639  } else {
640  if (got_frame && !d->pkt->data) {
641  d->packet_pending = 1;
642  }
643  ret = got_frame ? 0 : (d->pkt->data ? AVERROR(EAGAIN) : AVERROR_EOF);
644  }
645  av_packet_unref(d->pkt);
646  } else {
647  if (d->pkt->buf && !d->pkt->opaque_ref) {
648  FrameData *fd;
649 
650  d->pkt->opaque_ref = av_buffer_allocz(sizeof(*fd));
651  if (!d->pkt->opaque_ref)
652  return AVERROR(ENOMEM);
653  fd = (FrameData*)d->pkt->opaque_ref->data;
654  fd->pkt_pos = d->pkt->pos;
655  }
656 
657  if (avcodec_send_packet(d->avctx, d->pkt) == AVERROR(EAGAIN)) {
658  av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
659  d->packet_pending = 1;
660  } else {
661  av_packet_unref(d->pkt);
662  }
663  }
664  }
665 }
666 
667 static void decoder_destroy(Decoder *d) {
668  av_packet_free(&d->pkt);
669  avcodec_free_context(&d->avctx);
670 }
671 
672 static void frame_queue_unref_item(Frame *vp)
673 {
674  av_frame_unref(vp->frame);
675  avsubtitle_free(&vp->sub);
676 }
677 
678 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
679 {
680  int i;
681  memset(f, 0, sizeof(FrameQueue));
682  if (!(f->mutex = SDL_CreateMutex())) {
683  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
684  return AVERROR(ENOMEM);
685  }
686  if (!(f->cond = SDL_CreateCond())) {
687  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
688  return AVERROR(ENOMEM);
689  }
690  f->pktq = pktq;
691  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
692  f->keep_last = !!keep_last;
693  for (i = 0; i < f->max_size; i++)
694  if (!(f->queue[i].frame = av_frame_alloc()))
695  return AVERROR(ENOMEM);
696  return 0;
697 }
698 
699 static void frame_queue_destory(FrameQueue *f)
700 {
701  int i;
702  for (i = 0; i < f->max_size; i++) {
703  Frame *vp = &f->queue[i];
704  frame_queue_unref_item(vp);
705  av_frame_free(&vp->frame);
706  }
707  SDL_DestroyMutex(f->mutex);
708  SDL_DestroyCond(f->cond);
709 }
710 
711 static void frame_queue_signal(FrameQueue *f)
712 {
713  SDL_LockMutex(f->mutex);
714  SDL_CondSignal(f->cond);
715  SDL_UnlockMutex(f->mutex);
716 }
717 
718 static Frame *frame_queue_peek(FrameQueue *f)
719 {
720  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
721 }
722 
723 static Frame *frame_queue_peek_next(FrameQueue *f)
724 {
725  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
726 }
727 
728 static Frame *frame_queue_peek_last(FrameQueue *f)
729 {
730  return &f->queue[f->rindex];
731 }
732 
733 static Frame *frame_queue_peek_writable(FrameQueue *f)
734 {
735  /* wait until we have space to put a new frame */
736  SDL_LockMutex(f->mutex);
737  while (f->size >= f->max_size &&
738  !f->pktq->abort_request) {
739  SDL_CondWait(f->cond, f->mutex);
740  }
741  SDL_UnlockMutex(f->mutex);
742 
743  if (f->pktq->abort_request)
744  return NULL;
745 
746  return &f->queue[f->windex];
747 }
748 
749 static Frame *frame_queue_peek_readable(FrameQueue *f)
750 {
751  /* wait until we have a readable new frame */
752  SDL_LockMutex(f->mutex);
753  while (f->size - f->rindex_shown <= 0 &&
754  !f->pktq->abort_request) {
755  SDL_CondWait(f->cond, f->mutex);
756  }
757  SDL_UnlockMutex(f->mutex);
758 
759  if (f->pktq->abort_request)
760  return NULL;
761 
762  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
763 }
764 
765 static void frame_queue_push(FrameQueue *f)
766 {
767  if (++f->windex == f->max_size)
768  f->windex = 0;
769  SDL_LockMutex(f->mutex);
770  f->size++;
771  SDL_CondSignal(f->cond);
772  SDL_UnlockMutex(f->mutex);
773 }
774 
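/* Advances the read index. With keep_last set, the first call only marks the
 * current frame as shown (rindex_shown = 1) instead of dropping it, so the most
 * recently displayed frame stays available for re-display and for
 * frame-duration calculations. */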
775 static void frame_queue_next(FrameQueue *f)
776 {
777  if (f->keep_last && !f->rindex_shown) {
778  f->rindex_shown = 1;
779  return;
780  }
781  frame_queue_unref_item(&f->queue[f->rindex]);
782  if (++f->rindex == f->max_size)
783  f->rindex = 0;
784  SDL_LockMutex(f->mutex);
785  f->size--;
786  SDL_CondSignal(f->cond);
787  SDL_UnlockMutex(f->mutex);
788 }
789 
790 /* return the number of undisplayed frames in the queue */
791 static int frame_queue_nb_remaining(FrameQueue *f)
792 {
793  return f->size - f->rindex_shown;
794 }
795 
796 /* return last shown position */
797 static int64_t frame_queue_last_pos(FrameQueue *f)
798 {
799  Frame *fp = &f->queue[f->rindex];
800  if (f->rindex_shown && fp->serial == f->pktq->serial)
801  return fp->pos;
802  else
803  return -1;
804 }
805 
806 static void decoder_abort(Decoder *d, FrameQueue *fq)
807 {
808  packet_queue_abort(d->queue);
809  frame_queue_signal(fq);
810  SDL_WaitThread(d->decoder_tid, NULL);
811  d->decoder_tid = NULL;
812  packet_queue_flush(d->queue);
813 }
814 
815 static inline void fill_rectangle(int x, int y, int w, int h)
816 {
817  SDL_Rect rect;
818  rect.x = x;
819  rect.y = y;
820  rect.w = w;
821  rect.h = h;
822  if (w && h)
823  SDL_RenderFillRect(renderer, &rect);
824 }
825 
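/* (Re)creates the streaming texture only when the requested format or size
 * differs from the existing one, optionally clearing the new texture to zero. */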
826 static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
827 {
828  Uint32 format;
829  int access, w, h;
830  if (!*texture || SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
831  void *pixels;
832  int pitch;
833  if (*texture)
834  SDL_DestroyTexture(*texture);
835  if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
836  return -1;
837  if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
838  return -1;
839  if (init_texture) {
840  if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
841  return -1;
842  memset(pixels, 0, pitch * new_height);
843  SDL_UnlockTexture(*texture);
844  }
845  av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n", new_width, new_height, SDL_GetPixelFormatName(new_format));
846  }
847  return 0;
848 }
849 
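/* Fits the picture into the given screen area while preserving its display
 * aspect ratio (sample aspect ratio * width / height): the height is taken from
 * the screen and the width derived from it, or the other way round if that
 * would overflow, with the result rounded down to an even size (& ~1). */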
850 static void calculate_display_rect(SDL_Rect *rect,
851  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
852  int pic_width, int pic_height, AVRational pic_sar)
853 {
854  AVRational aspect_ratio = pic_sar;
855  int64_t width, height, x, y;
856 
857  if (av_cmp_q(aspect_ratio, av_make_q(0, 1)) <= 0)
858  aspect_ratio = av_make_q(1, 1);
859 
860  aspect_ratio = av_mul_q(aspect_ratio, av_make_q(pic_width, pic_height));
861 
862  /* XXX: we suppose the screen has a 1.0 pixel ratio */
863  height = scr_height;
864  width = av_rescale(height, aspect_ratio.num, aspect_ratio.den) & ~1;
865  if (width > scr_width) {
866  width = scr_width;
867  height = av_rescale(width, aspect_ratio.den, aspect_ratio.num) & ~1;
868  }
869  x = (scr_width - width) / 2;
870  y = (scr_height - height) / 2;
871  rect->x = scr_xleft + x;
872  rect->y = scr_ytop + y;
873  rect->w = FFMAX((int)width, 1);
874  rect->h = FFMAX((int)height, 1);
875 }
876 
877 static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
878 {
879  int i;
880  *sdl_blendmode = SDL_BLENDMODE_NONE;
881  *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN;
882  if (format == AV_PIX_FMT_RGB32 ||
883  format == AV_PIX_FMT_RGB32_1 ||
884  format == AV_PIX_FMT_BGR32 ||
885  format == AV_PIX_FMT_BGR32_1)
886  *sdl_blendmode = SDL_BLENDMODE_BLEND;
887  for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; i++) {
888  if (format == sdl_texture_format_map[i].format) {
889  *sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt;
890  return;
891  }
892  }
893 }
894 
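/* Uploads a decoded frame into the SDL texture: planar YUV420P goes through
 * SDL_UpdateYUVTexture(), everything else through SDL_UpdateTexture(); negative
 * linesizes (bottom-up frames) are handled by starting from the last line. */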
895 static int upload_texture(SDL_Texture **tex, AVFrame *frame)
896 {
897  int ret = 0;
898  Uint32 sdl_pix_fmt;
899  SDL_BlendMode sdl_blendmode;
900  get_sdl_pix_fmt_and_blendmode(frame->format, &sdl_pix_fmt, &sdl_blendmode);
901  if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
902  return -1;
903  switch (sdl_pix_fmt) {
904  case SDL_PIXELFORMAT_IYUV:
905  if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
906  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0],
907  frame->data[1], frame->linesize[1],
908  frame->data[2], frame->linesize[2]);
909  } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
910  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0],
911  frame->data[1] + frame->linesize[1] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[1],
912  frame->data[2] + frame->linesize[2] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[2]);
913  } else {
914  av_log(NULL, AV_LOG_ERROR, "Mixed negative and positive linesizes are not supported.\n");
915  return -1;
916  }
917  break;
918  default:
919  if (frame->linesize[0] < 0) {
920  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
921  } else {
922  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0], frame->linesize[0]);
923  }
924  break;
925  }
926  return ret;
927 }
928 
929 static void set_sdl_yuv_conversion_mode(AVFrame *frame)
930 {
931 #if SDL_VERSION_ATLEAST(2,0,8)
932  SDL_YUV_CONVERSION_MODE mode = SDL_YUV_CONVERSION_AUTOMATIC;
933  if (frame && (frame->format == AV_PIX_FMT_YUV420P || frame->format == AV_PIX_FMT_YUYV422 || frame->format == AV_PIX_FMT_UYVY422)) {
934  if (frame->color_range == AVCOL_RANGE_JPEG)
935  mode = SDL_YUV_CONVERSION_JPEG;
936  else if (frame->colorspace == AVCOL_SPC_BT709)
937  mode = SDL_YUV_CONVERSION_BT709;
938  else if (frame->colorspace == AVCOL_SPC_BT470BG || frame->colorspace == AVCOL_SPC_SMPTE170M)
939  mode = SDL_YUV_CONVERSION_BT601;
940  }
941  SDL_SetYUVConversionMode(mode); /* FIXME: no support for linear transfer */
942 #endif
943 }
944 
945 static void video_image_display(VideoState *is)
946 {
947  Frame *vp;
948  Frame *sp = NULL;
949  SDL_Rect rect;
950 
951  vp = frame_queue_peek_last(&is->pictq);
952  if (is->subtitle_st) {
953  if (frame_queue_nb_remaining(&is->subpq) > 0) {
954  sp = frame_queue_peek(&is->subpq);
955 
956  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
957  if (!sp->uploaded) {
958  uint8_t* pixels[4];
959  int pitch[4];
960  int i;
961  if (!sp->width || !sp->height) {
962  sp->width = vp->width;
963  sp->height = vp->height;
964  }
965  if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
966  return;
967 
968  for (i = 0; i < sp->sub.num_rects; i++) {
969  AVSubtitleRect *sub_rect = sp->sub.rects[i];
970 
971  sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
972  sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
973  sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x);
974  sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
975 
976  is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
977  sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8,
978  sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA,
979  0, NULL, NULL, NULL);
980  if (!is->sub_convert_ctx) {
981  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
982  return;
983  }
984  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) {
985  sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
986  0, sub_rect->h, pixels, pitch);
987  SDL_UnlockTexture(is->sub_texture);
988  }
989  }
990  sp->uploaded = 1;
991  }
992  } else
993  sp = NULL;
994  }
995  }
996 
997  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
998  set_sdl_yuv_conversion_mode(vp->frame);
999
1000  if (!vp->uploaded) {
1001  if (upload_texture(&is->vid_texture, vp->frame) < 0) {
1002  set_sdl_yuv_conversion_mode(NULL);
1003  return;
1004  }
1005  vp->uploaded = 1;
1006  vp->flip_v = vp->frame->linesize[0] < 0;
1007  }
1008 
1009  SDL_RenderCopyEx(renderer, is->vid_texture, NULL, &rect, 0, NULL, vp->flip_v ? SDL_FLIP_VERTICAL : 0);
1010  set_sdl_yuv_conversion_mode(NULL);
1011  if (sp) {
1012 #if USE_ONEPASS_SUBTITLE_RENDER
1013  SDL_RenderCopy(renderer, is->sub_texture, NULL, &rect);
1014 #else
1015  int i;
1016  double xratio = (double)rect.w / (double)sp->width;
1017  double yratio = (double)rect.h / (double)sp->height;
1018  for (i = 0; i < sp->sub.num_rects; i++) {
1019  SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
1020  SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
1021  .y = rect.y + sub_rect->y * yratio,
1022  .w = sub_rect->w * xratio,
1023  .h = sub_rect->h * yratio};
1024  SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
1025  }
1026 #endif
1027  }
1028 }
1029 
1030 static inline int compute_mod(int a, int b)
1031 {
1032  return a < 0 ? a%b + b : a%b;
1033 }
1034 
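/* Audio visualization: in SHOW_MODE_WAVES the most recent samples are drawn as
 * one waveform per channel; in SHOW_MODE_RDFT one spectrogram column per refresh
 * is computed with av_rdft_calc() and written into vis_texture at xpos. */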
1035 static void video_audio_display(VideoState *s)
1036 {
1037  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1038  int ch, channels, h, h2;
1039  int64_t time_diff;
1040  int rdft_bits, nb_freq;
1041 
1042  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1043  ;
1044  nb_freq = 1 << (rdft_bits - 1);
1045 
1046  /* compute display index : center on currently output samples */
1047  channels = s->audio_tgt.ch_layout.nb_channels;
1048  nb_display_channels = channels;
1049  if (!s->paused) {
1050  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1051  n = 2 * channels;
1052  delay = s->audio_write_buf_size;
1053  delay /= n;
1054 
1055  /* to be more precise, we take into account the time spent since
1056  the last buffer computation */
1057  if (audio_callback_time) {
1058  time_diff = av_gettime_relative() - audio_callback_time;
1059  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1060  }
1061 
1062  delay += 2 * data_used;
1063  if (delay < data_used)
1064  delay = data_used;
1065 
1066  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1067  if (s->show_mode == SHOW_MODE_WAVES) {
1068  h = INT_MIN;
1069  for (i = 0; i < 1000; i += channels) {
1070  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1071  int a = s->sample_array[idx];
1072  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1073  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1074  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1075  int score = a - d;
1076  if (h < score && (b ^ c) < 0) {
1077  h = score;
1078  i_start = idx;
1079  }
1080  }
1081  }
1082 
1083  s->last_i_start = i_start;
1084  } else {
1085  i_start = s->last_i_start;
1086  }
1087 
1088  if (s->show_mode == SHOW_MODE_WAVES) {
1089  SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
1090 
1091  /* total height for one channel */
1092  h = s->height / nb_display_channels;
1093  /* graph height / 2 */
1094  h2 = (h * 9) / 20;
1095  for (ch = 0; ch < nb_display_channels; ch++) {
1096  i = i_start + ch;
1097  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1098  for (x = 0; x < s->width; x++) {
1099  y = (s->sample_array[i] * h2) >> 15;
1100  if (y < 0) {
1101  y = -y;
1102  ys = y1 - y;
1103  } else {
1104  ys = y1;
1105  }
1106  fill_rectangle(s->xleft + x, ys, 1, y);
1107  i += channels;
1108  if (i >= SAMPLE_ARRAY_SIZE)
1109  i -= SAMPLE_ARRAY_SIZE;
1110  }
1111  }
1112 
1113  SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
1114 
1115  for (ch = 1; ch < nb_display_channels; ch++) {
1116  y = s->ytop + ch * h;
1117  fill_rectangle(s->xleft, y, s->width, 1);
1118  }
1119  } else {
1120  if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
1121  return;
1122 
1123  if (s->xpos >= s->width)
1124  s->xpos = 0;
1125  nb_display_channels= FFMIN(nb_display_channels, 2);
1126  if (rdft_bits != s->rdft_bits) {
1127  av_rdft_end(s->rdft);
1128  av_free(s->rdft_data);
1129  s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
1130  s->rdft_bits = rdft_bits;
1131  s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data));
1132  }
1133  if (!s->rdft || !s->rdft_data){
1134  av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1135  s->show_mode = SHOW_MODE_WAVES;
1136  } else {
1137  FFTSample *data[2];
1138  SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
1139  uint32_t *pixels;
1140  int pitch;
1141  for (ch = 0; ch < nb_display_channels; ch++) {
1142  data[ch] = s->rdft_data + 2 * nb_freq * ch;
1143  i = i_start + ch;
1144  for (x = 0; x < 2 * nb_freq; x++) {
1145  double w = (x-nb_freq) * (1.0 / nb_freq);
1146  data[ch][x] = s->sample_array[i] * (1.0 - w * w);
1147  i += channels;
1148  if (i >= SAMPLE_ARRAY_SIZE)
1149  i -= SAMPLE_ARRAY_SIZE;
1150  }
1151  av_rdft_calc(s->rdft, data[ch]);
1152  }
1153  /* Least efficient way to do this; we could of course access the
1154  * texture directly, but this is more than fast enough. */
1155  if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
1156  pitch >>= 2;
1157  pixels += pitch * s->height;
1158  for (y = 0; y < s->height; y++) {
1159  double w = 1 / sqrt(nb_freq);
1160  int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
1161  int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][2 * y + 0], data[1][2 * y + 1]))
1162  : a;
1163  a = FFMIN(a, 255);
1164  b = FFMIN(b, 255);
1165  pixels -= pitch;
1166  *pixels = (a << 16) + (b << 8) + ((a+b) >> 1);
1167  }
1168  SDL_UnlockTexture(s->vis_texture);
1169  }
1170  SDL_RenderCopy(renderer, s->vis_texture, NULL, NULL);
1171  }
1172  if (!s->paused)
1173  s->xpos++;
1174  }
1175 }
1176 
1177 static void stream_component_close(VideoState *is, int stream_index)
1178 {
1179  AVFormatContext *ic = is->ic;
1180  AVCodecParameters *codecpar;
1181 
1182  if (stream_index < 0 || stream_index >= ic->nb_streams)
1183  return;
1184  codecpar = ic->streams[stream_index]->codecpar;
1185 
1186  switch (codecpar->codec_type) {
1187  case AVMEDIA_TYPE_AUDIO:
1188  decoder_abort(&is->auddec, &is->sampq);
1189  SDL_CloseAudioDevice(audio_dev);
1190  decoder_destroy(&is->auddec);
1191  swr_free(&is->swr_ctx);
1192  av_freep(&is->audio_buf1);
1193  is->audio_buf1_size = 0;
1194  is->audio_buf = NULL;
1195 
1196  if (is->rdft) {
1197  av_rdft_end(is->rdft);
1198  av_freep(&is->rdft_data);
1199  is->rdft = NULL;
1200  is->rdft_bits = 0;
1201  }
1202  break;
1203  case AVMEDIA_TYPE_VIDEO:
1204  decoder_abort(&is->viddec, &is->pictq);
1205  decoder_destroy(&is->viddec);
1206  break;
1207  case AVMEDIA_TYPE_SUBTITLE:
1208  decoder_abort(&is->subdec, &is->subpq);
1209  decoder_destroy(&is->subdec);
1210  break;
1211  default:
1212  break;
1213  }
1214 
1215  ic->streams[stream_index]->discard = AVDISCARD_ALL;
1216  switch (codecpar->codec_type) {
1217  case AVMEDIA_TYPE_AUDIO:
1218  is->audio_st = NULL;
1219  is->audio_stream = -1;
1220  break;
1221  case AVMEDIA_TYPE_VIDEO:
1222  is->video_st = NULL;
1223  is->video_stream = -1;
1224  break;
1225  case AVMEDIA_TYPE_SUBTITLE:
1226  is->subtitle_st = NULL;
1227  is->subtitle_stream = -1;
1228  break;
1229  default:
1230  break;
1231  }
1232 }
1233 
1234 static void stream_close(VideoState *is)
1235 {
1236  /* XXX: use a special url_shutdown call to abort parse cleanly */
1237  is->abort_request = 1;
1238  SDL_WaitThread(is->read_tid, NULL);
1239 
1240  /* close each stream */
1241  if (is->audio_stream >= 0)
1242  stream_component_close(is, is->audio_stream);
1243  if (is->video_stream >= 0)
1244  stream_component_close(is, is->video_stream);
1245  if (is->subtitle_stream >= 0)
1246  stream_component_close(is, is->subtitle_stream);
1247 
1248  avformat_close_input(&is->ic);
1249 
1250  packet_queue_destroy(&is->videoq);
1251  packet_queue_destroy(&is->audioq);
1252  packet_queue_destroy(&is->subtitleq);
1253 
1254  /* free all pictures */
1255  frame_queue_destory(&is->pictq);
1256  frame_queue_destory(&is->sampq);
1257  frame_queue_destory(&is->subpq);
1258  SDL_DestroyCond(is->continue_read_thread);
1259  sws_freeContext(is->sub_convert_ctx);
1260  av_free(is->filename);
1261  if (is->vis_texture)
1262  SDL_DestroyTexture(is->vis_texture);
1263  if (is->vid_texture)
1264  SDL_DestroyTexture(is->vid_texture);
1265  if (is->sub_texture)
1266  SDL_DestroyTexture(is->sub_texture);
1267  av_free(is);
1268 }
1269 
1270 static void do_exit(VideoState *is)
1271 {
1272  if (is) {
1273  stream_close(is);
1274  }
1275  if (renderer)
1276  SDL_DestroyRenderer(renderer);
1277  if (window)
1278  SDL_DestroyWindow(window);
1279  uninit_opts();
1280  av_freep(&vfilters_list);
1281  avformat_network_deinit();
1282  if (show_status)
1283  printf("\n");
1284  SDL_Quit();
1285  av_log(NULL, AV_LOG_QUIET, "%s", "");
1286  exit(0);
1287 }
1288 
1289 static void sigterm_handler(int sig)
1290 {
1291  exit(123);
1292 }
1293 
1294 static void set_default_window_size(int width, int height, AVRational sar)
1295 {
1296  SDL_Rect rect;
1297  int max_width = screen_width ? screen_width : INT_MAX;
1298  int max_height = screen_height ? screen_height : INT_MAX;
1299  if (max_width == INT_MAX && max_height == INT_MAX)
1300  max_height = height;
1301  calculate_display_rect(&rect, 0, 0, max_width, max_height, width, height, sar);
1302  default_width = rect.w;
1303  default_height = rect.h;
1304 }
1305 
1306 static int video_open(VideoState *is)
1307 {
1308  int w,h;
1309 
1310  w = screen_width ? screen_width : default_width;
1311  h = screen_height ? screen_height : default_height;
1312
1313  if (!window_title)
1314  window_title = input_filename;
1315  SDL_SetWindowTitle(window, window_title);
1316 
1317  SDL_SetWindowSize(window, w, h);
1318  SDL_SetWindowPosition(window, screen_left, screen_top);
1319  if (is_full_screen)
1320  SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP);
1321  SDL_ShowWindow(window);
1322 
1323  is->width = w;
1324  is->height = h;
1325 
1326  return 0;
1327 }
1328 
1329 /* display the current picture, if any */
1330 static void video_display(VideoState *is)
1331 {
1332  if (!is->width)
1333  video_open(is);
1334 
1335  SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1336  SDL_RenderClear(renderer);
1337  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1338  video_audio_display(is);
1339  else if (is->video_st)
1340  video_image_display(is);
1341  SDL_RenderPresent(renderer);
1342 }
1343 
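/* A Clock stores pts_drift = pts - last_updated, so while it is running its
 * current value works out to pts + (now - last_updated) * speed.
 * Example: pts = 10.0 s set at last_updated = 5.0 s with speed 1.0 reads
 * 12.0 s when queried at now = 7.0 s. */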
1344 static double get_clock(Clock *c)
1345 {
1346  if (*c->queue_serial != c->serial)
1347  return NAN;
1348  if (c->paused) {
1349  return c->pts;
1350  } else {
1351  double time = av_gettime_relative() / 1000000.0;
1352  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1353  }
1354 }
1355 
1356 static void set_clock_at(Clock *c, double pts, int serial, double time)
1357 {
1358  c->pts = pts;
1359  c->last_updated = time;
1360  c->pts_drift = c->pts - time;
1361  c->serial = serial;
1362 }
1363 
1364 static void set_clock(Clock *c, double pts, int serial)
1365 {
1366  double time = av_gettime_relative() / 1000000.0;
1367  set_clock_at(c, pts, serial, time);
1368 }
1369 
1370 static void set_clock_speed(Clock *c, double speed)
1371 {
1372  set_clock(c, get_clock(c), c->serial);
1373  c->speed = speed;
1374 }
1375 
1376 static void init_clock(Clock *c, int *queue_serial)
1377 {
1378  c->speed = 1.0;
1379  c->paused = 0;
1380  c->queue_serial = queue_serial;
1381  set_clock(c, NAN, -1);
1382 }
1383 
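/* The external clock follows the audio/video clock, but only snaps to it when
 * it is unset (NAN) or has drifted by more than AV_NOSYNC_THRESHOLD seconds. */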
1384 static void sync_clock_to_slave(Clock *c, Clock *slave)
1385 {
1386  double clock = get_clock(c);
1387  double slave_clock = get_clock(slave);
1388  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1389  set_clock(c, slave_clock, slave->serial);
1390 }
1391 
1392 static int get_master_sync_type(VideoState *is) {
1393  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1394  if (is->video_st)
1395  return AV_SYNC_VIDEO_MASTER;
1396  else
1397  return AV_SYNC_AUDIO_MASTER;
1398  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1399  if (is->audio_st)
1400  return AV_SYNC_AUDIO_MASTER;
1401  else
1402  return AV_SYNC_EXTERNAL_CLOCK;
1403  } else {
1404  return AV_SYNC_EXTERNAL_CLOCK;
1405  }
1406 }
1407 
1408 /* get the current master clock value */
1409 static double get_master_clock(VideoState *is)
1410 {
1411  double val;
1412 
1413  switch (get_master_sync_type(is)) {
1414  case AV_SYNC_VIDEO_MASTER:
1415  val = get_clock(&is->vidclk);
1416  break;
1417  case AV_SYNC_AUDIO_MASTER:
1418  val = get_clock(&is->audclk);
1419  break;
1420  default:
1421  val = get_clock(&is->extclk);
1422  break;
1423  }
1424  return val;
1425 }
1426 
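/* For realtime sources synced to the external clock, the clock speed is nudged
 * down when either packet queue runs low (risking underrun), up when both are
 * comfortably full, and otherwise eased back towards 1.0 in
 * EXTERNAL_CLOCK_SPEED_STEP increments. */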
1427 static void check_external_clock_speed(VideoState *is) {
1428  if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES ||
1429  is->audio_stream >= 0 && is->audioq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES) {
1430  set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
1431  } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) &&
1432  (is->audio_stream < 0 || is->audioq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES)) {
1433  set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
1434  } else {
1435  double speed = is->extclk.speed;
1436  if (speed != 1.0)
1437  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1438  }
1439 }
1440 
1441 /* seek in the stream */
1442 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int by_bytes)
1443 {
1444  if (!is->seek_req) {
1445  is->seek_pos = pos;
1446  is->seek_rel = rel;
1447  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1448  if (by_bytes)
1449  is->seek_flags |= AVSEEK_FLAG_BYTE;
1450  is->seek_req = 1;
1451  SDL_CondSignal(is->continue_read_thread);
1452  }
1453 }
1454 
1455 /* pause or resume the video */
1456 static void stream_toggle_pause(VideoState *is)
1457 {
1458  if (is->paused) {
1459  is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1460  if (is->read_pause_return != AVERROR(ENOSYS)) {
1461  is->vidclk.paused = 0;
1462  }
1463  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1464  }
1465  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1466  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1467 }
1468 
1469 static void toggle_pause(VideoState *is)
1470 {
1471  stream_toggle_pause(is);
1472  is->step = 0;
1473 }
1474 
1475 static void toggle_mute(VideoState *is)
1476 {
1477  is->muted = !is->muted;
1478 }
1479 
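/* Volume steps are applied in the dB domain: the current SDL volume is mapped to
 * 20*log10(volume / SDL_MIX_MAXVOLUME) dB, shifted by sign * step, and mapped
 * back. Example with SDL_MIX_MAXVOLUME = 128: volume 64 is about -6.02 dB, so
 * one +0.75 dB step gives lrint(128 * 10^(-5.27/20)) = 70. */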
1480 static void update_volume(VideoState *is, int sign, double step)
1481 {
1482  double volume_level = is->audio_volume ? (20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)) : -1000.0;
1483  int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (volume_level + sign * step) / 20.0));
1484  is->audio_volume = av_clip(is->audio_volume == new_volume ? (is->audio_volume + sign) : new_volume, 0, SDL_MIX_MAXVOLUME);
1485 }
1486 
1487 static void step_to_next_frame(VideoState *is)
1488 {
1489  /* if the stream is paused unpause it, then step */
1490  if (is->paused)
1491  stream_toggle_pause(is);
1492  is->step = 1;
1493 }
1494 
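/* When video is not the master clock, the nominal inter-frame delay is adjusted
 * by the video-minus-master difference: a late frame (diff <= -threshold)
 * shrinks the delay towards 0, an early frame (diff >= threshold) stretches or
 * doubles it, with the threshold clamped to
 * [AV_SYNC_THRESHOLD_MIN, AV_SYNC_THRESHOLD_MAX].
 * Example: delay = 0.040 s and diff = -0.060 s give max(0, 0.040 - 0.060) = 0,
 * i.e. the next frame is shown as soon as possible. */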
1495 static double compute_target_delay(double delay, VideoState *is)
1496 {
1497  double sync_threshold, diff = 0;
1498 
1499  /* update delay to follow master synchronisation source */
1500  if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1501  /* if video is slave, we try to correct big delays by
1502  duplicating or deleting a frame */
1503  diff = get_clock(&is->vidclk) - get_master_clock(is);
1504 
1505  /* skip or repeat frame. We take into account the
1506  delay to compute the threshold. I still don't know
1507  if it is the best guess */
1508  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1509  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1510  if (diff <= -sync_threshold)
1511  delay = FFMAX(0, delay + diff);
1512  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1513  delay = delay + diff;
1514  else if (diff >= sync_threshold)
1515  delay = 2 * delay;
1516  }
1517  }
1518 
1519  av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
1520  delay, -diff);
1521 
1522  return delay;
1523 }
1524 
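/* A frame's duration is estimated from the pts difference to the next frame of
 * the same serial; if that is missing or implausible (NaN, <= 0, or longer than
 * max_frame_duration) the decoder-reported duration is used instead, and across
 * a serial change the duration is treated as 0. */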
1525 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1526  if (vp->serial == nextvp->serial) {
1527  double duration = nextvp->pts - vp->pts;
1528  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1529  return vp->duration;
1530  else
1531  return duration;
1532  } else {
1533  return 0.0;
1534  }
1535 }
1536 
1537 static void update_video_pts(VideoState *is, double pts, int serial)
1538 {
1539  /* update current video pts */
1540  set_clock(&is->vidclk, pts, serial);
1541  sync_clock_to_slave(&is->extclk, &is->vidclk);
1542 }
1543 
1544 /* called to display each frame */
1545 static void video_refresh(void *opaque, double *remaining_time)
1546 {
1547  VideoState *is = opaque;
1548  double time;
1549 
1550  Frame *sp, *sp2;
1551 
1552  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1553  check_external_clock_speed(is);
1554
1555  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1556  time = av_gettime_relative() / 1000000.0;
1557  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1558  video_display(is);
1559  is->last_vis_time = time;
1560  }
1561  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1562  }
1563 
1564  if (is->video_st) {
1565 retry:
1566  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1567  // nothing to do, no picture to display in the queue
1568  } else {
1569  double last_duration, duration, delay;
1570  Frame *vp, *lastvp;
1571 
1572  /* dequeue the picture */
1573  lastvp = frame_queue_peek_last(&is->pictq);
1574  vp = frame_queue_peek(&is->pictq);
1575 
1576  if (vp->serial != is->videoq.serial) {
1577  frame_queue_next(&is->pictq);
1578  goto retry;
1579  }
1580 
1581  if (lastvp->serial != vp->serial)
1582  is->frame_timer = av_gettime_relative() / 1000000.0;
1583 
1584  if (is->paused)
1585  goto display;
1586 
1587  /* compute nominal last_duration */
1588  last_duration = vp_duration(is, lastvp, vp);
1589  delay = compute_target_delay(last_duration, is);
1590 
1591  time= av_gettime_relative()/1000000.0;
1592  if (time < is->frame_timer + delay) {
1593  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1594  goto display;
1595  }
1596 
1597  is->frame_timer += delay;
1598  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1599  is->frame_timer = time;
1600 
1601  SDL_LockMutex(is->pictq.mutex);
1602  if (!isnan(vp->pts))
1603  update_video_pts(is, vp->pts, vp->serial);
1604  SDL_UnlockMutex(is->pictq.mutex);
1605 
1606  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1607  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1608  duration = vp_duration(is, vp, nextvp);
1609  if(!is->step && (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1610  is->frame_drops_late++;
1611  frame_queue_next(&is->pictq);
1612  goto retry;
1613  }
1614  }
1615 
1616  if (is->subtitle_st) {
1617  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1618  sp = frame_queue_peek(&is->subpq);
1619 
1620  if (frame_queue_nb_remaining(&is->subpq) > 1)
1621  sp2 = frame_queue_peek_next(&is->subpq);
1622  else
1623  sp2 = NULL;
1624 
1625  if (sp->serial != is->subtitleq.serial
1626  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1627  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1628  {
1629  if (sp->uploaded) {
1630  int i;
1631  for (i = 0; i < sp->sub.num_rects; i++) {
1632  AVSubtitleRect *sub_rect = sp->sub.rects[i];
1633  uint8_t *pixels;
1634  int pitch, j;
1635 
1636  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
1637  for (j = 0; j < sub_rect->h; j++, pixels += pitch)
1638  memset(pixels, 0, sub_rect->w << 2);
1639  SDL_UnlockTexture(is->sub_texture);
1640  }
1641  }
1642  }
1643  frame_queue_next(&is->subpq);
1644  } else {
1645  break;
1646  }
1647  }
1648  }
1649 
1650  frame_queue_next(&is->pictq);
1651  is->force_refresh = 1;
1652 
1653  if (is->step && !is->paused)
1654  stream_toggle_pause(is);
1655  }
1656 display:
1657  /* display picture */
1658  if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
1659  video_display(is);
1660  }
1661  is->force_refresh = 0;
1662  if (show_status) {
1663  AVBPrint buf;
1664  static int64_t last_time;
1665  int64_t cur_time;
1666  int aqsize, vqsize, sqsize;
1667  double av_diff;
1668 
1669  cur_time = av_gettime_relative();
1670  if (!last_time || (cur_time - last_time) >= 30000) {
1671  aqsize = 0;
1672  vqsize = 0;
1673  sqsize = 0;
1674  if (is->audio_st)
1675  aqsize = is->audioq.size;
1676  if (is->video_st)
1677  vqsize = is->videoq.size;
1678  if (is->subtitle_st)
1679  sqsize = is->subtitleq.size;
1680  av_diff = 0;
1681  if (is->audio_st && is->video_st)
1682  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1683  else if (is->video_st)
1684  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1685  else if (is->audio_st)
1686  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1687 
1688  av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
1689  av_bprintf(&buf,
1690  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1691  get_master_clock(is),
1692  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1693  av_diff,
1694  is->frame_drops_early + is->frame_drops_late,
1695  aqsize / 1024,
1696  vqsize / 1024,
1697  sqsize,
1698  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_dts : 0,
1699  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_pts : 0);
1700 
1701  if (show_status == 1 && AV_LOG_INFO > av_log_get_level())
1702  fprintf(stderr, "%s", buf.str);
1703  else
1704  av_log(NULL, AV_LOG_INFO, "%s", buf.str);
1705 
1706  fflush(stderr);
1707  av_bprint_finalize(&buf, NULL);
1708 
1709  last_time = cur_time;
1710  }
1711  }
1712 }
1713 
1714 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1715 {
1716  Frame *vp;
1717 
1718 #if defined(DEBUG_SYNC)
1719  printf("frame_type=%c pts=%0.3f\n",
1720  av_get_picture_type_char(src_frame->pict_type), pts);
1721 #endif
1722 
1723  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1724  return -1;
1725 
1726  vp->sar = src_frame->sample_aspect_ratio;
1727  vp->uploaded = 0;
1728 
1729  vp->width = src_frame->width;
1730  vp->height = src_frame->height;
1731  vp->format = src_frame->format;
1732 
1733  vp->pts = pts;
1734  vp->duration = duration;
1735  vp->pos = pos;
1736  vp->serial = serial;
1737 
1738  set_default_window_size(vp->width, vp->height, vp->sar);
1739 
1740  av_frame_move_ref(vp->frame, src_frame);
1741  frame_queue_push(&is->pictq);
1742  return 0;
1743 }
1744 
1745 static int get_video_frame(VideoState *is, AVFrame *frame)
1746 {
1747  int got_picture;
1748 
1749  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1750  return -1;
1751 
1752  if (got_picture) {
1753  double dpts = NAN;
1754 
1755  if (frame->pts != AV_NOPTS_VALUE)
1756  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1757 
1758  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1759 
1760  if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1761  if (frame->pts != AV_NOPTS_VALUE) {
1762  double diff = dpts - get_master_clock(is);
1763  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1764  diff - is->frame_last_filter_delay < 0 &&
1765  is->viddec.pkt_serial == is->vidclk.serial &&
1766  is->videoq.nb_packets) {
1767  is->frame_drops_early++;
1768  av_frame_unref(frame);
1769  got_picture = 0;
1770  }
1771  }
1772  }
1773  }
1774 
1775  return got_picture;
1776 }
1777 
1778 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1779  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1780 {
1781  int ret, i;
1782  int nb_filters = graph->nb_filters;
1783  AVFilterInOut *outputs = NULL, *inputs = NULL;
1784
1785  if (filtergraph) {
1786  outputs = avfilter_inout_alloc();
1787  inputs = avfilter_inout_alloc();
1788  if (!outputs || !inputs) {
1789  ret = AVERROR(ENOMEM);
1790  goto fail;
1791  }
1792 
1793  outputs->name = av_strdup("in");
1794  outputs->filter_ctx = source_ctx;
1795  outputs->pad_idx = 0;
1796  outputs->next = NULL;
1797 
1798  inputs->name = av_strdup("out");
1799  inputs->filter_ctx = sink_ctx;
1800  inputs->pad_idx = 0;
1801  inputs->next = NULL;
1802 
1803  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1804  goto fail;
1805  } else {
1806  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1807  goto fail;
1808  }
1809 
1810  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1811  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1812  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1813 
1814  ret = avfilter_graph_config(graph, NULL);
1815 fail:
1816  avfilter_inout_free(&outputs);
1817  avfilter_inout_free(&inputs);
1818  return ret;
1819 }
1820 
1821 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1822 {
1823  enum AVPixelFormat pix_fmts[FF_ARRAY_ELEMS(sdl_texture_format_map)];
1824  char sws_flags_str[512] = "";
1825  char buffersrc_args[256];
1826  int ret;
1827  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1828  AVCodecParameters *codecpar = is->video_st->codecpar;
1829  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1830  const AVDictionaryEntry *e = NULL;
1831  int nb_pix_fmts = 0;
1832  int i, j;
1833 
1834  for (i = 0; i < renderer_info.num_texture_formats; i++) {
1835  for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; j++) {
1836  if (renderer_info.texture_formats[i] == sdl_texture_format_map[j].texture_fmt) {
1837  pix_fmts[nb_pix_fmts++] = sdl_texture_format_map[j].format;
1838  break;
1839  }
1840  }
1841  }
1842  pix_fmts[nb_pix_fmts] = AV_PIX_FMT_NONE;
1843 
1844  while ((e = av_dict_iterate(sws_dict, e))) {
1845  if (!strcmp(e->key, "sws_flags")) {
1846  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1847  } else
1848  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value);
1849  }
1850  if (strlen(sws_flags_str))
1851  sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1852 
1853  graph->scale_sws_opts = av_strdup(sws_flags_str);
1854 
1855  snprintf(buffersrc_args, sizeof(buffersrc_args),
1856  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1857  frame->width, frame->height, frame->format,
1858  is->video_st->time_base.num, is->video_st->time_base.den,
1859  codecpar->sample_aspect_ratio.num, FFMAX(codecpar->sample_aspect_ratio.den, 1));
1860  if (fr.num && fr.den)
1861  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1862 
1863  if ((ret = avfilter_graph_create_filter(&filt_src,
1864  avfilter_get_by_name("buffer"),
1865  "ffplay_buffer", buffersrc_args, NULL,
1866  graph)) < 0)
1867  goto fail;
1868 
1869  ret = avfilter_graph_create_filter(&filt_out,
1870  avfilter_get_by_name("buffersink"),
1871  "ffplay_buffersink", NULL, NULL, graph);
1872  if (ret < 0)
1873  goto fail;
1874 
1875  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1876  goto fail;
1877 
1878  last_filter = filt_out;
1879 
1880 /* Note: this macro adds a filter before the last added filter, so the
1881  * processing order of the filters is in reverse */
1882 #define INSERT_FILT(name, arg) do { \
1883  AVFilterContext *filt_ctx; \
1884  \
1885  ret = avfilter_graph_create_filter(&filt_ctx, \
1886  avfilter_get_by_name(name), \
1887  "ffplay_" name, arg, NULL, graph); \
1888  if (ret < 0) \
1889  goto fail; \
1890  \
1891  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1892  if (ret < 0) \
1893  goto fail; \
1894  \
1895  last_filter = filt_ctx; \
1896 } while (0)
1897 
1898  if (autorotate) {
1899  double theta = 0.0;
1900  int32_t *displaymatrix = NULL;
1901  AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX);
1902  if (sd)
1903  displaymatrix = (int32_t *)sd->data;
1904  if (!displaymatrix)
1905  displaymatrix = (int32_t *)av_stream_get_side_data(is->video_st, AV_PKT_DATA_DISPLAYMATRIX, NULL);
1906  theta = get_rotation(displaymatrix);
1907 
1908  if (fabs(theta - 90) < 1.0) {
1909  INSERT_FILT("transpose", "clock");
1910  } else if (fabs(theta - 180) < 1.0) {
1911  INSERT_FILT("hflip", NULL);
1912  INSERT_FILT("vflip", NULL);
1913  } else if (fabs(theta - 270) < 1.0) {
1914  INSERT_FILT("transpose", "cclock");
1915  } else if (fabs(theta) > 1.0) {
1916  char rotate_buf[64];
1917  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1918  INSERT_FILT("rotate", rotate_buf);
1919  }
1920  }
1921 
1922  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
1923  goto fail;
1924 
1925  is->in_video_filter = filt_src;
1926  is->out_video_filter = filt_out;
1927 
1928 fail:
1929  return ret;
1930 }
1931 
1932 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1933 {
1935  int sample_rates[2] = { 0, -1 };
1936  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1937  char aresample_swr_opts[512] = "";
1938  const AVDictionaryEntry *e = NULL;
1939  AVBPrint bp;
1940  char asrc_args[256];
1941  int ret;
1942 
1943  avfilter_graph_free(&is->agraph);
1944  if (!(is->agraph = avfilter_graph_alloc()))
1945  return AVERROR(ENOMEM);
1946  is->agraph->nb_threads = filter_nbthreads;
1947 
1948  av_bprint_init(&bp, 0, AV_BPRINT_SIZE_AUTOMATIC);
1949
1950  while ((e = av_dict_iterate(swr_opts, e)))
1951  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
1952  if (strlen(aresample_swr_opts))
1953  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
1954  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
1955 
1956  av_channel_layout_describe_bprint(&is->audio_filter_src.ch_layout, &bp);
1957 
1958  ret = snprintf(asrc_args, sizeof(asrc_args),
1959  "sample_rate=%d:sample_fmt=%s:time_base=%d/%d:channel_layout=%s",
1960  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
1961  1, is->audio_filter_src.freq, bp.str);
1962 
1963  ret = avfilter_graph_create_filter(&filt_asrc,
1964  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1965  asrc_args, NULL, is->agraph);
1966  if (ret < 0)
1967  goto end;
1968 
1969 
1970  ret = avfilter_graph_create_filter(&filt_asink,
1971  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
1972  NULL, NULL, is->agraph);
1973  if (ret < 0)
1974  goto end;
1975 
1976  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1977  goto end;
1978  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1979  goto end;
1980 
1981  if (force_output_format) {
1982  sample_rates [0] = is->audio_tgt.freq;
1983  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
1984  goto end;
1985  if ((ret = av_opt_set(filt_asink, "ch_layouts", bp.str, AV_OPT_SEARCH_CHILDREN)) < 0)
1986  goto end;
1987  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1988  goto end;
1989  }
1990 
1991 
1992  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
1993  goto end;
1994 
1995  is->in_audio_filter = filt_asrc;
1996  is->out_audio_filter = filt_asink;
1997 
1998 end:
1999  if (ret < 0)
2000  avfilter_graph_free(&is->agraph);
2001  av_bprint_finalize(&bp, NULL);
2002 
2003  return ret;
2004 }
2005 
2006 static int audio_thread(void *arg)
2007 {
2008  VideoState *is = arg;
2009  AVFrame *frame = av_frame_alloc();
2010  Frame *af;
2011  int last_serial = -1;
2012  int reconfigure;
2013  int got_frame = 0;
2014  AVRational tb;
2015  int ret = 0;
2016 
2017  if (!frame)
2018  return AVERROR(ENOMEM);
2019 
2020  do {
2021  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2022  goto the_end;
2023 
2024  if (got_frame) {
2025  tb = (AVRational){1, frame->sample_rate};
2026 
2027  reconfigure =
2028  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.ch_layout.nb_channels,
2029  frame->format, frame->ch_layout.nb_channels) ||
2030  av_channel_layout_compare(&is->audio_filter_src.ch_layout, &frame->ch_layout) ||
2031  is->audio_filter_src.freq != frame->sample_rate ||
2032  is->auddec.pkt_serial != last_serial;
2033 
2034  if (reconfigure) {
2035  char buf1[1024], buf2[1024];
2036  av_channel_layout_describe(&is->audio_filter_src.ch_layout, buf1, sizeof(buf1));
2037  av_channel_layout_describe(&frame->ch_layout, buf2, sizeof(buf2));
2038  av_log(NULL, AV_LOG_DEBUG,
2039  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2040  is->audio_filter_src.freq, is->audio_filter_src.ch_layout.nb_channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2041  frame->sample_rate, frame->ch_layout.nb_channels, av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2042 
2043  is->audio_filter_src.fmt = frame->format;
2044  ret = av_channel_layout_copy(&is->audio_filter_src.ch_layout, &frame->ch_layout);
2045  if (ret < 0)
2046  goto the_end;
2047  is->audio_filter_src.freq = frame->sample_rate;
2048  last_serial = is->auddec.pkt_serial;
2049 
2050  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2051  goto the_end;
2052  }
2053 
2054  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2055  goto the_end;
2056 
2057  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2058  FrameData *fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL;
2059  tb = av_buffersink_get_time_base(is->out_audio_filter);
2060  if (!(af = frame_queue_peek_writable(&is->sampq)))
2061  goto the_end;
2062 
2063  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2064  af->pos = fd ? fd->pkt_pos : -1;
2065  af->serial = is->auddec.pkt_serial;
2066  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2067 
2068  av_frame_move_ref(af->frame, frame);
2069  frame_queue_push(&is->sampq);
2070 
2071  if (is->audioq.serial != is->auddec.pkt_serial)
2072  break;
2073  }
2074  if (ret == AVERROR_EOF)
2075  is->auddec.finished = is->auddec.pkt_serial;
2076  }
2077  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2078  the_end:
2079  avfilter_graph_free(&is->agraph);
2080  av_frame_free(&frame);
2081  return ret;
2082 }
2083 
2084 static int decoder_start(Decoder *d, int (*fn)(void *), const char *thread_name, void* arg)
2085 {
2086  packet_queue_start(d->queue);
2087  d->decoder_tid = SDL_CreateThread(fn, thread_name, arg);
2088  if (!d->decoder_tid) {
2089  av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
2090  return AVERROR(ENOMEM);
2091  }
2092  return 0;
2093 }
2094 
2095 static int video_thread(void *arg)
2096 {
2097  VideoState *is = arg;
2098  AVFrame *frame = av_frame_alloc();
2099  double pts;
2100  double duration;
2101  int ret;
2102  AVRational tb = is->video_st->time_base;
2103  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2104 
2105  AVFilterGraph *graph = NULL;
2106  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2107  int last_w = 0;
2108  int last_h = 0;
2109  enum AVPixelFormat last_format = -2;
2110  int last_serial = -1;
2111  int last_vfilter_idx = 0;
2112 
2113  if (!frame)
2114  return AVERROR(ENOMEM);
2115 
2116  for (;;) {
2117  ret = get_video_frame(is, frame);
2118  if (ret < 0)
2119  goto the_end;
2120  if (!ret)
2121  continue;
2122 
2123  if ( last_w != frame->width
2124  || last_h != frame->height
2125  || last_format != frame->format
2126  || last_serial != is->viddec.pkt_serial
2127  || last_vfilter_idx != is->vfilter_idx) {
2128  av_log(NULL, AV_LOG_DEBUG,
2129  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2130  last_w, last_h,
2131  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2132  frame->width, frame->height,
2133  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2134  avfilter_graph_free(&graph);
2135  graph = avfilter_graph_alloc();
2136  if (!graph) {
2137  ret = AVERROR(ENOMEM);
2138  goto the_end;
2139  }
2140  graph->nb_threads = filter_nbthreads;
2141  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2142  SDL_Event event;
2143  event.type = FF_QUIT_EVENT;
2144  event.user.data1 = is;
2145  SDL_PushEvent(&event);
2146  goto the_end;
2147  }
2148  filt_in = is->in_video_filter;
2149  filt_out = is->out_video_filter;
2150  last_w = frame->width;
2151  last_h = frame->height;
2152  last_format = frame->format;
2153  last_serial = is->viddec.pkt_serial;
2154  last_vfilter_idx = is->vfilter_idx;
2155  frame_rate = av_buffersink_get_frame_rate(filt_out);
2156  }
2157 
2158  ret = av_buffersrc_add_frame(filt_in, frame);
2159  if (ret < 0)
2160  goto the_end;
2161 
2162  while (ret >= 0) {
2163  FrameData *fd;
2164 
2165  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2166 
2167  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2168  if (ret < 0) {
2169  if (ret == AVERROR_EOF)
2170  is->viddec.finished = is->viddec.pkt_serial;
2171  ret = 0;
2172  break;
2173  }
2174 
2175  fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL;
2176 
2177  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2178  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2179  is->frame_last_filter_delay = 0;
2180  tb = av_buffersink_get_time_base(filt_out);
2181  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
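 /* Nominal frame duration from the sink frame rate, e.g. a 30000/1001 fps
  * stream yields 1001/30000 s (about 33.4 ms); 0 means the rate is unknown. */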
2182  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2183  ret = queue_picture(is, frame, pts, duration, fd ? fd->pkt_pos : -1, is->viddec.pkt_serial);
2184  av_frame_unref(frame);
2185  if (is->videoq.serial != is->viddec.pkt_serial)
2186  break;
2187  }
2188 
2189  if (ret < 0)
2190  goto the_end;
2191  }
2192  the_end:
2193  avfilter_graph_free(&graph);
2194  av_frame_free(&frame);
2195  return 0;
2196 }
2197 
2198 static int subtitle_thread(void *arg)
2199 {
2200  VideoState *is = arg;
2201  Frame *sp;
2202  int got_subtitle;
2203  double pts;
2204 
2205  for (;;) {
2206  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2207  return 0;
2208 
2209  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2210  break;
2211 
2212  pts = 0;
2213 
2214  if (got_subtitle && sp->sub.format == 0) {
2215  if (sp->sub.pts != AV_NOPTS_VALUE)
2216  pts = sp->sub.pts / (double)AV_TIME_BASE;
2217  sp->pts = pts;
2218  sp->serial = is->subdec.pkt_serial;
2219  sp->width = is->subdec.avctx->width;
2220  sp->height = is->subdec.avctx->height;
2221  sp->uploaded = 0;
2222 
2223  /* now we can update the picture count */
2224  frame_queue_push(&is->subpq);
2225  } else if (got_subtitle) {
2226  avsubtitle_free(&sp->sub);
2227  }
2228  }
2229  return 0;
2230 }
2231 
2232 /* copy decoded samples into the ring buffer used by the wave/RDFT display */
2233 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2234 {
2235  int size, len;
2236 
2237  size = samples_size / sizeof(short);
2238  while (size > 0) {
2239  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2240  if (len > size)
2241  len = size;
2242  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2243  samples += len;
2244  is->sample_array_index += len;
2245  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2246  is->sample_array_index = 0;
2247  size -= len;
2248  }
2249 }
2250 
2251 /* return the wanted number of samples to get better sync if sync_type is video
2252  * or external master clock */
2253 static int synchronize_audio(VideoState *is, int nb_samples)
2254 {
2255  int wanted_nb_samples = nb_samples;
2256 
2257  /* if not master, then we try to remove or add samples to correct the clock */
2258  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2259  double diff, avg_diff;
2260  int min_nb_samples, max_nb_samples;
2261 
2262  diff = get_clock(&is->audclk) - get_master_clock(is);
2263 
2264  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2265  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2266  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2267  /* not enough measurements yet for a reliable estimate */
2268  is->audio_diff_avg_count++;
2269  } else {
2270  /* estimate the A-V difference */
2271  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2272 
2273  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2274  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2275  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2276  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2277  wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
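 /* Worked example: at 48 kHz with nb_samples = 1024 and diff = +20 ms,
  * diff * freq requests ~960 extra samples (1984 total), but the
  * +/-SAMPLE_CORRECTION_PERCENT_MAX (10%) clamp caps the request at 1126. */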
2278  }
2279  av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2280  diff, avg_diff, wanted_nb_samples - nb_samples,
2281  is->audio_clock, is->audio_diff_threshold);
2282  }
2283  } else {
2284  /* the difference is too large: probably initial PTS errors, so
2285  reset the A-V averaging filter */
2286  is->audio_diff_avg_count = 0;
2287  is->audio_diff_cum = 0;
2288  }
2289  }
2290 
2291  return wanted_nb_samples;
2292 }
2293 
2294 /**
2295  * Decode one audio frame and return its uncompressed size.
2296  *
2297  * The processed audio frame is decoded, converted if required, and
2298  * stored in is->audio_buf, with size in bytes given by the return
2299  * value.
2300  */
2301 static int audio_decode_frame(VideoState *is)
2302 {
2303  int data_size, resampled_data_size;
2304  av_unused double audio_clock0;
2305  int wanted_nb_samples;
2306  Frame *af;
2307 
2308  if (is->paused)
2309  return -1;
2310 
2311  do {
2312 #if defined(_WIN32)
2313  while (frame_queue_nb_remaining(&is->sampq) == 0) {
2314  if ((av_gettime_relative() - audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
2315  return -1;
2316  av_usleep (1000);
2317  }
2318 #endif
2319  if (!(af = frame_queue_peek_readable(&is->sampq)))
2320  return -1;
2321  frame_queue_next(&is->sampq);
2322  } while (af->serial != is->audioq.serial);
2323 
2324  data_size = av_samples_get_buffer_size(NULL, af->frame->ch_layout.nb_channels,
2325  af->frame->nb_samples,
2326  af->frame->format, 1);
2327 
2328  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2329 
2330  if (af->frame->format != is->audio_src.fmt ||
2331  av_channel_layout_compare(&af->frame->ch_layout, &is->audio_src.ch_layout) ||
2332  af->frame->sample_rate != is->audio_src.freq ||
2333  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2334  swr_free(&is->swr_ctx);
2335  swr_alloc_set_opts2(&is->swr_ctx,
2336  &is->audio_tgt.ch_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2337  &af->frame->ch_layout, af->frame->format, af->frame->sample_rate,
2338  0, NULL);
2339  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2340  av_log(NULL, AV_LOG_ERROR,
2341  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2342  af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), af->frame->ch_layout.nb_channels,
2343  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.ch_layout.nb_channels);
2344  swr_free(&is->swr_ctx);
2345  return -1;
2346  }
2347  if (av_channel_layout_copy(&is->audio_src.ch_layout, &af->frame->ch_layout) < 0)
2348  return -1;
2349  is->audio_src.freq = af->frame->sample_rate;
2350  is->audio_src.fmt = af->frame->format;
2351  }
2352 
2353  if (is->swr_ctx) {
2354  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2355  uint8_t **out = &is->audio_buf1;
2356  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2357  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.ch_layout.nb_channels, out_count, is->audio_tgt.fmt, 0);
2358  int len2;
2359  if (out_size < 0) {
2360  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2361  return -1;
2362  }
2363  if (wanted_nb_samples != af->frame->nb_samples) {
2364  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2365  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2366  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2367  return -1;
2368  }
2369  }
2370  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2371  if (!is->audio_buf1)
2372  return AVERROR(ENOMEM);
2373  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2374  if (len2 < 0) {
2375  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2376  return -1;
2377  }
2378  if (len2 == out_count) {
2379  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2380  if (swr_init(is->swr_ctx) < 0)
2381  swr_free(&is->swr_ctx);
2382  }
2383  is->audio_buf = is->audio_buf1;
2384  resampled_data_size = len2 * is->audio_tgt.ch_layout.nb_channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
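 /* len2 is the per-channel sample count written by swr_convert(), so the byte
  * size is samples * channels * bytes_per_sample, e.g. 1024 samples of stereo
  * S16 give 1024 * 2 * 2 = 4096 bytes. */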
2385  } else {
2386  is->audio_buf = af->frame->data[0];
2387  resampled_data_size = data_size;
2388  }
2389 
2390  audio_clock0 = is->audio_clock;
2391  /* update the audio clock with the pts */
2392  if (!isnan(af->pts))
2393  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2394  else
2395  is->audio_clock = NAN;
2396  is->audio_clock_serial = af->serial;
2397 #ifdef DEBUG
2398  {
2399  static double last_clock;
2400  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2401  is->audio_clock - last_clock,
2402  is->audio_clock, audio_clock0);
2403  last_clock = is->audio_clock;
2404  }
2405 #endif
2406  return resampled_data_size;
2407 }
2408 
2409 /* prepare a new audio buffer */
2410 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2411 {
2412  VideoState *is = opaque;
2413  int audio_size, len1;
2414 
2415  audio_callback_time = av_gettime_relative();
2416 
2417  while (len > 0) {
2418  if (is->audio_buf_index >= is->audio_buf_size) {
2419  audio_size = audio_decode_frame(is);
2420  if (audio_size < 0) {
2421  /* if error, just output silence */
2422  is->audio_buf = NULL;
2423  is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2424  } else {
2425  if (is->show_mode != SHOW_MODE_VIDEO)
2426  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2427  is->audio_buf_size = audio_size;
2428  }
2429  is->audio_buf_index = 0;
2430  }
2431  len1 = is->audio_buf_size - is->audio_buf_index;
2432  if (len1 > len)
2433  len1 = len;
2434  if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
2435  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2436  else {
2437  memset(stream, 0, len1);
2438  if (!is->muted && is->audio_buf)
2439  SDL_MixAudioFormat(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, is->audio_volume);
2440  }
2441  len -= len1;
2442  stream += len1;
2443  is->audio_buf_index += len1;
2444  }
2445  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2446  /* Let's assume the audio driver that is used by SDL has two periods. */
2447  if (!isnan(is->audio_clock)) {
2448  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2449  sync_clock_to_slave(&is->extclk, &is->audclk);
2450  }
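 /* The clock is thus the PTS of the last decoded sample minus the audio still
  * queued in SDL and in audio_buf; e.g. with an 8192-byte hardware buffer at
  * 192000 bytes/sec (48 kHz stereo S16), the two assumed hardware periods
  * alone account for roughly 85 ms of latency. */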
2451 }
2452 
2453 static int audio_open(void *opaque, AVChannelLayout *wanted_channel_layout, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2454 {
2455  SDL_AudioSpec wanted_spec, spec;
2456  const char *env;
2457  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2458  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2459  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2460  int wanted_nb_channels = wanted_channel_layout->nb_channels;
2461 
2462  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2463  if (env) {
2464  wanted_nb_channels = atoi(env);
2465  av_channel_layout_uninit(wanted_channel_layout);
2466  av_channel_layout_default(wanted_channel_layout, wanted_nb_channels);
2467  }
2468  if (wanted_channel_layout->order != AV_CHANNEL_ORDER_NATIVE) {
2469  av_channel_layout_uninit(wanted_channel_layout);
2470  av_channel_layout_default(wanted_channel_layout, wanted_nb_channels);
2471  }
2472  wanted_nb_channels = wanted_channel_layout->nb_channels;
2473  wanted_spec.channels = wanted_nb_channels;
2474  wanted_spec.freq = wanted_sample_rate;
2475  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2476  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2477  return -1;
2478  }
2479  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2480  next_sample_rate_idx--;
2481  wanted_spec.format = AUDIO_S16SYS;
2482  wanted_spec.silence = 0;
2483  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
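 /* Rounds the callback buffer up to a power of two while staying under
  * SDL_AUDIO_MAX_CALLBACKS_PER_SEC; e.g. at 48 kHz, 48000 / 30 = 1600 becomes
  * 2048 samples, i.e. one callback roughly every 43 ms. */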
2484  wanted_spec.callback = sdl_audio_callback;
2485  wanted_spec.userdata = opaque;
2486  while (!(audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE))) {
2487  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2488  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2489  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2490  if (!wanted_spec.channels) {
2491  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2492  wanted_spec.channels = wanted_nb_channels;
2493  if (!wanted_spec.freq) {
2494  av_log(NULL, AV_LOG_ERROR,
2495  "No more combinations to try, audio open failed\n");
2496  return -1;
2497  }
2498  }
2499  av_channel_layout_default(wanted_channel_layout, wanted_spec.channels);
2500  }
2501  if (spec.format != AUDIO_S16SYS) {
2502  av_log(NULL, AV_LOG_ERROR,
2503  "SDL advised audio format %d is not supported!\n", spec.format);
2504  return -1;
2505  }
2506  if (spec.channels != wanted_spec.channels) {
2507  av_channel_layout_uninit(wanted_channel_layout);
2508  av_channel_layout_default(wanted_channel_layout, spec.channels);
2509  if (wanted_channel_layout->order != AV_CHANNEL_ORDER_NATIVE) {
2510  av_log(NULL, AV_LOG_ERROR,
2511  "SDL advised channel count %d is not supported!\n", spec.channels);
2512  return -1;
2513  }
2514  }
2515 
2516  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2517  audio_hw_params->freq = spec.freq;
2518  if (av_channel_layout_copy(&audio_hw_params->ch_layout, wanted_channel_layout) < 0)
2519  return -1;
2520  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->ch_layout.nb_channels, 1, audio_hw_params->fmt, 1);
2521  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->ch_layout.nb_channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2522  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2523  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2524  return -1;
2525  }
2526  return spec.size;
2527 }
2528 
2529 /* open a given stream. Return 0 if OK */
2530 static int stream_component_open(VideoState *is, int stream_index)
2531 {
2532  AVFormatContext *ic = is->ic;
2533  AVCodecContext *avctx;
2534  const AVCodec *codec;
2535  const char *forced_codec_name = NULL;
2536  AVDictionary *opts = NULL;
2537  const AVDictionaryEntry *t = NULL;
2538  int sample_rate;
2539  AVChannelLayout ch_layout = { 0 };
2540  int ret = 0;
2541  int stream_lowres = lowres;
2542 
2543  if (stream_index < 0 || stream_index >= ic->nb_streams)
2544  return -1;
2545 
2546  avctx = avcodec_alloc_context3(NULL);
2547  if (!avctx)
2548  return AVERROR(ENOMEM);
2549 
2550  ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
2551  if (ret < 0)
2552  goto fail;
2553  avctx->pkt_timebase = ic->streams[stream_index]->time_base;
2554 
2555  codec = avcodec_find_decoder(avctx->codec_id);
2556 
2557  switch(avctx->codec_type){
2558  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2559  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2560  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2561  }
2562  if (forced_codec_name)
2563  codec = avcodec_find_decoder_by_name(forced_codec_name);
2564  if (!codec) {
2565  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2566  "No codec could be found with name '%s'\n", forced_codec_name);
2567  else av_log(NULL, AV_LOG_WARNING,
2568  "No decoder could be found for codec %s\n", avcodec_get_name(avctx->codec_id));
2569  ret = AVERROR(EINVAL);
2570  goto fail;
2571  }
2572 
2573  avctx->codec_id = codec->id;
2574  if (stream_lowres > codec->max_lowres) {
2575  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2576  codec->max_lowres);
2577  stream_lowres = codec->max_lowres;
2578  }
2579  avctx->lowres = stream_lowres;
2580 
2581  if (fast)
2582  avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2583 
2584  opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2585  if (!av_dict_get(opts, "threads", NULL, 0))
2586  av_dict_set(&opts, "threads", "auto", 0);
2587  if (stream_lowres)
2588  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2589 
2590  av_dict_set(&opts, "flags", "+copy_opaque", AV_DICT_MULTIKEY);
2591 
2592  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2593  goto fail;
2594  }
2595  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2596  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2597  ret = AVERROR_OPTION_NOT_FOUND;
2598  goto fail;
2599  }
2600 
2601  is->eof = 0;
2602  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2603  switch (avctx->codec_type) {
2604  case AVMEDIA_TYPE_AUDIO:
2605  {
2606  AVFilterContext *sink;
2607 
2608  is->audio_filter_src.freq = avctx->sample_rate;
2609  ret = av_channel_layout_copy(&is->audio_filter_src.ch_layout, &avctx->ch_layout);
2610  if (ret < 0)
2611  goto fail;
2612  is->audio_filter_src.fmt = avctx->sample_fmt;
2613  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2614  goto fail;
2615  sink = is->out_audio_filter;
2616  sample_rate = av_buffersink_get_sample_rate(sink);
2617  ret = av_buffersink_get_ch_layout(sink, &ch_layout);
2618  if (ret < 0)
2619  goto fail;
2620  }
2621 
2622  /* prepare audio output */
2623  if ((ret = audio_open(is, &ch_layout, sample_rate, &is->audio_tgt)) < 0)
2624  goto fail;
2625  is->audio_hw_buf_size = ret;
2626  is->audio_src = is->audio_tgt;
2627  is->audio_buf_size = 0;
2628  is->audio_buf_index = 0;
2629 
2630  /* init averaging filter */
2631  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2632  is->audio_diff_avg_count = 0;
2633  /* since we do not have a precise enough measure of the audio FIFO fullness,
2634  we only correct audio sync when the error is larger than this threshold */
2635  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
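 /* With AUDIO_DIFF_AVG_NB = 20 the coefficient is 0.01^(1/20) ~= 0.79, so the
  * oldest of the last ~20 measurements keeps about 1% weight. The threshold is
  * one hardware buffer of audio, e.g. 8192 bytes at 192000 bytes/sec ~= 43 ms. */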
2636 
2637  is->audio_stream = stream_index;
2638  is->audio_st = ic->streams[stream_index];
2639 
2640  if ((ret = decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread)) < 0)
2641  goto fail;
2642  if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
2643  is->auddec.start_pts = is->audio_st->start_time;
2644  is->auddec.start_pts_tb = is->audio_st->time_base;
2645  }
2646  if ((ret = decoder_start(&is->auddec, audio_thread, "audio_decoder", is)) < 0)
2647  goto out;
2648  SDL_PauseAudioDevice(audio_dev, 0);
2649  break;
2650  case AVMEDIA_TYPE_VIDEO:
2651  is->video_stream = stream_index;
2652  is->video_st = ic->streams[stream_index];
2653 
2654  if ((ret = decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread)) < 0)
2655  goto fail;
2656  if ((ret = decoder_start(&is->viddec, video_thread, "video_decoder", is)) < 0)
2657  goto out;
2658  is->queue_attachments_req = 1;
2659  break;
2660  case AVMEDIA_TYPE_SUBTITLE:
2661  is->subtitle_stream = stream_index;
2662  is->subtitle_st = ic->streams[stream_index];
2663 
2664  if ((ret = decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread)) < 0)
2665  goto fail;
2666  if ((ret = decoder_start(&is->subdec, subtitle_thread, "subtitle_decoder", is)) < 0)
2667  goto out;
2668  break;
2669  default:
2670  break;
2671  }
2672  goto out;
2673 
2674 fail:
2675  avcodec_free_context(&avctx);
2676 out:
2677  av_channel_layout_uninit(&ch_layout);
2678  av_dict_free(&opts);
2679 
2680  return ret;
2681 }
2682 
2683 static int decode_interrupt_cb(void *ctx)
2684 {
2685  VideoState *is = ctx;
2686  return is->abort_request;
2687 }
2688 
2689 static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue) {
2690  return stream_id < 0 ||
2691  queue->abort_request ||
2692  (st->disposition & AV_DISPOSITION_ATTACHED_PIC) ||
2693  queue->nb_packets > MIN_FRAMES && (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0);
2694 }
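 /* A queue counts as full once it holds more than MIN_FRAMES packets spanning
  * over one second of stream time (or of unknown duration); aborted queues and
  * attached-picture streams never hold the reader back. */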
2695 
2696 static int is_realtime(AVFormatContext *s)
2697 {
2698  if( !strcmp(s->iformat->name, "rtp")
2699  || !strcmp(s->iformat->name, "rtsp")
2700  || !strcmp(s->iformat->name, "sdp")
2701  )
2702  return 1;
2703 
2704  if(s->pb && ( !strncmp(s->url, "rtp:", 4)
2705  || !strncmp(s->url, "udp:", 4)
2706  )
2707  )
2708  return 1;
2709  return 0;
2710 }
2711 
2712 /* this thread gets the stream from the disk or the network */
2713 static int read_thread(void *arg)
2714 {
2715  VideoState *is = arg;
2716  AVFormatContext *ic = NULL;
2717  int err, i, ret;
2718  int st_index[AVMEDIA_TYPE_NB];
2719  AVPacket *pkt = NULL;
2720  int64_t stream_start_time;
2721  int pkt_in_play_range = 0;
2722  const AVDictionaryEntry *t;
2723  SDL_mutex *wait_mutex = SDL_CreateMutex();
2724  int scan_all_pmts_set = 0;
2725  int64_t pkt_ts;
2726 
2727  if (!wait_mutex) {
2728  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
2729  ret = AVERROR(ENOMEM);
2730  goto fail;
2731  }
2732 
2733  memset(st_index, -1, sizeof(st_index));
2734  is->eof = 0;
2735 
2736  pkt = av_packet_alloc();
2737  if (!pkt) {
2738  av_log(NULL, AV_LOG_FATAL, "Could not allocate packet.\n");
2739  ret = AVERROR(ENOMEM);
2740  goto fail;
2741  }
2742  ic = avformat_alloc_context();
2743  if (!ic) {
2744  av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2745  ret = AVERROR(ENOMEM);
2746  goto fail;
2747  }
2748  ic->interrupt_callback.callback = decode_interrupt_cb;
2749  ic->interrupt_callback.opaque = is;
2750  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2751  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2752  scan_all_pmts_set = 1;
2753  }
2754  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2755  if (err < 0) {
2756  print_error(is->filename, err);
2757  ret = -1;
2758  goto fail;
2759  }
2760  if (scan_all_pmts_set)
2761  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2762 
2763  if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2764  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2765  ret = AVERROR_OPTION_NOT_FOUND;
2766  goto fail;
2767  }
2768  is->ic = ic;
2769 
2770  if (genpts)
2771  ic->flags |= AVFMT_FLAG_GENPTS;
2772 
2774 
2775  if (find_stream_info) {
2777  int orig_nb_streams = ic->nb_streams;
2778 
2779  err = avformat_find_stream_info(ic, opts);
2780 
2781  for (i = 0; i < orig_nb_streams; i++)
2782  av_dict_free(&opts[i]);
2783  av_freep(&opts);
2784 
2785  if (err < 0) {
2786  av_log(NULL, AV_LOG_WARNING,
2787  "%s: could not find codec parameters\n", is->filename);
2788  ret = -1;
2789  goto fail;
2790  }
2791  }
2792 
2793  if (ic->pb)
2794  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2795 
2796  if (seek_by_bytes < 0)
2797  seek_by_bytes = !(ic->iformat->flags & AVFMT_NO_BYTE_SEEK) &&
2798  !!(ic->iformat->flags & AVFMT_TS_DISCONT) &&
2799  strcmp("ogg", ic->iformat->name);
2800 
2801  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2802 
2803  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2804  window_title = av_asprintf("%s - %s", t->value, input_filename);
2805 
2806  /* if seeking requested, we execute it */
2807  if (start_time != AV_NOPTS_VALUE) {
2808  int64_t timestamp;
2809 
2810  timestamp = start_time;
2811  /* add the stream start time */
2812  if (ic->start_time != AV_NOPTS_VALUE)
2813  timestamp += ic->start_time;
2814  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2815  if (ret < 0) {
2816  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2817  is->filename, (double)timestamp / AV_TIME_BASE);
2818  }
2819  }
2820 
2821  is->realtime = is_realtime(ic);
2822 
2823  if (show_status)
2824  av_dump_format(ic, 0, is->filename, 0);
2825 
2826  for (i = 0; i < ic->nb_streams; i++) {
2827  AVStream *st = ic->streams[i];
2828  enum AVMediaType type = st->codecpar->codec_type;
2829  st->discard = AVDISCARD_ALL;
2830  if (type >= 0 && wanted_stream_spec[type] && st_index[type] == -1)
2831  if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
2832  st_index[type] = i;
2833  }
2834  for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
2835  if (wanted_stream_spec[i] && st_index[i] == -1) {
2836  av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
2837  st_index[i] = INT_MAX;
2838  }
2839  }
2840 
2841  if (!video_disable)
2842  st_index[AVMEDIA_TYPE_VIDEO] =
2843  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2844  st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2845  if (!audio_disable)
2846  st_index[AVMEDIA_TYPE_AUDIO] =
2847  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2848  st_index[AVMEDIA_TYPE_AUDIO],
2849  st_index[AVMEDIA_TYPE_VIDEO],
2850  NULL, 0);
2851  if (!subtitle_disable)
2852  st_index[AVMEDIA_TYPE_SUBTITLE] =
2853  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2854  st_index[AVMEDIA_TYPE_SUBTITLE],
2855  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2856  st_index[AVMEDIA_TYPE_AUDIO] :
2857  st_index[AVMEDIA_TYPE_VIDEO]),
2858  NULL, 0);
2859 
2860  is->show_mode = show_mode;
2861  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2862  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2863  AVCodecParameters *codecpar = st->codecpar;
2864  AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
2865  if (codecpar->width)
2866  set_default_window_size(codecpar->width, codecpar->height, sar);
2867  }
2868 
2869  /* open the streams */
2870  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2871  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2872  }
2873 
2874  ret = -1;
2875  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2876  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2877  }
2878  if (is->show_mode == SHOW_MODE_NONE)
2879  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2880 
2881  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2882  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2883  }
2884 
2885  if (is->video_stream < 0 && is->audio_stream < 0) {
2886  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
2887  is->filename);
2888  ret = -1;
2889  goto fail;
2890  }
2891 
2892  if (infinite_buffer < 0 && is->realtime)
2893  infinite_buffer = 1;
2894 
2895  for (;;) {
2896  if (is->abort_request)
2897  break;
2898  if (is->paused != is->last_paused) {
2899  is->last_paused = is->paused;
2900  if (is->paused)
2901  is->read_pause_return = av_read_pause(ic);
2902  else
2903  av_read_play(ic);
2904  }
2905 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2906  if (is->paused &&
2907  (!strcmp(ic->iformat->name, "rtsp") ||
2908  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2909  /* wait 10 ms to avoid trying to get another packet */
2910  /* XXX: horrible */
2911  SDL_Delay(10);
2912  continue;
2913  }
2914 #endif
2915  if (is->seek_req) {
2916  int64_t seek_target = is->seek_pos;
2917  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2918  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2919 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2920 // of the seek_pos/seek_rel variables
2921 
2922  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2923  if (ret < 0) {
2924  av_log(NULL, AV_LOG_ERROR,
2925  "%s: error while seeking\n", is->ic->url);
2926  } else {
2927  if (is->audio_stream >= 0)
2928  packet_queue_flush(&is->audioq);
2929  if (is->subtitle_stream >= 0)
2930  packet_queue_flush(&is->subtitleq);
2931  if (is->video_stream >= 0)
2932  packet_queue_flush(&is->videoq);
2933  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
2934  set_clock(&is->extclk, NAN, 0);
2935  } else {
2936  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
2937  }
2938  }
2939  is->seek_req = 0;
2940  is->queue_attachments_req = 1;
2941  is->eof = 0;
2942  if (is->paused)
2943  step_to_next_frame(is);
2944  }
2945  if (is->queue_attachments_req) {
2946  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
2947  if ((ret = av_packet_ref(pkt, &is->video_st->attached_pic)) < 0)
2948  goto fail;
2949  packet_queue_put(&is->videoq, pkt);
2950  packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
2951  }
2952  is->queue_attachments_req = 0;
2953  }
2954 
2955  /* if the queues are full, no need to read more */
2956  if (infinite_buffer<1 &&
2957  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2958  || (stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq) &&
2959  stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq) &&
2960  stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq)))) {
2961  /* wait 10 ms */
2962  SDL_LockMutex(wait_mutex);
2963  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2964  SDL_UnlockMutex(wait_mutex);
2965  continue;
2966  }
2967  if (!is->paused &&
2968  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
2969  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
2970  if (loop != 1 && (!loop || --loop)) {
2971  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2972  } else if (autoexit) {
2973  ret = AVERROR_EOF;
2974  goto fail;
2975  }
2976  }
2977  ret = av_read_frame(ic, pkt);
2978  if (ret < 0) {
2979  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
2980  if (is->video_stream >= 0)
2981  packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
2982  if (is->audio_stream >= 0)
2983  packet_queue_put_nullpacket(&is->audioq, pkt, is->audio_stream);
2984  if (is->subtitle_stream >= 0)
2985  packet_queue_put_nullpacket(&is->subtitleq, pkt, is->subtitle_stream);
2986  is->eof = 1;
2987  }
2988  if (ic->pb && ic->pb->error) {
2989  if (autoexit)
2990  goto fail;
2991  else
2992  break;
2993  }
2994  SDL_LockMutex(wait_mutex);
2995  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2996  SDL_UnlockMutex(wait_mutex);
2997  continue;
2998  } else {
2999  is->eof = 0;
3000  }
3001  /* check if packet is in play range specified by user, then queue, otherwise discard */
3002  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3003  pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
3004  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3005  (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3006  av_q2d(ic->streams[pkt->stream_index]->time_base) -
3007  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3008  <= ((double)duration / 1000000);
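 /* In words: when a -t duration is set, keep the packet only if its timestamp,
  * converted to seconds relative to the stream start and offset by -ss, still
  * lies within the requested duration. */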
3009  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3010  packet_queue_put(&is->audioq, pkt);
3011  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3012  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3013  packet_queue_put(&is->videoq, pkt);
3014  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3015  packet_queue_put(&is->subtitleq, pkt);
3016  } else {
3017  av_packet_unref(pkt);
3018  }
3019  }
3020 
3021  ret = 0;
3022  fail:
3023  if (ic && !is->ic)
3024  avformat_close_input(&ic);
3025 
3026  av_packet_free(&pkt);
3027  if (ret != 0) {
3028  SDL_Event event;
3029 
3030  event.type = FF_QUIT_EVENT;
3031  event.user.data1 = is;
3032  SDL_PushEvent(&event);
3033  }
3034  SDL_DestroyMutex(wait_mutex);
3035  return 0;
3036 }
3037 
3038 static VideoState *stream_open(const char *filename,
3039  const AVInputFormat *iformat)
3040 {
3041  VideoState *is;
3042 
3043  is = av_mallocz(sizeof(VideoState));
3044  if (!is)
3045  return NULL;
3046  is->last_video_stream = is->video_stream = -1;
3047  is->last_audio_stream = is->audio_stream = -1;
3048  is->last_subtitle_stream = is->subtitle_stream = -1;
3049  is->filename = av_strdup(filename);
3050  if (!is->filename)
3051  goto fail;
3052  is->iformat = iformat;
3053  is->ytop = 0;
3054  is->xleft = 0;
3055 
3056  /* start video display */
3057  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3058  goto fail;
3059  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3060  goto fail;
3061  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3062  goto fail;
3063 
3064  if (packet_queue_init(&is->videoq) < 0 ||
3065  packet_queue_init(&is->audioq) < 0 ||
3066  packet_queue_init(&is->subtitleq) < 0)
3067  goto fail;
3068 
3069  if (!(is->continue_read_thread = SDL_CreateCond())) {
3070  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
3071  goto fail;
3072  }
3073 
3074  init_clock(&is->vidclk, &is->videoq.serial);
3075  init_clock(&is->audclk, &is->audioq.serial);
3076  init_clock(&is->extclk, &is->extclk.serial);
3077  is->audio_clock_serial = -1;
3078  if (startup_volume < 0)
3079  av_log(NULL, AV_LOG_WARNING, "-volume=%d < 0, setting to 0\n", startup_volume);
3080  if (startup_volume > 100)
3081  av_log(NULL, AV_LOG_WARNING, "-volume=%d > 100, setting to 100\n", startup_volume);
3082  startup_volume = av_clip(startup_volume, 0, 100);
3083  startup_volume = av_clip(SDL_MIX_MAXVOLUME * startup_volume / 100, 0, SDL_MIX_MAXVOLUME);
3084  is->audio_volume = startup_volume;
3085  is->muted = 0;
3086  is->av_sync_type = av_sync_type;
3087  is->read_tid = SDL_CreateThread(read_thread, "read_thread", is);
3088  if (!is->read_tid) {
3089  av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
3090 fail:
3091  stream_close(is);
3092  return NULL;
3093  }
3094  return is;
3095 }
3096 
3097 static void stream_cycle_channel(VideoState *is, int codec_type)
3098 {
3099  AVFormatContext *ic = is->ic;
3100  int start_index, stream_index;
3101  int old_index;
3102  AVStream *st;
3103  AVProgram *p = NULL;
3104  int nb_streams = is->ic->nb_streams;
3105 
3106  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3107  start_index = is->last_video_stream;
3108  old_index = is->video_stream;
3109  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3110  start_index = is->last_audio_stream;
3111  old_index = is->audio_stream;
3112  } else {
3113  start_index = is->last_subtitle_stream;
3114  old_index = is->subtitle_stream;
3115  }
3116  stream_index = start_index;
3117 
3118  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3119  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3120  if (p) {
3121  nb_streams = p->nb_stream_indexes;
3122  for (start_index = 0; start_index < nb_streams; start_index++)
3123  if (p->stream_index[start_index] == stream_index)
3124  break;
3125  if (start_index == nb_streams)
3126  start_index = -1;
3127  stream_index = start_index;
3128  }
3129  }
3130 
3131  for (;;) {
3132  if (++stream_index >= nb_streams)
3133  {
3134  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3135  {
3136  stream_index = -1;
3137  is->last_subtitle_stream = -1;
3138  goto the_end;
3139  }
3140  if (start_index == -1)
3141  return;
3142  stream_index = 0;
3143  }
3144  if (stream_index == start_index)
3145  return;
3146  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3147  if (st->codecpar->codec_type == codec_type) {
3148  /* check that parameters are OK */
3149  switch (codec_type) {
3150  case AVMEDIA_TYPE_AUDIO:
3151  if (st->codecpar->sample_rate != 0 &&
3152  st->codecpar->ch_layout.nb_channels != 0)
3153  goto the_end;
3154  break;
3155  case AVMEDIA_TYPE_VIDEO:
3156  case AVMEDIA_TYPE_SUBTITLE:
3157  goto the_end;
3158  default:
3159  break;
3160  }
3161  }
3162  }
3163  the_end:
3164  if (p && stream_index != -1)
3165  stream_index = p->stream_index[stream_index];
3166  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3167  av_get_media_type_string(codec_type),
3168  old_index,
3169  stream_index);
3170 
3171  stream_component_close(is, old_index);
3172  stream_component_open(is, stream_index);
3173 }
3174 
3175 
3176 static void toggle_full_screen(VideoState *is)
3177 {
3178  is_full_screen = !is_full_screen;
3179  SDL_SetWindowFullscreen(window, is_full_screen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0);
3180 }
3181 
3182 static void toggle_audio_display(VideoState *is)
3183 {
3184  int next = is->show_mode;
3185  do {
3186  next = (next + 1) % SHOW_MODE_NB;
3187  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3188  if (is->show_mode != next) {
3189  is->force_refresh = 1;
3190  is->show_mode = next;
3191  }
3192 }
3193 
3194 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3195  double remaining_time = 0.0;
3196  SDL_PumpEvents();
3197  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
3198  if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3199  SDL_ShowCursor(0);
3200  cursor_hidden = 1;
3201  }
3202  if (remaining_time > 0.0)
3203  av_usleep((int64_t)(remaining_time * 1000000.0));
3204  remaining_time = REFRESH_RATE;
3205  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3206  video_refresh(is, &remaining_time);
3207  SDL_PumpEvents();
3208  }
3209 }
3210 
3211 static void seek_chapter(VideoState *is, int incr)
3212 {
3213  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3214  int i;
3215 
3216  if (!is->ic->nb_chapters)
3217  return;
3218 
3219  /* find the current chapter */
3220  for (i = 0; i < is->ic->nb_chapters; i++) {
3221  AVChapter *ch = is->ic->chapters[i];
3222  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3223  i--;
3224  break;
3225  }
3226  }
3227 
3228  i += incr;
3229  i = FFMAX(i, 0);
3230  if (i >= is->ic->nb_chapters)
3231  return;
3232 
3233  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3234  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3235  AV_TIME_BASE_Q), 0, 0);
3236 }
3237 
3238 /* handle an event sent by the GUI */
3239 static void event_loop(VideoState *cur_stream)
3240 {
3241  SDL_Event event;
3242  double incr, pos, frac;
3243 
3244  for (;;) {
3245  double x;
3246  refresh_loop_wait_event(cur_stream, &event);
3247  switch (event.type) {
3248  case SDL_KEYDOWN:
3249  if (exit_on_keydown || event.key.keysym.sym == SDLK_ESCAPE || event.key.keysym.sym == SDLK_q) {
3250  do_exit(cur_stream);
3251  break;
3252  }
3253  // If we don't yet have a window, skip all key events, because read_thread might still be initializing...
3254  if (!cur_stream->width)
3255  continue;
3256  switch (event.key.keysym.sym) {
3257  case SDLK_f:
3258  toggle_full_screen(cur_stream);
3259  cur_stream->force_refresh = 1;
3260  break;
3261  case SDLK_p:
3262  case SDLK_SPACE:
3263  toggle_pause(cur_stream);
3264  break;
3265  case SDLK_m:
3266  toggle_mute(cur_stream);
3267  break;
3268  case SDLK_KP_MULTIPLY:
3269  case SDLK_0:
3270  update_volume(cur_stream, 1, SDL_VOLUME_STEP);
3271  break;
3272  case SDLK_KP_DIVIDE:
3273  case SDLK_9:
3274  update_volume(cur_stream, -1, SDL_VOLUME_STEP);
3275  break;
3276  case SDLK_s: // S: Step to next frame
3277  step_to_next_frame(cur_stream);
3278  break;
3279  case SDLK_a:
3280  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3281  break;
3282  case SDLK_v:
3283  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3284  break;
3285  case SDLK_c:
3286  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3287  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3288  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3289  break;
3290  case SDLK_t:
3291  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3292  break;
3293  case SDLK_w:
3294  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3295  if (++cur_stream->vfilter_idx >= nb_vfilters)
3296  cur_stream->vfilter_idx = 0;
3297  } else {
3298  cur_stream->vfilter_idx = 0;
3299  toggle_audio_display(cur_stream);
3300  }
3301  break;
3302  case SDLK_PAGEUP:
3303  if (cur_stream->ic->nb_chapters <= 1) {
3304  incr = 600.0;
3305  goto do_seek;
3306  }
3307  seek_chapter(cur_stream, 1);
3308  break;
3309  case SDLK_PAGEDOWN:
3310  if (cur_stream->ic->nb_chapters <= 1) {
3311  incr = -600.0;
3312  goto do_seek;
3313  }
3314  seek_chapter(cur_stream, -1);
3315  break;
3316  case SDLK_LEFT:
3317  incr = seek_interval ? -seek_interval : -10.0;
3318  goto do_seek;
3319  case SDLK_RIGHT:
3320  incr = seek_interval ? seek_interval : 10.0;
3321  goto do_seek;
3322  case SDLK_UP:
3323  incr = 60.0;
3324  goto do_seek;
3325  case SDLK_DOWN:
3326  incr = -60.0;
3327  do_seek:
3328  if (seek_by_bytes) {
3329  pos = -1;
3330  if (pos < 0 && cur_stream->video_stream >= 0)
3331  pos = frame_queue_last_pos(&cur_stream->pictq);
3332  if (pos < 0 && cur_stream->audio_stream >= 0)
3333  pos = frame_queue_last_pos(&cur_stream->sampq);
3334  if (pos < 0)
3335  pos = avio_tell(cur_stream->ic->pb);
3336  if (cur_stream->ic->bit_rate)
3337  incr *= cur_stream->ic->bit_rate / 8.0;
3338  else
3339  incr *= 180000.0;
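 /* 180000 bytes/s (~1.4 Mbit/s) is the fallback rate assumed for byte-based
  * seeking when the container reports no bitrate. */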
3340  pos += incr;
3341  stream_seek(cur_stream, pos, incr, 1);
3342  } else {
3343  pos = get_master_clock(cur_stream);
3344  if (isnan(pos))
3345  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3346  pos += incr;
3347  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3348  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3349  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3350  }
3351  break;
3352  default:
3353  break;
3354  }
3355  break;
3356  case SDL_MOUSEBUTTONDOWN:
3357  if (exit_on_mousedown) {
3358  do_exit(cur_stream);
3359  break;
3360  }
3361  if (event.button.button == SDL_BUTTON_LEFT) {
3362  static int64_t last_mouse_left_click = 0;
3363  if (av_gettime_relative() - last_mouse_left_click <= 500000) {
3364  toggle_full_screen(cur_stream);
3365  cur_stream->force_refresh = 1;
3366  last_mouse_left_click = 0;
3367  } else {
3368  last_mouse_left_click = av_gettime_relative();
3369  }
3370  }
3371  case SDL_MOUSEMOTION:
3372  if (cursor_hidden) {
3373  SDL_ShowCursor(1);
3374  cursor_hidden = 0;
3375  }
3376  cursor_last_shown = av_gettime_relative();
3377  if (event.type == SDL_MOUSEBUTTONDOWN) {
3378  if (event.button.button != SDL_BUTTON_RIGHT)
3379  break;
3380  x = event.button.x;
3381  } else {
3382  if (!(event.motion.state & SDL_BUTTON_RMASK))
3383  break;
3384  x = event.motion.x;
3385  }
3386  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3387  uint64_t size = avio_size(cur_stream->ic->pb);
3388  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3389  } else {
3390  int64_t ts;
3391  int ns, hh, mm, ss;
3392  int tns, thh, tmm, tss;
3393  tns = cur_stream->ic->duration / 1000000LL;
3394  thh = tns / 3600;
3395  tmm = (tns % 3600) / 60;
3396  tss = (tns % 60);
3397  frac = x / cur_stream->width;
3398  ns = frac * tns;
3399  hh = ns / 3600;
3400  mm = (ns % 3600) / 60;
3401  ss = (ns % 60);
3402  av_log(NULL, AV_LOG_INFO,
3403  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3404  hh, mm, ss, thh, tmm, tss);
3405  ts = frac * cur_stream->ic->duration;
3406  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3407  ts += cur_stream->ic->start_time;
3408  stream_seek(cur_stream, ts, 0, 0);
3409  }
3410  break;
3411  case SDL_WINDOWEVENT:
3412  switch (event.window.event) {
3413  case SDL_WINDOWEVENT_SIZE_CHANGED:
3414  screen_width = cur_stream->width = event.window.data1;
3415  screen_height = cur_stream->height = event.window.data2;
3416  if (cur_stream->vis_texture) {
3417  SDL_DestroyTexture(cur_stream->vis_texture);
3418  cur_stream->vis_texture = NULL;
3419  }
3420  case SDL_WINDOWEVENT_EXPOSED:
3421  cur_stream->force_refresh = 1;
3422  }
3423  break;
3424  case SDL_QUIT:
3425  case FF_QUIT_EVENT:
3426  do_exit(cur_stream);
3427  break;
3428  default:
3429  break;
3430  }
3431  }
3432 }
3433 
3434 static int opt_width(void *optctx, const char *opt, const char *arg)
3435 {
3436  screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3437  return 0;
3438 }
3439 
3440 static int opt_height(void *optctx, const char *opt, const char *arg)
3441 {
3442  screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3443  return 0;
3444 }
3445 
3446 static int opt_format(void *optctx, const char *opt, const char *arg)
3447 {
3448  file_iformat = av_find_input_format(arg);
3449  if (!file_iformat) {
3450  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3451  return AVERROR(EINVAL);
3452  }
3453  return 0;
3454 }
3455 
3456 static int opt_sync(void *optctx, const char *opt, const char *arg)
3457 {
3458  if (!strcmp(arg, "audio"))
3459  av_sync_type = AV_SYNC_AUDIO_MASTER;
3460  else if (!strcmp(arg, "video"))
3461  av_sync_type = AV_SYNC_VIDEO_MASTER;
3462  else if (!strcmp(arg, "ext"))
3463  av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3464  else {
3465  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3466  exit(1);
3467  }
3468  return 0;
3469 }
3470 
3471 static int opt_seek(void *optctx, const char *opt, const char *arg)
3472 {
3473  start_time = parse_time_or_die(opt, arg, 1);
3474  return 0;
3475 }
3476 
3477 static int opt_duration(void *optctx, const char *opt, const char *arg)
3478 {
3479  duration = parse_time_or_die(opt, arg, 1);
3480  return 0;
3481 }
3482 
3483 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3484 {
3485  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3486  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3487  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3488  parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3489  return 0;
3490 }
3491 
3492 static void opt_input_file(void *optctx, const char *filename)
3493 {
3494  if (input_filename) {
3495  av_log(NULL, AV_LOG_FATAL,
3496  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3497  filename, input_filename);
3498  exit(1);
3499  }
3500  if (!strcmp(filename, "-"))
3501  filename = "fd:";
3502  input_filename = filename;
3503 }
3504 
3505 static int opt_codec(void *optctx, const char *opt, const char *arg)
3506 {
3507  const char *spec = strchr(opt, ':');
3508  if (!spec) {
3509  av_log(NULL, AV_LOG_ERROR,
3510  "No media specifier was specified in '%s' in option '%s'\n",
3511  arg, opt);
3512  return AVERROR(EINVAL);
3513  }
3514  spec++;
3515  switch (spec[0]) {
3516  case 'a' : audio_codec_name = arg; break;
3517  case 's' : subtitle_codec_name = arg; break;
3518  case 'v' : video_codec_name = arg; break;
3519  default:
3520  av_log(NULL, AV_LOG_ERROR,
3521  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3522  return AVERROR(EINVAL);
3523  }
3524  return 0;
3525 }
3526 
3527 static int dummy;
3528 
3529 static const OptionDef options[] = {
3530  CMDUTILS_COMMON_OPTIONS
3531  { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3532  { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3533  { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3534  { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3535  { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3536  { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3537  { "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3538  { "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3539  { "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3540  { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3541  { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3542  { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3543  { "seek_interval", OPT_FLOAT | HAS_ARG, { &seek_interval }, "set seek interval for left/right keys, in seconds", "seconds" },
3544  { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3545  { "noborder", OPT_BOOL, { &borderless }, "borderless window" },
3546  { "alwaysontop", OPT_BOOL, { &alwaysontop }, "window always on top" },
3547  { "volume", OPT_INT | HAS_ARG, { &startup_volume}, "set startup volume 0=min 100=max", "volume" },
3548  { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3549  { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3550  { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3551  { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3552  { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3553  { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3554  { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3555  { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3556  { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3557  { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3558  { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3559  { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3560  { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3561  { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3562  { "left", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_left }, "set the x position for the left of the window", "x pos" },
3563  { "top", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_top }, "set the y position for the top of the window", "y pos" },
3564  { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3565  { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3566  { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3567  { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3568  { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3569  { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3570  { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3571  { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3572  { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3573  { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
3574  { "find_stream_info", OPT_BOOL | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
3575  "read and decode the streams to fill missing information with heuristics" },
3576  { "filter_threads", HAS_ARG | OPT_INT | OPT_EXPERT, { &filter_nbthreads }, "number of filter threads per graph" },
3577  { NULL, },
3578 };
3579 
3580 static void show_usage(void)
3581 {
3582  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3583  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3584  av_log(NULL, AV_LOG_INFO, "\n");
3585 }
3586 
3587 void show_help_default(const char *opt, const char *arg)
3588 {
3589  av_log_set_callback(log_callback_help);
3590  show_usage();
3591  show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3592  show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3593  printf("\n");
3594  show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3595  show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3596  show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3597  printf("\nWhile playing:\n"
3598  "q, ESC quit\n"
3599  "f toggle full screen\n"
3600  "p, SPC pause\n"
3601  "m toggle mute\n"
3602  "9, 0 decrease and increase volume respectively\n"
3603  "/, * decrease and increase volume respectively\n"
3604  "a cycle audio channel in the current program\n"
3605  "v cycle video channel\n"
3606  "t cycle subtitle channel in the current program\n"
3607  "c cycle program\n"
3608  "w cycle video filters or show modes\n"
3609  "s activate frame-step mode\n"
3610  "left/right seek backward/forward 10 seconds or to custom interval if -seek_interval is set\n"
3611  "down/up seek backward/forward 1 minute\n"
3612  "page down/page up seek backward/forward 10 minutes\n"
3613  "right mouse click seek to percentage in file corresponding to fraction of width\n"
3614  "left double-click toggle full screen\n"
3615  );
3616 }
3617 
3618 /* Called from the main */
3619 int main(int argc, char **argv)
3620 {
3621  int flags;
3622  VideoState *is;
3623 
3624  init_dynload();
3625 
3626  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3627  parse_loglevel(argc, argv, options);
3628 
3629  /* register all codecs, demux and protocols */
3630 #if CONFIG_AVDEVICE
3631  avdevice_register_all();
3632 #endif
3633  avformat_network_init();
3634 
3635  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3636  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3637 
3638  show_banner(argc, argv, options);
3639 
3640  parse_options(NULL, argc, argv, options, opt_input_file);
3641 
3642  if (!input_filename) {
3643  show_usage();
3644  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3645  av_log(NULL, AV_LOG_FATAL,
3646  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3647  exit(1);
3648  }
3649 
3650  if (display_disable) {
3651  video_disable = 1;
3652  }
3653  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3654  if (audio_disable)
3655  flags &= ~SDL_INIT_AUDIO;
3656  else {
3657  /* Try to work around an occasional ALSA buffer underflow issue when the
3658  * period size is NPOT due to ALSA resampling by forcing the buffer size. */
3659  if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
3660  SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1);
3661  }
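/* SDL_setenv() is called with overwrite=1, but the SDL_getenv() guard above
 * leaves any value the user has already exported untouched; the workaround
 * only takes effect when SDL_AUDIO_ALSA_SET_BUFFER_SIZE is not set at all. */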
3662  if (display_disable)
3663  flags &= ~SDL_INIT_VIDEO;
3664  if (SDL_Init (flags)) {
3665  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3666  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3667  exit(1);
3668  }
3669 
3670  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3671  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
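/* Drop window-manager (SDL_SYSWMEVENT) and plain SDL_USEREVENT events at the
 * source so they never accumulate in the event queue; FF_QUIT_EVENT uses a
 * distinct user event code and is not affected by this. */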
3672 
3673  if (!display_disable) {
3674  int flags = SDL_WINDOW_HIDDEN;
3675  if (alwaysontop)
3676 #if SDL_VERSION_ATLEAST(2,0,5)
3677  flags |= SDL_WINDOW_ALWAYS_ON_TOP;
3678 #else
3679  av_log(NULL, AV_LOG_WARNING, "Your SDL version doesn't support SDL_WINDOW_ALWAYS_ON_TOP. Feature will be inactive.\n");
3680 #endif
3681  if (borderless)
3682  flags |= SDL_WINDOW_BORDERLESS;
3683  else
3684  flags |= SDL_WINDOW_RESIZABLE;
3685 
3686 #ifdef SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR
3687  SDL_SetHint(SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR, "0");
3688 #endif
3689  window = SDL_CreateWindow(program_name, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, default_width, default_height, flags);
3690  SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
3691  if (window) {
3692  renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
3693  if (!renderer) {
3694  av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
3695  renderer = SDL_CreateRenderer(window, -1, 0);
3696  }
3697  if (renderer) {
3698  if (!SDL_GetRendererInfo(renderer, &renderer_info))
3699  av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", renderer_info.name);
3700  }
3701  }
3702  if (!window || !renderer || !renderer_info.num_texture_formats) {
3703  av_log(NULL, AV_LOG_FATAL, "Failed to create window or renderer: %s", SDL_GetError());
3704  do_exit(NULL);
3705  }
3706  }
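/* The block above prefers a hardware-accelerated, vsync'd renderer and only
 * falls back to whatever renderer SDL can still create (typically software)
 * before giving up; a standalone sketch of the same pattern follows main(). */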
3707 
3708  is = stream_open(input_filename, file_iformat);
3709  if (!is) {
3710  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3711  do_exit(NULL);
3712  }
3713 
3714  event_loop(is);
3715 
3716  /* never returns */
3717 
3718  return 0;
3719 }
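A minimal, self-contained sketch (not part of ffplay.c) of the window/renderer
fallback pattern used in main() above: request a hardware-accelerated, vsync'd
renderer first, then accept any renderer SDL can provide. It assumes an SDL2
development setup; the window title and the 640x480 size are illustrative only.

#include <SDL.h>

int main(void)
{
    SDL_Window   *win = NULL;
    SDL_Renderer *ren = NULL;

    if (SDL_Init(SDL_INIT_VIDEO)) {
        SDL_Log("Could not initialize SDL - %s", SDL_GetError());
        return 1;
    }

    win = SDL_CreateWindow("renderer fallback sketch",
                           SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
                           640, 480, SDL_WINDOW_HIDDEN | SDL_WINDOW_RESIZABLE);
    if (win) {
        /* Prefer an accelerated renderer with vertical sync... */
        ren = SDL_CreateRenderer(win, -1,
                                 SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
        if (!ren) {
            SDL_Log("No accelerated renderer: %s", SDL_GetError());
            /* ...and fall back to any renderer SDL can create. */
            ren = SDL_CreateRenderer(win, -1, 0);
        }
    }
    if (!win || !ren) {
        SDL_Log("Failed to create window or renderer: %s", SDL_GetError());
        SDL_Quit();
        return 1;
    }

    SDL_ShowWindow(win);
    SDL_Delay(2000);   /* keep the window up briefly so the result is visible */

    SDL_DestroyRenderer(ren);
    SDL_DestroyWindow(win);
    SDL_Quit();
    return 0;
}

On systems that ship sdl2-config, something like "cc sketch.c $(sdl2-config
--cflags --libs)" should be enough to build it.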