ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include <stdint.h>
32 
33 #include "libavutil/avstring.h"
34 #include "libavutil/eval.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/dict.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/samplefmt.h"
41 #include "libavutil/avassert.h"
42 #include "libavutil/time.h"
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswscale/swscale.h"
46 #include "libavutil/opt.h"
47 #include "libavcodec/avfft.h"
48 #include "libswresample/swresample.h"
49 
50 #if CONFIG_AVFILTER
51 # include "libavfilter/avfilter.h"
52 # include "libavfilter/buffersink.h"
53 # include "libavfilter/buffersrc.h"
54 #endif
55 
56 #include <SDL.h>
57 #include <SDL_thread.h>
58 
59 #include "cmdutils.h"
60 
61 #include <assert.h>
62 
63 const char program_name[] = "ffplay";
64 const int program_birth_year = 2003;
65 
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_FRAMES 25
68 #define EXTERNAL_CLOCK_MIN_FRAMES 2
69 #define EXTERNAL_CLOCK_MAX_FRAMES 10
70 
71 /* Minimum SDL audio buffer size, in samples. */
72 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
73 /* Calculate actual buffer size, keeping in mind that it should not cause too frequent audio callbacks */
74 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
75 
76 /* Step size for volume control */
77 #define SDL_VOLUME_STEP (SDL_MIX_MAXVOLUME / 50)
78 
79 /* no AV sync correction is done if below the minimum AV sync threshold */
80 #define AV_SYNC_THRESHOLD_MIN 0.04
81 /* AV sync correction is done if above the maximum AV sync threshold */
82 #define AV_SYNC_THRESHOLD_MAX 0.1
83 /* If a frame duration is longer than this, it will not be duplicated to compensate AV sync */
84 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
85 /* no AV correction is done if the error is too big */
86 #define AV_NOSYNC_THRESHOLD 10.0
87 
88 /* maximum audio speed change to get correct sync */
89 #define SAMPLE_CORRECTION_PERCENT_MAX 10
90 
91 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
92 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
93 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
94 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
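/* These three constants drive check_external_clock_speed() below: the external
 * clock is slowed towards SPEED_MIN while the packet queues are starved, sped up
 * towards SPEED_MAX while they are over-full, and otherwise nudged back to 1.0
 * in SPEED_STEP increments. */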
95 
96 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
97 #define AUDIO_DIFF_AVG_NB 20
98 
99 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
100 #define REFRESH_RATE 0.01
101 
102 /* NOTE: the size must be big enough to compensate the hardware audio buffer size */
103 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
104 #define SAMPLE_ARRAY_SIZE (8 * 65536)
105 
106 #define CURSOR_HIDE_DELAY 1000000
107 
108 #define USE_ONEPASS_SUBTITLE_RENDER 1
109 
110 static unsigned sws_flags = SWS_BICUBIC;
111 
112 typedef struct MyAVPacketList {
113  AVPacket pkt;
114  struct MyAVPacketList *next;
115  int serial;
116 } MyAVPacketList;
117 
118 typedef struct PacketQueue {
119  MyAVPacketList *first_pkt, *last_pkt;
120  int nb_packets;
121  int size;
122  int64_t duration;
123  int abort_request;
124  int serial;
125  SDL_mutex *mutex;
126  SDL_cond *cond;
127 } PacketQueue;
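/* How stale data is invalidated: packet_queue_put_private() increments q->serial
 * whenever the special flush_pkt is queued (e.g. after a seek), and stamps every
 * packet with the serial current at enqueue time. Decoders, frames and clocks
 * compare their own serial against the queue's and drop anything that carries an
 * outdated serial, as decoder_decode_frame() below illustrates. */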
128 
129 #define VIDEO_PICTURE_QUEUE_SIZE 3
130 #define SUBPICTURE_QUEUE_SIZE 16
131 #define SAMPLE_QUEUE_SIZE 9
132 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
133 
134 typedef struct AudioParams {
135  int freq;
136  int channels;
137  int64_t channel_layout;
138  enum AVSampleFormat fmt;
139  int frame_size;
140  int bytes_per_sec;
141 } AudioParams;
142 
143 typedef struct Clock {
144  double pts; /* clock base */
145  double pts_drift; /* clock base minus time at which we updated the clock */
146  double last_updated;
147  double speed;
148  int serial; /* clock is based on a packet with this serial */
149  int paused;
150  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
151 } Clock;
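/* Reading such a clock (see get_clock() below): the current value is essentially
 * pts_drift + now, corrected by the configured speed, so the clock keeps advancing
 * in real time between set_clock() updates without being touched every frame. */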
152 
153 /* Common struct for handling all types of decoded data and allocated render buffers. */
154 typedef struct Frame {
155  AVFrame *frame;
156  AVSubtitle sub;
157  int serial;
158  double pts; /* presentation timestamp for the frame */
159  double duration; /* estimated duration of the frame */
160  int64_t pos; /* byte position of the frame in the input file */
161  SDL_Texture *bmp;
162  int allocated;
163  int width;
164  int height;
165  int format;
166  AVRational sar;
167  int uploaded;
168  int flip_v;
169 } Frame;
170 
171 typedef struct FrameQueue {
172  Frame queue[FRAME_QUEUE_SIZE];
173  int rindex;
174  int windex;
175  int size;
176  int max_size;
177  int keep_last;
178  int rindex_shown;
179  SDL_mutex *mutex;
180  SDL_cond *cond;
181  PacketQueue *pktq;
182 } FrameQueue;
183 
184 enum {
185  AV_SYNC_AUDIO_MASTER, /* default choice */
186  AV_SYNC_VIDEO_MASTER,
187  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
188 };
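/* Master clock selection (see get_master_sync_type() below): video-master is only
 * honoured while a video stream is open (otherwise the audio clock is used), and
 * audio-master only while an audio stream is open (otherwise playback falls back
 * to the external clock). */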
189 
190 typedef struct Decoder {
191  AVPacket pkt;
192  AVPacket pkt_temp;
193  PacketQueue *queue;
194  AVCodecContext *avctx;
195  int pkt_serial;
196  int finished;
197  int packet_pending;
198  SDL_cond *empty_queue_cond;
199  int64_t start_pts;
200  AVRational start_pts_tb;
201  int64_t next_pts;
202  AVRational next_pts_tb;
203  SDL_Thread *decoder_tid;
204 } Decoder;
205 
206 typedef struct VideoState {
207  SDL_Thread *read_tid;
208  AVInputFormat *iformat;
209  int abort_request;
210  int force_refresh;
211  int paused;
212  int last_paused;
213  int queue_attachments_req;
214  int seek_req;
215  int seek_flags;
216  int64_t seek_pos;
217  int64_t seek_rel;
218  int read_pause_return;
219  AVFormatContext *ic;
220  int realtime;
221 
222  Clock audclk;
223  Clock vidclk;
224  Clock extclk;
225 
226  FrameQueue pictq;
227  FrameQueue subpq;
228  FrameQueue sampq;
229 
230  Decoder auddec;
231  Decoder viddec;
232  Decoder subdec;
233 
234  int audio_stream;
235 
236  int av_sync_type;
237 
238  double audio_clock;
239  int audio_clock_serial;
240  double audio_diff_cum; /* used for AV difference average computation */
241  double audio_diff_avg_coef;
242  double audio_diff_threshold;
243  int audio_diff_avg_count;
244  AVStream *audio_st;
245  PacketQueue audioq;
246  int audio_hw_buf_size;
247  uint8_t *audio_buf;
248  uint8_t *audio_buf1;
249  unsigned int audio_buf_size; /* in bytes */
250  unsigned int audio_buf1_size;
251  int audio_buf_index; /* in bytes */
252  int audio_write_buf_size;
253  int audio_volume;
254  int muted;
255  struct AudioParams audio_src;
256 #if CONFIG_AVFILTER
257  struct AudioParams audio_filter_src;
258 #endif
259  struct AudioParams audio_tgt;
260  struct SwrContext *swr_ctx;
261  int frame_drops_early;
262  int frame_drops_late;
263 
264  enum ShowMode {
265  SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
266  } show_mode;
267  int16_t sample_array[SAMPLE_ARRAY_SIZE];
268  int sample_array_index;
269  int last_i_start;
270  RDFTContext *rdft;
271  int rdft_bits;
272  FFTSample *rdft_data;
273  int xpos;
274  double last_vis_time;
275  SDL_Texture *vis_texture;
276  SDL_Texture *sub_texture;
277 
278  int subtitle_stream;
279  AVStream *subtitle_st;
280  PacketQueue subtitleq;
281 
282  double frame_timer;
283  double frame_last_returned_time;
284  double frame_last_filter_delay;
285  int video_stream;
286  AVStream *video_st;
287  PacketQueue videoq;
288  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
289  struct SwsContext *img_convert_ctx;
290  struct SwsContext *sub_convert_ctx;
291  int eof;
292 
293  char *filename;
294  int width, height, xleft, ytop;
295  int step;
296 
297 #if CONFIG_AVFILTER
298  int vfilter_idx;
299  AVFilterContext *in_video_filter; // the first filter in the video chain
300  AVFilterContext *out_video_filter; // the last filter in the video chain
301  AVFilterContext *in_audio_filter; // the first filter in the audio chain
302  AVFilterContext *out_audio_filter; // the last filter in the audio chain
303  AVFilterGraph *agraph; // audio filter graph
304 #endif
305 
306  int last_video_stream, last_audio_stream, last_subtitle_stream;
307 
308  SDL_cond *continue_read_thread;
309 } VideoState;
310 
311 /* options specified by the user */
312 static AVInputFormat *file_iformat;
313 static const char *input_filename;
314 static const char *window_title;
315 static int default_width = 640;
316 static int default_height = 480;
317 static int screen_width = 0;
318 static int screen_height = 0;
319 static int audio_disable;
320 static int video_disable;
321 static int subtitle_disable;
322 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
323 static int seek_by_bytes = -1;
324 static int display_disable;
325 static int show_status = 1;
326 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
327 static int64_t start_time = AV_NOPTS_VALUE;
328 static int64_t duration = AV_NOPTS_VALUE;
329 static int fast = 0;
330 static int genpts = 0;
331 static int lowres = 0;
332 static int decoder_reorder_pts = -1;
333 static int autoexit;
334 static int exit_on_keydown;
335 static int exit_on_mousedown;
336 static int loop = 1;
337 static int framedrop = -1;
338 static int infinite_buffer = -1;
339 static enum ShowMode show_mode = SHOW_MODE_NONE;
340 static const char *audio_codec_name;
341 static const char *subtitle_codec_name;
342 static const char *video_codec_name;
343 double rdftspeed = 0.02;
344 static int64_t cursor_last_shown;
345 static int cursor_hidden = 0;
346 #if CONFIG_AVFILTER
347 static const char **vfilters_list = NULL;
348 static int nb_vfilters = 0;
349 static char *afilters = NULL;
350 #endif
351 static int autorotate = 1;
352 
353 /* current context */
354 static int is_full_screen;
355 static int64_t audio_callback_time;
356 
357 static AVPacket flush_pkt;
358 
359 #define FF_ALLOC_EVENT (SDL_USEREVENT)
360 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
361 
362 static SDL_Window *window;
363 static SDL_Renderer *renderer;
364 
365 #if CONFIG_AVFILTER
366 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
367 {
368  GROW_ARRAY(vfilters_list, nb_vfilters);
369  vfilters_list[nb_vfilters - 1] = arg;
370  return 0;
371 }
372 #endif
373 
374 static inline
375 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
376  enum AVSampleFormat fmt2, int64_t channel_count2)
377 {
378  /* If channel count == 1, planar and non-planar formats are the same */
379  if (channel_count1 == 1 && channel_count2 == 1)
380  return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
381  else
382  return channel_count1 != channel_count2 || fmt1 != fmt2;
383 }
384 
385 static inline
386 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
387 {
388  if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
389  return channel_layout;
390  else
391  return 0;
392 }
393 
394 static void free_picture(Frame *vp);
395 
396 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
397 {
398  MyAVPacketList *pkt1;
399 
400  if (q->abort_request)
401  return -1;
402 
403  pkt1 = av_malloc(sizeof(MyAVPacketList));
404  if (!pkt1)
405  return -1;
406  pkt1->pkt = *pkt;
407  pkt1->next = NULL;
408  if (pkt == &flush_pkt)
409  q->serial++;
410  pkt1->serial = q->serial;
411 
412  if (!q->last_pkt)
413  q->first_pkt = pkt1;
414  else
415  q->last_pkt->next = pkt1;
416  q->last_pkt = pkt1;
417  q->nb_packets++;
418  q->size += pkt1->pkt.size + sizeof(*pkt1);
419  q->duration += pkt1->pkt.duration;
420  /* XXX: should duplicate packet data in DV case */
421  SDL_CondSignal(q->cond);
422  return 0;
423 }
424 
425 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
426 {
427  int ret;
428 
429  SDL_LockMutex(q->mutex);
430  ret = packet_queue_put_private(q, pkt);
431  SDL_UnlockMutex(q->mutex);
432 
433  if (pkt != &flush_pkt && ret < 0)
434  av_packet_unref(pkt);
435 
436  return ret;
437 }
438 
439 static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
440 {
441  AVPacket pkt1, *pkt = &pkt1;
442  av_init_packet(pkt);
443  pkt->data = NULL;
444  pkt->size = 0;
445  pkt->stream_index = stream_index;
446  return packet_queue_put(q, pkt);
447 }
448 
449 /* packet queue handling */
450 static int packet_queue_init(PacketQueue *q)
451 {
452  memset(q, 0, sizeof(PacketQueue));
453  q->mutex = SDL_CreateMutex();
454  if (!q->mutex) {
455  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
456  return AVERROR(ENOMEM);
457  }
458  q->cond = SDL_CreateCond();
459  if (!q->cond) {
460  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
461  return AVERROR(ENOMEM);
462  }
463  q->abort_request = 1;
464  return 0;
465 }
466 
467 static void packet_queue_flush(PacketQueue *q)
468 {
469  MyAVPacketList *pkt, *pkt1;
470 
471  SDL_LockMutex(q->mutex);
472  for (pkt = q->first_pkt; pkt; pkt = pkt1) {
473  pkt1 = pkt->next;
474  av_packet_unref(&pkt->pkt);
475  av_freep(&pkt);
476  }
477  q->last_pkt = NULL;
478  q->first_pkt = NULL;
479  q->nb_packets = 0;
480  q->size = 0;
481  q->duration = 0;
482  SDL_UnlockMutex(q->mutex);
483 }
484 
485 static void packet_queue_destroy(PacketQueue *q)
486 {
487  packet_queue_flush(q);
488  SDL_DestroyMutex(q->mutex);
489  SDL_DestroyCond(q->cond);
490 }
491 
492 static void packet_queue_abort(PacketQueue *q)
493 {
494  SDL_LockMutex(q->mutex);
495 
496  q->abort_request = 1;
497 
498  SDL_CondSignal(q->cond);
499 
500  SDL_UnlockMutex(q->mutex);
501 }
502 
503 static void packet_queue_start(PacketQueue *q)
504 {
505  SDL_LockMutex(q->mutex);
506  q->abort_request = 0;
507  packet_queue_put_private(q, &flush_pkt);
508  SDL_UnlockMutex(q->mutex);
509 }
510 
511 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
512 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
513 {
514  MyAVPacketList *pkt1;
515  int ret;
516 
517  SDL_LockMutex(q->mutex);
518 
519  for (;;) {
520  if (q->abort_request) {
521  ret = -1;
522  break;
523  }
524 
525  pkt1 = q->first_pkt;
526  if (pkt1) {
527  q->first_pkt = pkt1->next;
528  if (!q->first_pkt)
529  q->last_pkt = NULL;
530  q->nb_packets--;
531  q->size -= pkt1->pkt.size + sizeof(*pkt1);
532  q->duration -= pkt1->pkt.duration;
533  *pkt = pkt1->pkt;
534  if (serial)
535  *serial = pkt1->serial;
536  av_free(pkt1);
537  ret = 1;
538  break;
539  } else if (!block) {
540  ret = 0;
541  break;
542  } else {
543  SDL_CondWait(q->cond, q->mutex);
544  }
545  }
546  SDL_UnlockMutex(q->mutex);
547  return ret;
548 }
549 
550 static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
551  memset(d, 0, sizeof(Decoder));
552  d->avctx = avctx;
553  d->queue = queue;
554  d->empty_queue_cond = empty_queue_cond;
555  d->start_pts = AV_NOPTS_VALUE;
556 }
557 
558 static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
559  int got_frame = 0;
560 
561  do {
562  int ret = -1;
563 
564  if (d->queue->abort_request)
565  return -1;
566 
567  if (!d->packet_pending || d->queue->serial != d->pkt_serial) {
568  AVPacket pkt;
569  do {
570  if (d->queue->nb_packets == 0)
571  SDL_CondSignal(d->empty_queue_cond);
572  if (packet_queue_get(d->queue, &pkt, 1, &d->pkt_serial) < 0)
573  return -1;
574  if (pkt.data == flush_pkt.data) {
575  avcodec_flush_buffers(d->avctx);
576  d->finished = 0;
577  d->next_pts = d->start_pts;
578  d->next_pts_tb = d->start_pts_tb;
579  }
580  } while (pkt.data == flush_pkt.data || d->queue->serial != d->pkt_serial);
581  av_packet_unref(&d->pkt);
582  d->pkt_temp = d->pkt = pkt;
583  d->packet_pending = 1;
584  }
585 
586  switch (d->avctx->codec_type) {
587  case AVMEDIA_TYPE_VIDEO:
588  ret = avcodec_decode_video2(d->avctx, frame, &got_frame, &d->pkt_temp);
589  if (got_frame) {
590  if (decoder_reorder_pts == -1) {
591  frame->pts = av_frame_get_best_effort_timestamp(frame);
592  } else if (!decoder_reorder_pts) {
593  frame->pts = frame->pkt_dts;
594  }
595  }
596  break;
597  case AVMEDIA_TYPE_AUDIO:
598  ret = avcodec_decode_audio4(d->avctx, frame, &got_frame, &d->pkt_temp);
599  if (got_frame) {
600  AVRational tb = (AVRational){1, frame->sample_rate};
601  if (frame->pts != AV_NOPTS_VALUE)
602  frame->pts = av_rescale_q(frame->pts, av_codec_get_pkt_timebase(d->avctx), tb);
603  else if (d->next_pts != AV_NOPTS_VALUE)
604  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
605  if (frame->pts != AV_NOPTS_VALUE) {
606  d->next_pts = frame->pts + frame->nb_samples;
607  d->next_pts_tb = tb;
608  }
609  }
610  break;
611  case AVMEDIA_TYPE_SUBTITLE:
612  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &d->pkt_temp);
613  break;
614  }
615 
616  if (ret < 0) {
617  d->packet_pending = 0;
618  } else {
619  d->pkt_temp.dts =
620  d->pkt_temp.pts = AV_NOPTS_VALUE;
621  if (d->pkt_temp.data) {
622  if (d->avctx->codec_type != AVMEDIA_TYPE_AUDIO)
623  ret = d->pkt_temp.size;
624  d->pkt_temp.data += ret;
625  d->pkt_temp.size -= ret;
626  if (d->pkt_temp.size <= 0)
627  d->packet_pending = 0;
628  } else {
629  if (!got_frame) {
630  d->packet_pending = 0;
631  d->finished = d->pkt_serial;
632  }
633  }
634  }
635  } while (!got_frame && !d->finished);
636 
637  return got_frame;
638 }
639 
640 static void decoder_destroy(Decoder *d) {
641  av_packet_unref(&d->pkt);
642  avcodec_free_context(&d->avctx);
643 }
644 
645 static void frame_queue_unref_item(Frame *vp)
646 {
647  av_frame_unref(vp->frame);
648  avsubtitle_free(&vp->sub);
649 }
650 
651 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
652 {
653  int i;
654  memset(f, 0, sizeof(FrameQueue));
655  if (!(f->mutex = SDL_CreateMutex())) {
656  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
657  return AVERROR(ENOMEM);
658  }
659  if (!(f->cond = SDL_CreateCond())) {
660  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
661  return AVERROR(ENOMEM);
662  }
663  f->pktq = pktq;
664  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
665  f->keep_last = !!keep_last;
666  for (i = 0; i < f->max_size; i++)
667  if (!(f->queue[i].frame = av_frame_alloc()))
668  return AVERROR(ENOMEM);
669  return 0;
670 }
671 
672 static void frame_queue_destory(FrameQueue *f)
673 {
674  int i;
675  for (i = 0; i < f->max_size; i++) {
676  Frame *vp = &f->queue[i];
677  frame_queue_unref_item(vp);
678  av_frame_free(&vp->frame);
679  free_picture(vp);
680  }
681  SDL_DestroyMutex(f->mutex);
682  SDL_DestroyCond(f->cond);
683 }
684 
685 static void frame_queue_signal(FrameQueue *f)
686 {
687  SDL_LockMutex(f->mutex);
688  SDL_CondSignal(f->cond);
689  SDL_UnlockMutex(f->mutex);
690 }
691 
692 static Frame *frame_queue_peek(FrameQueue *f)
693 {
694  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
695 }
696 
697 static Frame *frame_queue_peek_next(FrameQueue *f)
698 {
699  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
700 }
701 
702 static Frame *frame_queue_peek_last(FrameQueue *f)
703 {
704  return &f->queue[f->rindex];
705 }
706 
707 static Frame *frame_queue_peek_writable(FrameQueue *f)
708 {
709  /* wait until we have space to put a new frame */
710  SDL_LockMutex(f->mutex);
711  while (f->size >= f->max_size &&
712  !f->pktq->abort_request) {
713  SDL_CondWait(f->cond, f->mutex);
714  }
715  SDL_UnlockMutex(f->mutex);
716 
717  if (f->pktq->abort_request)
718  return NULL;
719 
720  return &f->queue[f->windex];
721 }
722 
723 static Frame *frame_queue_peek_readable(FrameQueue *f)
724 {
725  /* wait until we have a readable new frame */
726  SDL_LockMutex(f->mutex);
727  while (f->size - f->rindex_shown <= 0 &&
728  !f->pktq->abort_request) {
729  SDL_CondWait(f->cond, f->mutex);
730  }
731  SDL_UnlockMutex(f->mutex);
732 
733  if (f->pktq->abort_request)
734  return NULL;
735 
736  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
737 }
738 
739 static void frame_queue_push(FrameQueue *f)
740 {
741  if (++f->windex == f->max_size)
742  f->windex = 0;
743  SDL_LockMutex(f->mutex);
744  f->size++;
745  SDL_CondSignal(f->cond);
746  SDL_UnlockMutex(f->mutex);
747 }
748 
749 static void frame_queue_next(FrameQueue *f)
750 {
751  if (f->keep_last && !f->rindex_shown) {
752  f->rindex_shown = 1;
753  return;
754  }
755  frame_queue_unref_item(&f->queue[f->rindex]);
756  if (++f->rindex == f->max_size)
757  f->rindex = 0;
758  SDL_LockMutex(f->mutex);
759  f->size--;
760  SDL_CondSignal(f->cond);
761  SDL_UnlockMutex(f->mutex);
762 }
763 
764 /* return the number of undisplayed frames in the queue */
765 static int frame_queue_nb_remaining(FrameQueue *f)
766 {
767  return f->size - f->rindex_shown;
768 }
769 
770 /* return last shown position */
771 static int64_t frame_queue_last_pos(FrameQueue *f)
772 {
773  Frame *fp = &f->queue[f->rindex];
774  if (f->rindex_shown && fp->serial == f->pktq->serial)
775  return fp->pos;
776  else
777  return -1;
778 }
779 
780 static void decoder_abort(Decoder *d, FrameQueue *fq)
781 {
782  packet_queue_abort(d->queue);
783  frame_queue_signal(fq);
784  SDL_WaitThread(d->decoder_tid, NULL);
785  d->decoder_tid = NULL;
786  packet_queue_flush(d->queue);
787 }
788 
789 static inline void fill_rectangle(int x, int y, int w, int h)
790 {
791  SDL_Rect rect;
792  rect.x = x;
793  rect.y = y;
794  rect.w = w;
795  rect.h = h;
796  if (w && h)
797  SDL_RenderFillRect(renderer, &rect);
798 }
799 
800 static void free_picture(Frame *vp)
801 {
802  if (vp->bmp) {
803  SDL_DestroyTexture(vp->bmp);
804  vp->bmp = NULL;
805  }
806 }
807 
808 static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
809 {
810  Uint32 format;
811  int access, w, h;
812  if (SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
813  void *pixels;
814  int pitch;
815  SDL_DestroyTexture(*texture);
816  if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
817  return -1;
818  if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
819  return -1;
820  if (init_texture) {
821  if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
822  return -1;
823  memset(pixels, 0, pitch * new_height);
824  SDL_UnlockTexture(*texture);
825  }
826  }
827  return 0;
828 }
829 
830 static void calculate_display_rect(SDL_Rect *rect,
831  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
832  int pic_width, int pic_height, AVRational pic_sar)
833 {
834  float aspect_ratio;
835  int width, height, x, y;
836 
837  if (pic_sar.num == 0)
838  aspect_ratio = 0;
839  else
840  aspect_ratio = av_q2d(pic_sar);
841 
842  if (aspect_ratio <= 0.0)
843  aspect_ratio = 1.0;
844  aspect_ratio *= (float)pic_width / (float)pic_height;
845 
846  /* XXX: we suppose the screen has a 1.0 pixel ratio */
847  height = scr_height;
848  width = lrint(height * aspect_ratio) & ~1;
849  if (width > scr_width) {
850  width = scr_width;
851  height = lrint(width / aspect_ratio) & ~1;
852  }
853  x = (scr_width - width) / 2;
854  y = (scr_height - height) / 2;
855  rect->x = scr_xleft + x;
856  rect->y = scr_ytop + y;
857  rect->w = FFMAX(width, 1);
858  rect->h = FFMAX(height, 1);
859 }
860 
861 static int upload_texture(SDL_Texture *tex, AVFrame *frame, struct SwsContext **img_convert_ctx) {
862  int ret = 0;
863  switch (frame->format) {
864  case AV_PIX_FMT_YUV420P:
865  if (frame->linesize[0] < 0 || frame->linesize[1] < 0 || frame->linesize[2] < 0) {
866  av_log(NULL, AV_LOG_ERROR, "Negative linesize is not supported for YUV.\n");
867  return -1;
868  }
869  ret = SDL_UpdateYUVTexture(tex, NULL, frame->data[0], frame->linesize[0],
870  frame->data[1], frame->linesize[1],
871  frame->data[2], frame->linesize[2]);
872  break;
873  case AV_PIX_FMT_BGRA:
874  if (frame->linesize[0] < 0) {
875  ret = SDL_UpdateTexture(tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
876  } else {
877  ret = SDL_UpdateTexture(tex, NULL, frame->data[0], frame->linesize[0]);
878  }
879  break;
880  default:
881  /* This should only happen if we are not using avfilter... */
882  *img_convert_ctx = sws_getCachedContext(*img_convert_ctx,
883  frame->width, frame->height, frame->format, frame->width, frame->height,
884  AV_PIX_FMT_BGRA, sws_flags, NULL, NULL, NULL);
885  if (*img_convert_ctx != NULL) {
886  uint8_t *pixels;
887  int pitch;
888  if (!SDL_LockTexture(tex, NULL, (void **)&pixels, &pitch)) {
889  sws_scale(*img_convert_ctx, (const uint8_t * const *)frame->data, frame->linesize,
890  0, frame->height, &pixels, &pitch);
891  SDL_UnlockTexture(tex);
892  }
893  } else {
894  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
895  ret = -1;
896  }
897  break;
898  }
899  return ret;
900 }
901 
902 static void video_image_display(VideoState *is)
903 {
904  Frame *vp;
905  Frame *sp = NULL;
906  SDL_Rect rect;
907 
908  vp = frame_queue_peek_last(&is->pictq);
909  if (vp->bmp) {
910  if (is->subtitle_st) {
911  if (frame_queue_nb_remaining(&is->subpq) > 0) {
912  sp = frame_queue_peek(&is->subpq);
913 
914  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
915  if (!sp->uploaded) {
916  uint8_t *pixels;
917  int pitch;
918  int i;
919  if (!sp->width || !sp->height) {
920  sp->width = vp->width;
921  sp->height = vp->height;
922  }
923  if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
924  return;
925 
926  for (i = 0; i < sp->sub.num_rects; i++) {
927  AVSubtitleRect *sub_rect = sp->sub.rects[i];
928 
929  sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
930  sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
931  sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x);
932  sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
933 
934  is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
935  sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8,
936  sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA,
937  0, NULL, NULL, NULL);
938  if (!is->sub_convert_ctx) {
939  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
940  return;
941  }
942  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
943  sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
944  0, sub_rect->h, &pixels, &pitch);
945  SDL_UnlockTexture(is->sub_texture);
946  }
947  }
948  sp->uploaded = 1;
949  }
950  } else
951  sp = NULL;
952  }
953  }
954 
955  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
956 
957  if (!vp->uploaded) {
958  if (upload_texture(vp->bmp, vp->frame, &is->img_convert_ctx) < 0)
959  return;
960  vp->uploaded = 1;
961  vp->flip_v = vp->frame->linesize[0] < 0;
962  }
963 
964  SDL_RenderCopyEx(renderer, vp->bmp, NULL, &rect, 0, NULL, vp->flip_v ? SDL_FLIP_VERTICAL : 0);
965  if (sp) {
966 #if USE_ONEPASS_SUBTITLE_RENDER
967  SDL_RenderCopy(renderer, is->sub_texture, NULL, &rect);
968 #else
969  int i;
970  double xratio = (double)rect.w / (double)sp->width;
971  double yratio = (double)rect.h / (double)sp->height;
972  for (i = 0; i < sp->sub.num_rects; i++) {
973  SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
974  SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
975  .y = rect.y + sub_rect->y * yratio,
976  .w = sub_rect->w * xratio,
977  .h = sub_rect->h * yratio};
978  SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
979  }
980 #endif
981  }
982  }
983 }
984 
985 static inline int compute_mod(int a, int b)
986 {
987  return a < 0 ? a%b + b : a%b;
988 }
989 
990 static void video_audio_display(VideoState *s)
991 {
992  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
993  int ch, channels, h, h2;
994  int64_t time_diff;
995  int rdft_bits, nb_freq;
996 
997  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
998  ;
999  nb_freq = 1 << (rdft_bits - 1);
1000 
1001  /* compute display index : center on currently output samples */
1002  channels = s->audio_tgt.channels;
1003  nb_display_channels = channels;
1004  if (!s->paused) {
1005  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1006  n = 2 * channels;
1007  delay = s->audio_write_buf_size;
1008  delay /= n;
1009 
1010  /* to be more precise, we take into account the time spent since
1011  the last buffer computation */
1012  if (audio_callback_time) {
1013  time_diff = av_gettime_relative() - audio_callback_time;
1014  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1015  }
1016 
1017  delay += 2 * data_used;
1018  if (delay < data_used)
1019  delay = data_used;
1020 
1021  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1022  if (s->show_mode == SHOW_MODE_WAVES) {
1023  h = INT_MIN;
1024  for (i = 0; i < 1000; i += channels) {
1025  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1026  int a = s->sample_array[idx];
1027  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1028  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1029  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1030  int score = a - d;
1031  if (h < score && (b ^ c) < 0) {
1032  h = score;
1033  i_start = idx;
1034  }
1035  }
1036  }
1037 
1038  s->last_i_start = i_start;
1039  } else {
1040  i_start = s->last_i_start;
1041  }
1042 
1043  if (s->show_mode == SHOW_MODE_WAVES) {
1044  SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
1045 
1046  /* total height for one channel */
1047  h = s->height / nb_display_channels;
1048  /* graph height / 2 */
1049  h2 = (h * 9) / 20;
1050  for (ch = 0; ch < nb_display_channels; ch++) {
1051  i = i_start + ch;
1052  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1053  for (x = 0; x < s->width; x++) {
1054  y = (s->sample_array[i] * h2) >> 15;
1055  if (y < 0) {
1056  y = -y;
1057  ys = y1 - y;
1058  } else {
1059  ys = y1;
1060  }
1061  fill_rectangle(s->xleft + x, ys, 1, y);
1062  i += channels;
1063  if (i >= SAMPLE_ARRAY_SIZE)
1064  i -= SAMPLE_ARRAY_SIZE;
1065  }
1066  }
1067 
1068  SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
1069 
1070  for (ch = 1; ch < nb_display_channels; ch++) {
1071  y = s->ytop + ch * h;
1072  fill_rectangle(s->xleft, y, s->width, 1);
1073  }
1074  } else {
1075  if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
1076  return;
1077 
1078  nb_display_channels= FFMIN(nb_display_channels, 2);
1079  if (rdft_bits != s->rdft_bits) {
1080  av_rdft_end(s->rdft);
1081  av_free(s->rdft_data);
1082  s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
1083  s->rdft_bits = rdft_bits;
1084  s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data));
1085  }
1086  if (!s->rdft || !s->rdft_data){
1087  av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1088  s->show_mode = SHOW_MODE_WAVES;
1089  } else {
1090  FFTSample *data[2];
1091  SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
1092  uint32_t *pixels;
1093  int pitch;
1094  for (ch = 0; ch < nb_display_channels; ch++) {
1095  data[ch] = s->rdft_data + 2 * nb_freq * ch;
1096  i = i_start + ch;
1097  for (x = 0; x < 2 * nb_freq; x++) {
1098  double w = (x-nb_freq) * (1.0 / nb_freq);
1099  data[ch][x] = s->sample_array[i] * (1.0 - w * w);
1100  i += channels;
1101  if (i >= SAMPLE_ARRAY_SIZE)
1102  i -= SAMPLE_ARRAY_SIZE;
1103  }
1104  av_rdft_calc(s->rdft, data[ch]);
1105  }
1106  /* Least efficient way to do this, we should of course
1107  * directly access it but it is more than fast enough. */
1108  if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
1109  pitch >>= 2;
1110  pixels += pitch * s->height;
1111  for (y = 0; y < s->height; y++) {
1112  double w = 1 / sqrt(nb_freq);
1113  int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
1114  int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][2 * y + 0], data[1][2 * y + 1]))
1115  : a;
1116  a = FFMIN(a, 255);
1117  b = FFMIN(b, 255);
1118  pixels -= pitch;
1119  *pixels = (a << 16) + (b << 8) + ((a+b) >> 1);
1120  }
1121  SDL_UnlockTexture(s->vis_texture);
1122  }
1123  SDL_RenderCopy(renderer, s->vis_texture, NULL, NULL);
1124  }
1125  if (!s->paused)
1126  s->xpos++;
1127  if (s->xpos >= s->width)
1128  s->xpos= s->xleft;
1129  }
1130 }
1131 
1132 static void stream_component_close(VideoState *is, int stream_index)
1133 {
1134  AVFormatContext *ic = is->ic;
1135  AVCodecParameters *codecpar;
1136 
1137  if (stream_index < 0 || stream_index >= ic->nb_streams)
1138  return;
1139  codecpar = ic->streams[stream_index]->codecpar;
1140 
1141  switch (codecpar->codec_type) {
1142  case AVMEDIA_TYPE_AUDIO:
1143  decoder_abort(&is->auddec, &is->sampq);
1144  SDL_CloseAudio();
1145  decoder_destroy(&is->auddec);
1146  swr_free(&is->swr_ctx);
1147  av_freep(&is->audio_buf1);
1148  is->audio_buf1_size = 0;
1149  is->audio_buf = NULL;
1150 
1151  if (is->rdft) {
1152  av_rdft_end(is->rdft);
1153  av_freep(&is->rdft_data);
1154  is->rdft = NULL;
1155  is->rdft_bits = 0;
1156  }
1157  break;
1158  case AVMEDIA_TYPE_VIDEO:
1159  decoder_abort(&is->viddec, &is->pictq);
1160  decoder_destroy(&is->viddec);
1161  break;
1162  case AVMEDIA_TYPE_SUBTITLE:
1163  decoder_abort(&is->subdec, &is->subpq);
1164  decoder_destroy(&is->subdec);
1165  break;
1166  default:
1167  break;
1168  }
1169 
1170  ic->streams[stream_index]->discard = AVDISCARD_ALL;
1171  switch (codecpar->codec_type) {
1172  case AVMEDIA_TYPE_AUDIO:
1173  is->audio_st = NULL;
1174  is->audio_stream = -1;
1175  break;
1176  case AVMEDIA_TYPE_VIDEO:
1177  is->video_st = NULL;
1178  is->video_stream = -1;
1179  break;
1180  case AVMEDIA_TYPE_SUBTITLE:
1181  is->subtitle_st = NULL;
1182  is->subtitle_stream = -1;
1183  break;
1184  default:
1185  break;
1186  }
1187 }
1188 
1189 static void stream_close(VideoState *is)
1190 {
1191  /* XXX: use a special url_shutdown call to abort parse cleanly */
1192  is->abort_request = 1;
1193  SDL_WaitThread(is->read_tid, NULL);
1194 
1195  /* close each stream */
1196  if (is->audio_stream >= 0)
1197  stream_component_close(is, is->audio_stream);
1198  if (is->video_stream >= 0)
1199  stream_component_close(is, is->video_stream);
1200  if (is->subtitle_stream >= 0)
1201  stream_component_close(is, is->subtitle_stream);
1202 
1203  avformat_close_input(&is->ic);
1204 
1205  packet_queue_destroy(&is->videoq);
1206  packet_queue_destroy(&is->audioq);
1207  packet_queue_destroy(&is->subtitleq);
1208 
1209  /* free all pictures */
1210  frame_queue_destory(&is->pictq);
1211  frame_queue_destory(&is->sampq);
1212  frame_queue_destory(&is->subpq);
1213  SDL_DestroyCond(is->continue_read_thread);
1214  sws_freeContext(is->img_convert_ctx);
1215  sws_freeContext(is->sub_convert_ctx);
1216  av_free(is->filename);
1217  if (is->vis_texture)
1218  SDL_DestroyTexture(is->vis_texture);
1219  if (is->sub_texture)
1220  SDL_DestroyTexture(is->sub_texture);
1221  av_free(is);
1222 }
1223 
1224 static void do_exit(VideoState *is)
1225 {
1226  if (is) {
1227  stream_close(is);
1228  }
1229  if (renderer)
1230  SDL_DestroyRenderer(renderer);
1231  if (window)
1232  SDL_DestroyWindow(window);
1233  av_lockmgr_register(NULL);
1234  uninit_opts();
1235 #if CONFIG_AVFILTER
1236  av_freep(&vfilters_list);
1237 #endif
1238  avformat_network_deinit();
1239  if (show_status)
1240  printf("\n");
1241  SDL_Quit();
1242  av_log(NULL, AV_LOG_QUIET, "%s", "");
1243  exit(0);
1244 }
1245 
1246 static void sigterm_handler(int sig)
1247 {
1248  exit(123);
1249 }
1250 
1251 static void set_default_window_size(int width, int height, AVRational sar)
1252 {
1253  SDL_Rect rect;
1254  calculate_display_rect(&rect, 0, 0, INT_MAX, height, width, height, sar);
1255  default_width = rect.w;
1256  default_height = rect.h;
1257 }
1258 
1259 static int video_open(VideoState *is, Frame *vp)
1260 {
1261  int w,h;
1262 
1263  if (vp && vp->width)
1264  set_default_window_size(vp->width, vp->height, vp->sar);
1265 
1266  if (screen_width) {
1267  w = screen_width;
1268  h = screen_height;
1269  } else {
1270  w = default_width;
1271  h = default_height;
1272  }
1273 
1274  if (!window) {
1275  int flags = SDL_WINDOW_SHOWN | SDL_WINDOW_RESIZABLE;
1276  if (!window_title)
1277  window_title = input_filename;
1278  if (is_full_screen)
1279  flags |= SDL_WINDOW_FULLSCREEN_DESKTOP;
1280  window = SDL_CreateWindow(window_title, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, w, h, flags);
1281  SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
1282  if (window) {
1283  SDL_RendererInfo info;
1284  renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
1285  if (!renderer) {
1286  av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
1287  renderer = SDL_CreateRenderer(window, -1, 0);
1288  }
1289  if (renderer) {
1290  if (!SDL_GetRendererInfo(renderer, &info))
1291  av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", info.name);
1292  }
1293  }
1294  } else {
1295  SDL_SetWindowSize(window, w, h);
1296  }
1297 
1298  if (!window || !renderer) {
1299  av_log(NULL, AV_LOG_FATAL, "SDL: could not set video mode - exiting\n");
1300  do_exit(is);
1301  }
1302 
1303  is->width = w;
1304  is->height = h;
1305 
1306  return 0;
1307 }
1308 
1309 /* display the current picture, if any */
1310 static void video_display(VideoState *is)
1311 {
1312  if (!window)
1313  video_open(is, NULL);
1314 
1315  SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1316  SDL_RenderClear(renderer);
1317  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1318  video_audio_display(is);
1319  else if (is->video_st)
1320  video_image_display(is);
1321  SDL_RenderPresent(renderer);
1322 }
1323 
1324 static double get_clock(Clock *c)
1325 {
1326  if (*c->queue_serial != c->serial)
1327  return NAN;
1328  if (c->paused) {
1329  return c->pts;
1330  } else {
1331  double time = av_gettime_relative() / 1000000.0;
1332  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1333  }
1334 }
1335 
1336 static void set_clock_at(Clock *c, double pts, int serial, double time)
1337 {
1338  c->pts = pts;
1339  c->last_updated = time;
1340  c->pts_drift = c->pts - time;
1341  c->serial = serial;
1342 }
1343 
1344 static void set_clock(Clock *c, double pts, int serial)
1345 {
1346  double time = av_gettime_relative() / 1000000.0;
1347  set_clock_at(c, pts, serial, time);
1348 }
1349 
1350 static void set_clock_speed(Clock *c, double speed)
1351 {
1352  set_clock(c, get_clock(c), c->serial);
1353  c->speed = speed;
1354 }
1355 
1356 static void init_clock(Clock *c, int *queue_serial)
1357 {
1358  c->speed = 1.0;
1359  c->paused = 0;
1360  c->queue_serial = queue_serial;
1361  set_clock(c, NAN, -1);
1362 }
1363 
1364 static void sync_clock_to_slave(Clock *c, Clock *slave)
1365 {
1366  double clock = get_clock(c);
1367  double slave_clock = get_clock(slave);
1368  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1369  set_clock(c, slave_clock, slave->serial);
1370 }
1371 
1372 static int get_master_sync_type(VideoState *is) {
1373  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1374  if (is->video_st)
1375  return AV_SYNC_VIDEO_MASTER;
1376  else
1377  return AV_SYNC_AUDIO_MASTER;
1378  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1379  if (is->audio_st)
1380  return AV_SYNC_AUDIO_MASTER;
1381  else
1382  return AV_SYNC_EXTERNAL_CLOCK;
1383  } else {
1384  return AV_SYNC_EXTERNAL_CLOCK;
1385  }
1386 }
1387 
1388 /* get the current master clock value */
1389 static double get_master_clock(VideoState *is)
1390 {
1391  double val;
1392 
1393  switch (get_master_sync_type(is)) {
1394  case AV_SYNC_VIDEO_MASTER:
1395  val = get_clock(&is->vidclk);
1396  break;
1397  case AV_SYNC_AUDIO_MASTER:
1398  val = get_clock(&is->audclk);
1399  break;
1400  default:
1401  val = get_clock(&is->extclk);
1402  break;
1403  }
1404  return val;
1405 }
1406 
1407 static void check_external_clock_speed(VideoState *is) {
1408  if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES ||
1409  is->audio_stream >= 0 && is->audioq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES) {
1410  set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
1411  } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) &&
1412  (is->audio_stream < 0 || is->audioq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES)) {
1413  set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
1414  } else {
1415  double speed = is->extclk.speed;
1416  if (speed != 1.0)
1417  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1418  }
1419 }
1420 
1421 /* seek in the stream */
1422 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1423 {
1424  if (!is->seek_req) {
1425  is->seek_pos = pos;
1426  is->seek_rel = rel;
1427  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1428  if (seek_by_bytes)
1429  is->seek_flags |= AVSEEK_FLAG_BYTE;
1430  is->seek_req = 1;
1431  SDL_CondSignal(is->continue_read_thread);
1432  }
1433 }
1434 
1435 /* pause or resume the video */
1436 static void stream_toggle_pause(VideoState *is)
1437 {
1438  if (is->paused) {
1439  is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1440  if (is->read_pause_return != AVERROR(ENOSYS)) {
1441  is->vidclk.paused = 0;
1442  }
1443  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1444  }
1445  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1446  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1447 }
1448 
1449 static void toggle_pause(VideoState *is)
1450 {
1451  stream_toggle_pause(is);
1452  is->step = 0;
1453 }
1454 
1455 static void toggle_mute(VideoState *is)
1456 {
1457  is->muted = !is->muted;
1458 }
1459 
1460 static void update_volume(VideoState *is, int sign, int step)
1461 {
1462  is->audio_volume = av_clip(is->audio_volume + sign * step, 0, SDL_MIX_MAXVOLUME);
1463 }
1464 
1465 static void step_to_next_frame(VideoState *is)
1466 {
1467  /* if the stream is paused unpause it, then step */
1468  if (is->paused)
1469  stream_toggle_pause(is);
1470  is->step = 1;
1471 }
1472 
1473 static double compute_target_delay(double delay, VideoState *is)
1474 {
1475  double sync_threshold, diff = 0;
1476 
1477  /* update delay to follow master synchronisation source */
1478  if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1479  /* if video is slave, we try to correct big delays by
1480  duplicating or deleting a frame */
1481  diff = get_clock(&is->vidclk) - get_master_clock(is);
1482 
1483  /* skip or repeat frame. We take into account the
1484  delay to compute the threshold. I still don't know
1485  if it is the best guess */
1486  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1487  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1488  if (diff <= -sync_threshold)
1489  delay = FFMAX(0, delay + diff);
1490  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1491  delay = delay + diff;
1492  else if (diff >= sync_threshold)
1493  delay = 2 * delay;
1494  }
1495  }
1496 
1497  av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
1498  delay, -diff);
1499 
1500  return delay;
1501 }
1502 
1503 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1504  if (vp->serial == nextvp->serial) {
1505  double duration = nextvp->pts - vp->pts;
1506  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1507  return vp->duration;
1508  else
1509  return duration;
1510  } else {
1511  return 0.0;
1512  }
1513 }
1514 
1515 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1516  /* update current video pts */
1517  set_clock(&is->vidclk, pts, serial);
1518  sync_clock_to_slave(&is->extclk, &is->vidclk);
1519 }
1520 
1521 /* called to display each frame */
1522 static void video_refresh(void *opaque, double *remaining_time)
1523 {
1524  VideoState *is = opaque;
1525  double time;
1526 
1527  Frame *sp, *sp2;
1528 
1529  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1530  check_external_clock_speed(is);
1531 
1532  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1533  time = av_gettime_relative() / 1000000.0;
1534  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1535  video_display(is);
1536  is->last_vis_time = time;
1537  }
1538  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1539  }
1540 
1541  if (is->video_st) {
1542 retry:
1543  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1544  // nothing to do, no picture to display in the queue
1545  } else {
1546  double last_duration, duration, delay;
1547  Frame *vp, *lastvp;
1548 
1549  /* dequeue the picture */
1550  lastvp = frame_queue_peek_last(&is->pictq);
1551  vp = frame_queue_peek(&is->pictq);
1552 
1553  if (vp->serial != is->videoq.serial) {
1554  frame_queue_next(&is->pictq);
1555  goto retry;
1556  }
1557 
1558  if (lastvp->serial != vp->serial)
1559  is->frame_timer = av_gettime_relative() / 1000000.0;
1560 
1561  if (is->paused)
1562  goto display;
1563 
1564  /* compute nominal last_duration */
1565  last_duration = vp_duration(is, lastvp, vp);
1566  delay = compute_target_delay(last_duration, is);
1567 
1568  time= av_gettime_relative()/1000000.0;
1569  if (time < is->frame_timer + delay) {
1570  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1571  goto display;
1572  }
1573 
1574  is->frame_timer += delay;
1575  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1576  is->frame_timer = time;
1577 
1578  SDL_LockMutex(is->pictq.mutex);
1579  if (!isnan(vp->pts))
1580  update_video_pts(is, vp->pts, vp->pos, vp->serial);
1581  SDL_UnlockMutex(is->pictq.mutex);
1582 
1583  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1584  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1585  duration = vp_duration(is, vp, nextvp);
1586  if(!is->step && (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1587  is->frame_drops_late++;
1588  frame_queue_next(&is->pictq);
1589  goto retry;
1590  }
1591  }
1592 
1593  if (is->subtitle_st) {
1594  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1595  sp = frame_queue_peek(&is->subpq);
1596 
1597  if (frame_queue_nb_remaining(&is->subpq) > 1)
1598  sp2 = frame_queue_peek_next(&is->subpq);
1599  else
1600  sp2 = NULL;
1601 
1602  if (sp->serial != is->subtitleq.serial
1603  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1604  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1605  {
1606  if (sp->uploaded) {
1607  int i;
1608  for (i = 0; i < sp->sub.num_rects; i++) {
1609  AVSubtitleRect *sub_rect = sp->sub.rects[i];
1610  uint8_t *pixels;
1611  int pitch, j;
1612 
1613  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
1614  for (j = 0; j < sub_rect->h; j++, pixels += pitch)
1615  memset(pixels, 0, sub_rect->w << 2);
1616  SDL_UnlockTexture(is->sub_texture);
1617  }
1618  }
1619  }
1620  frame_queue_next(&is->subpq);
1621  } else {
1622  break;
1623  }
1624  }
1625  }
1626 
1627  frame_queue_next(&is->pictq);
1628  is->force_refresh = 1;
1629 
1630  if (is->step && !is->paused)
1631  stream_toggle_pause(is);
1632  }
1633 display:
1634  /* display picture */
1635  if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
1636  video_display(is);
1637  }
1638  is->force_refresh = 0;
1639  if (show_status) {
1640  static int64_t last_time;
1641  int64_t cur_time;
1642  int aqsize, vqsize, sqsize;
1643  double av_diff;
1644 
1645  cur_time = av_gettime_relative();
1646  if (!last_time || (cur_time - last_time) >= 30000) {
1647  aqsize = 0;
1648  vqsize = 0;
1649  sqsize = 0;
1650  if (is->audio_st)
1651  aqsize = is->audioq.size;
1652  if (is->video_st)
1653  vqsize = is->videoq.size;
1654  if (is->subtitle_st)
1655  sqsize = is->subtitleq.size;
1656  av_diff = 0;
1657  if (is->audio_st && is->video_st)
1658  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1659  else if (is->video_st)
1660  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1661  else if (is->audio_st)
1662  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1663  av_log(NULL, AV_LOG_INFO,
1664  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1665  get_master_clock(is),
1666  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1667  av_diff,
1668  is->frame_drops_early + is->frame_drops_late,
1669  aqsize / 1024,
1670  vqsize / 1024,
1671  sqsize,
1672  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_dts : 0,
1673  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_pts : 0);
1674  fflush(stdout);
1675  last_time = cur_time;
1676  }
1677  }
1678 }
1679 
1680 /* allocate a picture (this needs to be done in the main thread to avoid
1681  potential locking problems) */
1682 static void alloc_picture(VideoState *is)
1683 {
1684  Frame *vp;
1685  int sdl_format;
1686 
1687  vp = &is->pictq.queue[is->pictq.windex];
1688 
1689  video_open(is, vp);
1690 
1691  if (vp->format == AV_PIX_FMT_YUV420P)
1692  sdl_format = SDL_PIXELFORMAT_YV12;
1693  else
1694  sdl_format = SDL_PIXELFORMAT_ARGB8888;
1695 
1696  if (realloc_texture(&vp->bmp, sdl_format, vp->width, vp->height, SDL_BLENDMODE_NONE, 0) < 0) {
1697  /* SDL allocates a buffer smaller than requested if the video
1698  * overlay hardware is unable to support the requested size. */
1699  av_log(NULL, AV_LOG_FATAL,
1700  "Error: the video system does not support an image\n"
1701  "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1702  "to reduce the image size.\n", vp->width, vp->height );
1703  do_exit(is);
1704  }
1705 
1706  SDL_LockMutex(is->pictq.mutex);
1707  vp->allocated = 1;
1708  SDL_CondSignal(is->pictq.cond);
1709  SDL_UnlockMutex(is->pictq.mutex);
1710 }
1711 
1712 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1713 {
1714  Frame *vp;
1715 
1716 #if defined(DEBUG_SYNC)
1717  printf("frame_type=%c pts=%0.3f\n",
1718  av_get_picture_type_char(src_frame->pict_type), pts);
1719 #endif
1720 
1721  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1722  return -1;
1723 
1724  vp->sar = src_frame->sample_aspect_ratio;
1725  vp->uploaded = 0;
1726 
1727  /* alloc or resize hardware picture buffer */
1728  if (!vp->bmp || !vp->allocated ||
1729  vp->width != src_frame->width ||
1730  vp->height != src_frame->height ||
1731  vp->format != src_frame->format) {
1732  SDL_Event event;
1733 
1734  vp->allocated = 0;
1735  vp->width = src_frame->width;
1736  vp->height = src_frame->height;
1737  vp->format = src_frame->format;
1738 
1739  /* the allocation must be done in the main thread to avoid
1740  locking problems. */
1741  event.type = FF_ALLOC_EVENT;
1742  event.user.data1 = is;
1743  SDL_PushEvent(&event);
1744 
1745  /* wait until the picture is allocated */
1746  SDL_LockMutex(is->pictq.mutex);
1747  while (!vp->allocated && !is->videoq.abort_request) {
1748  SDL_CondWait(is->pictq.cond, is->pictq.mutex);
1749  }
1750  /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1751  if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, FF_ALLOC_EVENT, FF_ALLOC_EVENT) != 1) {
1752  while (!vp->allocated && !is->abort_request) {
1753  SDL_CondWait(is->pictq.cond, is->pictq.mutex);
1754  }
1755  }
1756  SDL_UnlockMutex(is->pictq.mutex);
1757 
1758  if (is->videoq.abort_request)
1759  return -1;
1760  }
1761 
1762  /* if the frame is not skipped, then display it */
1763  if (vp->bmp) {
1764  vp->pts = pts;
1765  vp->duration = duration;
1766  vp->pos = pos;
1767  vp->serial = serial;
1768 
1769  av_frame_move_ref(vp->frame, src_frame);
1770  frame_queue_push(&is->pictq);
1771  }
1772  return 0;
1773 }
1774 
1775 static int get_video_frame(VideoState *is, AVFrame *frame)
1776 {
1777  int got_picture;
1778 
1779  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1780  return -1;
1781 
1782  if (got_picture) {
1783  double dpts = NAN;
1784 
1785  if (frame->pts != AV_NOPTS_VALUE)
1786  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1787 
1788  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1789 
1790  if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1791  if (frame->pts != AV_NOPTS_VALUE) {
1792  double diff = dpts - get_master_clock(is);
1793  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1794  diff - is->frame_last_filter_delay < 0 &&
1795  is->viddec.pkt_serial == is->vidclk.serial &&
1796  is->videoq.nb_packets) {
1797  is->frame_drops_early++;
1798  av_frame_unref(frame);
1799  got_picture = 0;
1800  }
1801  }
1802  }
1803  }
1804 
1805  return got_picture;
1806 }
1807 
1808 #if CONFIG_AVFILTER
1809 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1810  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1811 {
1812  int ret, i;
1813  int nb_filters = graph->nb_filters;
1814  AVFilterInOut *outputs = NULL, *inputs = NULL;
1815 
1816  if (filtergraph) {
1817  outputs = avfilter_inout_alloc();
1818  inputs = avfilter_inout_alloc();
1819  if (!outputs || !inputs) {
1820  ret = AVERROR(ENOMEM);
1821  goto fail;
1822  }
1823 
1824  outputs->name = av_strdup("in");
1825  outputs->filter_ctx = source_ctx;
1826  outputs->pad_idx = 0;
1827  outputs->next = NULL;
1828 
1829  inputs->name = av_strdup("out");
1830  inputs->filter_ctx = sink_ctx;
1831  inputs->pad_idx = 0;
1832  inputs->next = NULL;
1833 
1834  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1835  goto fail;
1836  } else {
1837  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1838  goto fail;
1839  }
1840 
1841  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1842  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1843  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1844 
1845  ret = avfilter_graph_config(graph, NULL);
1846 fail:
1847  avfilter_inout_free(&outputs);
1848  avfilter_inout_free(&inputs);
1849  return ret;
1850 }
1851 
1852 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1853 {
1854  static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE };
1855  char sws_flags_str[512] = "";
1856  char buffersrc_args[256];
1857  int ret;
1858  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1859  AVCodecParameters *codecpar = is->video_st->codecpar;
1860  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1861  AVDictionaryEntry *e = NULL;
1862 
1863  while ((e = av_dict_get(sws_dict, "", e, AV_DICT_IGNORE_SUFFIX))) {
1864  if (!strcmp(e->key, "sws_flags")) {
1865  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1866  } else
1867  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value);
1868  }
1869  if (strlen(sws_flags_str))
1870  sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1871 
1872  graph->scale_sws_opts = av_strdup(sws_flags_str);
1873 
1874  snprintf(buffersrc_args, sizeof(buffersrc_args),
1875  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1876  frame->width, frame->height, frame->format,
1877  is->video_st->time_base.num, is->video_st->time_base.den,
1878  codecpar->sample_aspect_ratio.num, FFMAX(codecpar->sample_aspect_ratio.den, 1));
1879  if (fr.num && fr.den)
1880  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1881 
1882  if ((ret = avfilter_graph_create_filter(&filt_src,
1883  avfilter_get_by_name("buffer"),
1884  "ffplay_buffer", buffersrc_args, NULL,
1885  graph)) < 0)
1886  goto fail;
1887 
1888  ret = avfilter_graph_create_filter(&filt_out,
1889  avfilter_get_by_name("buffersink"),
1890  "ffplay_buffersink", NULL, NULL, graph);
1891  if (ret < 0)
1892  goto fail;
1893 
1894  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1895  goto fail;
1896 
1897  last_filter = filt_out;
1898 
1899 /* Note: this macro adds a filter before the lastly added filter, so the
1900  * processing order of the filters is in reverse */
1901 #define INSERT_FILT(name, arg) do { \
1902  AVFilterContext *filt_ctx; \
1903  \
1904  ret = avfilter_graph_create_filter(&filt_ctx, \
1905  avfilter_get_by_name(name), \
1906  "ffplay_" name, arg, NULL, graph); \
1907  if (ret < 0) \
1908  goto fail; \
1909  \
1910  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1911  if (ret < 0) \
1912  goto fail; \
1913  \
1914  last_filter = filt_ctx; \
1915 } while (0)
1916 
1917  if (autorotate) {
1918  double theta = get_rotation(is->video_st);
1919 
1920  if (fabs(theta - 90) < 1.0) {
1921  INSERT_FILT("transpose", "clock");
1922  } else if (fabs(theta - 180) < 1.0) {
1923  INSERT_FILT("hflip", NULL);
1924  INSERT_FILT("vflip", NULL);
1925  } else if (fabs(theta - 270) < 1.0) {
1926  INSERT_FILT("transpose", "cclock");
1927  } else if (fabs(theta) > 1.0) {
1928  char rotate_buf[64];
1929  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1930  INSERT_FILT("rotate", rotate_buf);
1931  }
1932  }
1933 
1934  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
1935  goto fail;
1936 
1937  is->in_video_filter = filt_src;
1938  is->out_video_filter = filt_out;
1939 
1940 fail:
1941  return ret;
1942 }
1943 
1944 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1945 {
1946  static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
1947  int sample_rates[2] = { 0, -1 };
1948  int64_t channel_layouts[2] = { 0, -1 };
1949  int channels[2] = { 0, -1 };
1950  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1951  char aresample_swr_opts[512] = "";
1952  AVDictionaryEntry *e = NULL;
1953  char asrc_args[256];
1954  int ret;
1955 
1956  avfilter_graph_free(&is->agraph);
1957  if (!(is->agraph = avfilter_graph_alloc()))
1958  return AVERROR(ENOMEM);
1959 
1960  while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
1961  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
1962  if (strlen(aresample_swr_opts))
1963  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
1964  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
1965 
1966  ret = snprintf(asrc_args, sizeof(asrc_args),
1967  "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
1968  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
1969  is->audio_filter_src.channels,
1970  1, is->audio_filter_src.freq);
1971  if (is->audio_filter_src.channel_layout)
1972  snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
1973  ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
1974 
1975  ret = avfilter_graph_create_filter(&filt_asrc,
1976  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1977  asrc_args, NULL, is->agraph);
1978  if (ret < 0)
1979  goto end;
1980 
1981 
1982  ret = avfilter_graph_create_filter(&filt_asink,
1983  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
1984  NULL, NULL, is->agraph);
1985  if (ret < 0)
1986  goto end;
1987 
1988  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1989  goto end;
1990  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1991  goto end;
1992 
1993  if (force_output_format) {
1994  channel_layouts[0] = is->audio_tgt.channel_layout;
1995  channels [0] = is->audio_tgt.channels;
1996  sample_rates [0] = is->audio_tgt.freq;
1997  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
1998  goto end;
1999  if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2000  goto end;
2001  if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2002  goto end;
2003  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2004  goto end;
2005  }
2006 
2007 
2008  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
2009  goto end;
2010 
2011  is->in_audio_filter = filt_asrc;
2012  is->out_audio_filter = filt_asink;
2013 
2014 end:
2015  if (ret < 0)
2016  avfilter_graph_free(&is->agraph);
2017  return ret;
2018 }
2019 #endif /* CONFIG_AVFILTER */
2020 
2021 static int audio_thread(void *arg)
2022 {
2023  VideoState *is = arg;
2024  AVFrame *frame = av_frame_alloc();
2025  Frame *af;
2026 #if CONFIG_AVFILTER
2027  int last_serial = -1;
2028  int64_t dec_channel_layout;
2029  int reconfigure;
2030 #endif
2031  int got_frame = 0;
2032  AVRational tb;
2033  int ret = 0;
2034 
2035  if (!frame)
2036  return AVERROR(ENOMEM);
2037 
2038  do {
2039  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2040  goto the_end;
2041 
2042  if (got_frame) {
2043  tb = (AVRational){1, frame->sample_rate};
2044 
2045 #if CONFIG_AVFILTER
2046  dec_channel_layout = get_valid_channel_layout(frame->channel_layout, av_frame_get_channels(frame));
2047 
2048  reconfigure =
2049  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2050  frame->format, av_frame_get_channels(frame)) ||
2051  is->audio_filter_src.channel_layout != dec_channel_layout ||
2052  is->audio_filter_src.freq != frame->sample_rate ||
2053  is->auddec.pkt_serial != last_serial;
2054 
2055  if (reconfigure) {
2056  char buf1[1024], buf2[1024];
2057  av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2058  av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2059  av_log(NULL, AV_LOG_DEBUG,
2060  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2061  is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2062  frame->sample_rate, av_frame_get_channels(frame), av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2063 
2064  is->audio_filter_src.fmt = frame->format;
2065  is->audio_filter_src.channels = av_frame_get_channels(frame);
2066  is->audio_filter_src.channel_layout = dec_channel_layout;
2067  is->audio_filter_src.freq = frame->sample_rate;
2068  last_serial = is->auddec.pkt_serial;
2069 
2070  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2071  goto the_end;
2072  }
2073 
2074  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2075  goto the_end;
2076 
2077  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2078  tb = is->out_audio_filter->inputs[0]->time_base;
2079 #endif
2080  if (!(af = frame_queue_peek_writable(&is->sampq)))
2081  goto the_end;
2082 
2083  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2084  af->pos = av_frame_get_pkt_pos(frame);
2085  af->serial = is->auddec.pkt_serial;
2086  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2087 
2088  av_frame_move_ref(af->frame, frame);
2089  frame_queue_push(&is->sampq);
2090 
2091 #if CONFIG_AVFILTER
2092  if (is->audioq.serial != is->auddec.pkt_serial)
2093  break;
2094  }
2095  if (ret == AVERROR_EOF)
2096  is->auddec.finished = is->auddec.pkt_serial;
2097 #endif
2098  }
2099  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2100  the_end:
2101 #if CONFIG_AVFILTER
2102  avfilter_graph_free(&is->agraph);
2103 #endif
2104  av_frame_free(&frame);
2105  return ret;
2106 }
2107 
2108 static int decoder_start(Decoder *d, int (*fn)(void *), void *arg)
2109 {
2110  packet_queue_start(d->queue);
2111  d->decoder_tid = SDL_CreateThread(fn, "decoder", arg);
2112  if (!d->decoder_tid) {
2113  av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
2114  return AVERROR(ENOMEM);
2115  }
2116  return 0;
2117 }
2118 
2119 static int video_thread(void *arg)
2120 {
2121  VideoState *is = arg;
2122  AVFrame *frame = av_frame_alloc();
2123  double pts;
2124  double duration;
2125  int ret;
2126  AVRational tb = is->video_st->time_base;
2127  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2128 
2129 #if CONFIG_AVFILTER
2130  AVFilterGraph *graph = avfilter_graph_alloc();
2131  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2132  int last_w = 0;
2133  int last_h = 0;
2134  enum AVPixelFormat last_format = -2;
2135  int last_serial = -1;
2136  int last_vfilter_idx = 0;
2137  if (!graph) {
2138  av_frame_free(&frame);
2139  return AVERROR(ENOMEM);
2140  }
2141 
2142 #endif
2143 
2144  if (!frame) {
2145 #if CONFIG_AVFILTER
2146  avfilter_graph_free(&graph);
2147 #endif
2148  return AVERROR(ENOMEM);
2149  }
2150 
2151  for (;;) {
2152  ret = get_video_frame(is, frame);
2153  if (ret < 0)
2154  goto the_end;
2155  if (!ret)
2156  continue;
2157 
2158 #if CONFIG_AVFILTER
2159  if ( last_w != frame->width
2160  || last_h != frame->height
2161  || last_format != frame->format
2162  || last_serial != is->viddec.pkt_serial
2163  || last_vfilter_idx != is->vfilter_idx) {
2164  av_log(NULL, AV_LOG_DEBUG,
2165  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2166  last_w, last_h,
2167  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2168  frame->width, frame->height,
2169  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2170  avfilter_graph_free(&graph);
2171  graph = avfilter_graph_alloc();
2172  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2173  SDL_Event event;
2174  event.type = FF_QUIT_EVENT;
2175  event.user.data1 = is;
2176  SDL_PushEvent(&event);
2177  goto the_end;
2178  }
2179  filt_in = is->in_video_filter;
2180  filt_out = is->out_video_filter;
2181  last_w = frame->width;
2182  last_h = frame->height;
2183  last_format = frame->format;
2184  last_serial = is->viddec.pkt_serial;
2185  last_vfilter_idx = is->vfilter_idx;
2186  frame_rate = filt_out->inputs[0]->frame_rate;
2187  }
2188 
2189  ret = av_buffersrc_add_frame(filt_in, frame);
2190  if (ret < 0)
2191  goto the_end;
2192 
2193  while (ret >= 0) {
2194  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2195 
2196  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2197  if (ret < 0) {
2198  if (ret == AVERROR_EOF)
2199  is->viddec.finished = is->viddec.pkt_serial;
2200  ret = 0;
2201  break;
2202  }
2203 
2204  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2205  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2206  is->frame_last_filter_delay = 0;
2207  tb = filt_out->inputs[0]->time_base;
2208 #endif
2209  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
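 /* For example, a stream detected at 30000/1001 fps gives av_q2d(1001/30000) ~= 0.0334 s
  * per picture; when the frame rate is unknown the nominal duration is simply left at 0. */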
2210  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2211  ret = queue_picture(is, frame, pts, duration, av_frame_get_pkt_pos(frame), is->viddec.pkt_serial);
2212  av_frame_unref(frame);
2213 #if CONFIG_AVFILTER
2214  }
2215 #endif
2216 
2217  if (ret < 0)
2218  goto the_end;
2219  }
2220  the_end:
2221 #if CONFIG_AVFILTER
2222  avfilter_graph_free(&graph);
2223 #endif
2224  av_frame_free(&frame);
2225  return 0;
2226 }
2227 
2228 static int subtitle_thread(void *arg)
2229 {
2230  VideoState *is = arg;
2231  Frame *sp;
2232  int got_subtitle;
2233  double pts;
2234 
2235  for (;;) {
2236  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2237  return 0;
2238 
2239  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2240  break;
2241 
2242  pts = 0;
2243 
2244  if (got_subtitle && sp->sub.format == 0) {
2245  if (sp->sub.pts != AV_NOPTS_VALUE)
2246  pts = sp->sub.pts / (double)AV_TIME_BASE;
2247  sp->pts = pts;
2248  sp->serial = is->subdec.pkt_serial;
2249  sp->width = is->subdec.avctx->width;
2250  sp->height = is->subdec.avctx->height;
2251  sp->uploaded = 0;
2252 
2253  /* now we can update the picture count */
2254  frame_queue_push(&is->subpq);
2255  } else if (got_subtitle) {
2256  avsubtitle_free(&sp->sub);
2257  }
2258  }
2259  return 0;
2260 }
2261 
2262 /* copy samples for viewing in editor window */
2263 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2264 {
2265  int size, len;
2266 
2267  size = samples_size / sizeof(short);
2268  while (size > 0) {
2269  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2270  if (len > size)
2271  len = size;
2272  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2273  samples += len;
2274  is->sample_array_index += len;
2275  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2276  is->sample_array_index = 0;
2277  size -= len;
2278  }
2279 }
2280 
2281 /* return the wanted number of samples to get better sync if sync_type is video
2282  * or external master clock */
2283 static int synchronize_audio(VideoState *is, int nb_samples)
2284 {
2285  int wanted_nb_samples = nb_samples;
2286 
2287  /* if not master, then we try to remove or add samples to correct the clock */
2288  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2289  double diff, avg_diff;
2290  int min_nb_samples, max_nb_samples;
2291 
2292  diff = get_clock(&is->audclk) - get_master_clock(is);
2293 
2294  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2295  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2296  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2297  /* not enough measures to have a correct estimate */
2298  is->audio_diff_avg_count++;
2299  } else {
2300  /* estimate the A-V difference */
2301  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2302 
2303  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2304  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2305  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2306  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2307  wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2308  }
2309  av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2310  diff, avg_diff, wanted_nb_samples - nb_samples,
2311  is->audio_clock, is->audio_diff_threshold);
2312  }
2313  } else {
2314  /* too big difference : may be initial PTS errors, so
2315  reset A-V filter */
2316  is->audio_diff_avg_count = 0;
2317  is->audio_diff_cum = 0;
2318  }
2319  }
2320 
2321  return wanted_nb_samples;
2322 }
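/* A worked example of the correction above, with illustrative figures: if the audio clock
 * is 5 ms ahead of the master clock (diff = +0.005), freq = 48000 and nb_samples = 1024,
 * the request becomes 1024 + 0.005 * 48000 = 1264 samples, which av_clip() then limits to
 * the +/-SAMPLE_CORRECTION_PERCENT_MAX window [921, 1126], so drift is absorbed over
 * several frames instead of in one jump; the correction is only applied once avg_diff
 * exceeds audio_diff_threshold. */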
2323 
2324 /**
2325  * Decode one audio frame and return its uncompressed size.
2326  *
2327  * The processed audio frame is decoded, converted if required, and
2328  * stored in is->audio_buf, with size in bytes given by the return
2329  * value.
2330  */
2331 static int audio_decode_frame(VideoState *is)
2332 {
2333  int data_size, resampled_data_size;
2334  int64_t dec_channel_layout;
2335  av_unused double audio_clock0;
2336  int wanted_nb_samples;
2337  Frame *af;
2338 
2339  if (is->paused)
2340  return -1;
2341 
2342  do {
2343 #if defined(_WIN32)
2344  while (frame_queue_nb_remaining(&is->sampq) == 0) {
2345  if ((av_gettime_relative() - audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
2346  return -1;
2347  av_usleep (1000);
2348  }
2349 #endif
2350  if (!(af = frame_queue_peek_readable(&is->sampq)))
2351  return -1;
2352  frame_queue_next(&is->sampq);
2353  } while (af->serial != is->audioq.serial);
2354 
2355  data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(af->frame),
2356  af->frame->nb_samples,
2357  af->frame->format, 1);
2358 
2359  dec_channel_layout =
2360  (af->frame->channel_layout && av_frame_get_channels(af->frame) == av_get_channel_layout_nb_channels(af->frame->channel_layout)) ?
2361  af->frame->channel_layout : av_get_default_channel_layout(av_frame_get_channels(af->frame));
2362  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2363 
2364  if (af->frame->format != is->audio_src.fmt ||
2365  dec_channel_layout != is->audio_src.channel_layout ||
2366  af->frame->sample_rate != is->audio_src.freq ||
2367  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2368  swr_free(&is->swr_ctx);
2369  is->swr_ctx = swr_alloc_set_opts(NULL,
2370  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2371  dec_channel_layout, af->frame->format, af->frame->sample_rate,
2372  0, NULL);
2373  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2374  av_log(NULL, AV_LOG_ERROR,
2375  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2376  af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), av_frame_get_channels(af->frame),
2377  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2378  swr_free(&is->swr_ctx);
2379  return -1;
2380  }
2381  is->audio_src.channel_layout = dec_channel_layout;
2382  is->audio_src.channels = av_frame_get_channels(af->frame);
2383  is->audio_src.freq = af->frame->sample_rate;
2384  is->audio_src.fmt = af->frame->format;
2385  }
2386 
2387  if (is->swr_ctx) {
2388  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2389  uint8_t **out = &is->audio_buf1;
2390  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2391  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2392  int len2;
2393  if (out_size < 0) {
2394  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2395  return -1;
2396  }
2397  if (wanted_nb_samples != af->frame->nb_samples) {
2398  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2399  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2400  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2401  return -1;
2402  }
2403  }
2404  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2405  if (!is->audio_buf1)
2406  return AVERROR(ENOMEM);
2407  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2408  if (len2 < 0) {
2409  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2410  return -1;
2411  }
2412  if (len2 == out_count) {
2413  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2414  if (swr_init(is->swr_ctx) < 0)
2415  swr_free(&is->swr_ctx);
2416  }
2417  is->audio_buf = is->audio_buf1;
2418  resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2419  } else {
2420  is->audio_buf = af->frame->data[0];
2421  resampled_data_size = data_size;
2422  }
2423 
2424  audio_clock0 = is->audio_clock;
2425  /* update the audio clock with the pts */
2426  if (!isnan(af->pts))
2427  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2428  else
2429  is->audio_clock = NAN;
2430  is->audio_clock_serial = af->serial;
2431 #ifdef DEBUG
2432  {
2433  static double last_clock;
2434  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2435  is->audio_clock - last_clock,
2436  is->audio_clock, audio_clock0);
2437  last_clock = is->audio_clock;
2438  }
2439 #endif
2440  return resampled_data_size;
2441 }
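/* Sizing sketch for the resampling path above (illustrative figures): converting 1126
 * input samples from 44100 Hz to a 48000 Hz target needs about 1126 * 48000 / 44100 = 1225
 * output samples, and the extra 256 slots give swr_convert() room for audio it has buffered
 * internally; the return value is then len2 * channels * bytes_per_sample of the target format. */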
2442 
2443 /* prepare a new audio buffer */
2444 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2445 {
2446  VideoState *is = opaque;
2447  int audio_size, len1;
2448 
2449  audio_callback_time = av_gettime_relative();
2450 
2451  while (len > 0) {
2452  if (is->audio_buf_index >= is->audio_buf_size) {
2453  audio_size = audio_decode_frame(is);
2454  if (audio_size < 0) {
2455  /* if error, just output silence */
2456  is->audio_buf = NULL;
2457  is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2458  } else {
2459  if (is->show_mode != SHOW_MODE_VIDEO)
2460  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2461  is->audio_buf_size = audio_size;
2462  }
2463  is->audio_buf_index = 0;
2464  }
2465  len1 = is->audio_buf_size - is->audio_buf_index;
2466  if (len1 > len)
2467  len1 = len;
2468  if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
2469  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2470  else {
2471  memset(stream, 0, len1);
2472  if (!is->muted && is->audio_buf)
2473  SDL_MixAudio(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1, is->audio_volume);
2474  }
2475  len -= len1;
2476  stream += len1;
2477  is->audio_buf_index += len1;
2478  }
2479  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2480  /* Let's assume the audio driver that is used by SDL has two periods. */
2481  if (!isnan(is->audio_clock)) {
2482  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2483  sync_clock_to_slave(&is->extclk, &is->audclk);
2484  }
2485 }
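/* The clock update above subtracts the audio that is still queued downstream: assuming the
 * two hardware periods mentioned in the comment, the delay is
 * (2 * audio_hw_buf_size + audio_write_buf_size) / bytes_per_sec. With an 8192 byte SDL
 * buffer, 4096 bytes left unwritten and 192000 bytes per second (illustrative figures), the
 * audio clock is set about (2 * 8192 + 4096) / 192000 = 0.107 s behind is->audio_clock. */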
2486 
2487 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2488 {
2489  SDL_AudioSpec wanted_spec, spec;
2490  const char *env;
2491  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2492  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2493  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2494 
2495  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2496  if (env) {
2497  wanted_nb_channels = atoi(env);
2498  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2499  }
2500  if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2501  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2502  wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2503  }
2504  wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2505  wanted_spec.channels = wanted_nb_channels;
2506  wanted_spec.freq = wanted_sample_rate;
2507  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2508  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2509  return -1;
2510  }
2511  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2512  next_sample_rate_idx--;
2513  wanted_spec.format = AUDIO_S16SYS;
2514  wanted_spec.silence = 0;
2515  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
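 /* For example, at 48000 Hz the target of SDL_AUDIO_MAX_CALLBACKS_PER_SEC (30) gives
  * 48000 / 30 = 1600, av_log2(1600) = 10 and 2 << 10 = 2048 samples per callback, i.e.
  * roughly 23 callbacks per second; FFMAX() keeps the buffer at or above
  * SDL_AUDIO_MIN_BUFFER_SIZE for very low sample rates. */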
2516  wanted_spec.callback = sdl_audio_callback;
2517  wanted_spec.userdata = opaque;
2518  while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2519  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2520  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2521  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2522  if (!wanted_spec.channels) {
2523  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2524  wanted_spec.channels = wanted_nb_channels;
2525  if (!wanted_spec.freq) {
2526  av_log(NULL, AV_LOG_ERROR,
2527  "No more combinations to try, audio open failed\n");
2528  return -1;
2529  }
2530  }
2531  wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2532  }
2533  if (spec.format != AUDIO_S16SYS) {
2534  av_log(NULL, AV_LOG_ERROR,
2535  "SDL advised audio format %d is not supported!\n", spec.format);
2536  return -1;
2537  }
2538  if (spec.channels != wanted_spec.channels) {
2539  wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2540  if (!wanted_channel_layout) {
2541  av_log(NULL, AV_LOG_ERROR,
2542  "SDL advised channel count %d is not supported!\n", spec.channels);
2543  return -1;
2544  }
2545  }
2546 
2547  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2548  audio_hw_params->freq = spec.freq;
2549  audio_hw_params->channel_layout = wanted_channel_layout;
2550  audio_hw_params->channels = spec.channels;
2551  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
2552  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2553  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2554  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2555  return -1;
2556  }
2557  return spec.size;
2558 }
2559 
2560 /* open a given stream. Return 0 if OK */
2561 static int stream_component_open(VideoState *is, int stream_index)
2562 {
2563  AVFormatContext *ic = is->ic;
2564  AVCodecContext *avctx;
2565  AVCodec *codec;
2566  const char *forced_codec_name = NULL;
2567  AVDictionary *opts = NULL;
2568  AVDictionaryEntry *t = NULL;
2569  int sample_rate, nb_channels;
2570  int64_t channel_layout;
2571  int ret = 0;
2572  int stream_lowres = lowres;
2573 
2574  if (stream_index < 0 || stream_index >= ic->nb_streams)
2575  return -1;
2576 
2577  avctx = avcodec_alloc_context3(NULL);
2578  if (!avctx)
2579  return AVERROR(ENOMEM);
2580 
2581  ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
2582  if (ret < 0)
2583  goto fail;
2584  av_codec_set_pkt_timebase(avctx, ic->streams[stream_index]->time_base);
2585 
2586  codec = avcodec_find_decoder(avctx->codec_id);
2587 
2588  switch(avctx->codec_type){
2589  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2590  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2591  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2592  }
2593  if (forced_codec_name)
2594  codec = avcodec_find_decoder_by_name(forced_codec_name);
2595  if (!codec) {
2596  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2597  "No codec could be found with name '%s'\n", forced_codec_name);
2598  else av_log(NULL, AV_LOG_WARNING,
2599  "No codec could be found with id %d\n", avctx->codec_id);
2600  ret = AVERROR(EINVAL);
2601  goto fail;
2602  }
2603 
2604  avctx->codec_id = codec->id;
2605  if(stream_lowres > av_codec_get_max_lowres(codec)){
2606  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2607  av_codec_get_max_lowres(codec));
2608  stream_lowres = av_codec_get_max_lowres(codec);
2609  }
2610  av_codec_set_lowres(avctx, stream_lowres);
2611 
2612 #if FF_API_EMU_EDGE
2613  if(stream_lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2614 #endif
2615  if (fast)
2616  avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2617 #if FF_API_EMU_EDGE
2618  if(codec->capabilities & AV_CODEC_CAP_DR1)
2619  avctx->flags |= CODEC_FLAG_EMU_EDGE;
2620 #endif
2621 
2622  opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2623  if (!av_dict_get(opts, "threads", NULL, 0))
2624  av_dict_set(&opts, "threads", "auto", 0);
2625  if (stream_lowres)
2626  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2627  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2628  av_dict_set(&opts, "refcounted_frames", "1", 0);
2629  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2630  goto fail;
2631  }
2632  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2633  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2634  ret = AVERROR_OPTION_NOT_FOUND;
2635  goto fail;
2636  }
2637 
2638  is->eof = 0;
2639  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2640  switch (avctx->codec_type) {
2641  case AVMEDIA_TYPE_AUDIO:
2642 #if CONFIG_AVFILTER
2643  {
2644  AVFilterLink *link;
2645 
2646  is->audio_filter_src.freq = avctx->sample_rate;
2647  is->audio_filter_src.channels = avctx->channels;
2648  is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2649  is->audio_filter_src.fmt = avctx->sample_fmt;
2650  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2651  goto fail;
2652  link = is->out_audio_filter->inputs[0];
2653  sample_rate = link->sample_rate;
2654  nb_channels = avfilter_link_get_channels(link);
2655  channel_layout = link->channel_layout;
2656  }
2657 #else
2658  sample_rate = avctx->sample_rate;
2659  nb_channels = avctx->channels;
2660  channel_layout = avctx->channel_layout;
2661 #endif
2662 
2663  /* prepare audio output */
2664  if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2665  goto fail;
2666  is->audio_hw_buf_size = ret;
2667  is->audio_src = is->audio_tgt;
2668  is->audio_buf_size = 0;
2669  is->audio_buf_index = 0;
2670 
2671  /* init averaging filter */
2672  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2673  is->audio_diff_avg_count = 0;
2674  /* since we do not have a precise enough audio FIFO fullness,
2675  we correct audio sync only if larger than this threshold */
2676  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2677 
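 /* The coefficient above is exp(log(0.01) / 20) ~= 0.794, so the cumulative difference is an
  * exponentially weighted average in which the last AUDIO_DIFF_AVG_NB (20) frames carry about
  * 99% of the weight, and the threshold is one SDL hardware buffer expressed in seconds. */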
2678  is->audio_stream = stream_index;
2679  is->audio_st = ic->streams[stream_index];
2680 
2681  decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
2682  if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
2683  is->auddec.start_pts = is->audio_st->start_time;
2684  is->auddec.start_pts_tb = is->audio_st->time_base;
2685  }
2686  if ((ret = decoder_start(&is->auddec, audio_thread, is)) < 0)
2687  goto out;
2688  SDL_PauseAudio(0);
2689  break;
2690  case AVMEDIA_TYPE_VIDEO:
2691  is->video_stream = stream_index;
2692  is->video_st = ic->streams[stream_index];
2693 
2694  decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
2695  if ((ret = decoder_start(&is->viddec, video_thread, is)) < 0)
2696  goto out;
2697  is->queue_attachments_req = 1;
2698  break;
2699  case AVMEDIA_TYPE_SUBTITLE:
2700  is->subtitle_stream = stream_index;
2701  is->subtitle_st = ic->streams[stream_index];
2702 
2703  decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread);
2704  if ((ret = decoder_start(&is->subdec, subtitle_thread, is)) < 0)
2705  goto out;
2706  break;
2707  default:
2708  break;
2709  }
2710  goto out;
2711 
2712 fail:
2713  avcodec_free_context(&avctx);
2714 out:
2715  av_dict_free(&opts);
2716 
2717  return ret;
2718 }
2719 
2720 static int decode_interrupt_cb(void *ctx)
2721 {
2722  VideoState *is = ctx;
2723  return is->abort_request;
2724 }
2725 
2726 static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue) {
2727  return stream_id < 0 ||
2728  queue->abort_request ||
2729  (st->disposition & AV_DISPOSITION_ATTACHED_PIC) ||
2730  queue->nb_packets > MIN_FRAMES && (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0);
2731 }
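/* Put differently (same test as above, no new behaviour): a queue counts as "full" once it
 * holds more than MIN_FRAMES (25) packets and either no packet durations are known or the
 * queued packets span more than one second of stream time; attached pictures and aborted
 * queues are always treated as having enough. */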
2732 
2733 static int is_realtime(AVFormatContext *s)
2734 {
2735  if( !strcmp(s->iformat->name, "rtp")
2736  || !strcmp(s->iformat->name, "rtsp")
2737  || !strcmp(s->iformat->name, "sdp")
2738  )
2739  return 1;
2740 
2741  if(s->pb && ( !strncmp(s->filename, "rtp:", 4)
2742  || !strncmp(s->filename, "udp:", 4)
2743  )
2744  )
2745  return 1;
2746  return 0;
2747 }
2748 
2749 /* this thread gets the stream from the disk or the network */
2750 static int read_thread(void *arg)
2751 {
2752  VideoState *is = arg;
2753  AVFormatContext *ic = NULL;
2754  int err, i, ret;
2755  int st_index[AVMEDIA_TYPE_NB];
2756  AVPacket pkt1, *pkt = &pkt1;
2757  int64_t stream_start_time;
2758  int pkt_in_play_range = 0;
2759  AVDictionaryEntry *t;
2760  AVDictionary **opts;
2761  int orig_nb_streams;
2762  SDL_mutex *wait_mutex = SDL_CreateMutex();
2763  int scan_all_pmts_set = 0;
2764  int64_t pkt_ts;
2765 
2766  if (!wait_mutex) {
2767  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
2768  ret = AVERROR(ENOMEM);
2769  goto fail;
2770  }
2771 
2772  memset(st_index, -1, sizeof(st_index));
2773  is->last_video_stream = is->video_stream = -1;
2774  is->last_audio_stream = is->audio_stream = -1;
2775  is->last_subtitle_stream = is->subtitle_stream = -1;
2776  is->eof = 0;
2777 
2778  ic = avformat_alloc_context();
2779  if (!ic) {
2780  av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2781  ret = AVERROR(ENOMEM);
2782  goto fail;
2783  }
2784  ic->interrupt_callback.callback = decode_interrupt_cb;
2785  ic->interrupt_callback.opaque = is;
2786  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2787  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2788  scan_all_pmts_set = 1;
2789  }
2790  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2791  if (err < 0) {
2792  print_error(is->filename, err);
2793  ret = -1;
2794  goto fail;
2795  }
2796  if (scan_all_pmts_set)
2797  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2798 
2799  if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2800  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2801  ret = AVERROR_OPTION_NOT_FOUND;
2802  goto fail;
2803  }
2804  is->ic = ic;
2805 
2806  if (genpts)
2807  ic->flags |= AVFMT_FLAG_GENPTS;
2808 
2809  av_format_inject_global_side_data(ic);
2810 
2811  opts = setup_find_stream_info_opts(ic, codec_opts);
2812  orig_nb_streams = ic->nb_streams;
2813 
2814  err = avformat_find_stream_info(ic, opts);
2815 
2816  for (i = 0; i < orig_nb_streams; i++)
2817  av_dict_free(&opts[i]);
2818  av_freep(&opts);
2819 
2820  if (err < 0) {
2821  av_log(NULL, AV_LOG_WARNING,
2822  "%s: could not find codec parameters\n", is->filename);
2823  ret = -1;
2824  goto fail;
2825  }
2826 
2827  if (ic->pb)
2828  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2829 
2830  if (seek_by_bytes < 0)
2831  seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
2832 
2833  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2834 
2835  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2836  window_title = av_asprintf("%s - %s", t->value, input_filename);
2837 
2838  /* if seeking requested, we execute it */
2839  if (start_time != AV_NOPTS_VALUE) {
2840  int64_t timestamp;
2841 
2842  timestamp = start_time;
2843  /* add the stream start time */
2844  if (ic->start_time != AV_NOPTS_VALUE)
2845  timestamp += ic->start_time;
2846  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2847  if (ret < 0) {
2848  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2849  is->filename, (double)timestamp / AV_TIME_BASE);
2850  }
2851  }
2852 
2853  is->realtime = is_realtime(ic);
2854 
2855  if (show_status)
2856  av_dump_format(ic, 0, is->filename, 0);
2857 
2858  for (i = 0; i < ic->nb_streams; i++) {
2859  AVStream *st = ic->streams[i];
2860  enum AVMediaType type = st->codecpar->codec_type;
2861  st->discard = AVDISCARD_ALL;
2862  if (type >= 0 && wanted_stream_spec[type] && st_index[type] == -1)
2863  if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
2864  st_index[type] = i;
2865  }
2866  for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
2867  if (wanted_stream_spec[i] && st_index[i] == -1) {
2868  av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
2869  st_index[i] = INT_MAX;
2870  }
2871  }
2872 
2873  if (!video_disable)
2874  st_index[AVMEDIA_TYPE_VIDEO] =
2875  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2876  st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2877  if (!audio_disable)
2878  st_index[AVMEDIA_TYPE_AUDIO] =
2879  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2880  st_index[AVMEDIA_TYPE_AUDIO],
2881  st_index[AVMEDIA_TYPE_VIDEO],
2882  NULL, 0);
2883  if (!video_disable && !subtitle_disable)
2884  st_index[AVMEDIA_TYPE_SUBTITLE] =
2885  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2886  st_index[AVMEDIA_TYPE_SUBTITLE],
2887  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2888  st_index[AVMEDIA_TYPE_AUDIO] :
2889  st_index[AVMEDIA_TYPE_VIDEO]),
2890  NULL, 0);
2891 
2892  is->show_mode = show_mode;
2893  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2894  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2895  AVCodecParameters *codecpar = st->codecpar;
2896  AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
2897  if (codecpar->width)
2898  set_default_window_size(codecpar->width, codecpar->height, sar);
2899  }
2900 
2901  /* open the streams */
2902  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2903  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2904  }
2905 
2906  ret = -1;
2907  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2908  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2909  }
2910  if (is->show_mode == SHOW_MODE_NONE)
2911  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2912 
2913  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2914  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2915  }
2916 
2917  if (is->video_stream < 0 && is->audio_stream < 0) {
2918  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
2919  is->filename);
2920  ret = -1;
2921  goto fail;
2922  }
2923 
2924  if (infinite_buffer < 0 && is->realtime)
2925  infinite_buffer = 1;
2926 
2927  for (;;) {
2928  if (is->abort_request)
2929  break;
2930  if (is->paused != is->last_paused) {
2931  is->last_paused = is->paused;
2932  if (is->paused)
2933  is->read_pause_return = av_read_pause(ic);
2934  else
2935  av_read_play(ic);
2936  }
2937 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2938  if (is->paused &&
2939  (!strcmp(ic->iformat->name, "rtsp") ||
2940  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2941  /* wait 10 ms to avoid trying to get another packet */
2942  /* XXX: horrible */
2943  SDL_Delay(10);
2944  continue;
2945  }
2946 #endif
2947  if (is->seek_req) {
2948  int64_t seek_target = is->seek_pos;
2949  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2950  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2951 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2952 // of the seek_pos/seek_rel variables
2953 
2954  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2955  if (ret < 0) {
2956  av_log(NULL, AV_LOG_ERROR,
2957  "%s: error while seeking\n", is->ic->filename);
2958  } else {
2959  if (is->audio_stream >= 0) {
2960  packet_queue_flush(&is->audioq);
2961  packet_queue_put(&is->audioq, &flush_pkt);
2962  }
2963  if (is->subtitle_stream >= 0) {
2964  packet_queue_flush(&is->subtitleq);
2965  packet_queue_put(&is->subtitleq, &flush_pkt);
2966  }
2967  if (is->video_stream >= 0) {
2968  packet_queue_flush(&is->videoq);
2969  packet_queue_put(&is->videoq, &flush_pkt);
2970  }
2971  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
2972  set_clock(&is->extclk, NAN, 0);
2973  } else {
2974  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
2975  }
2976  }
2977  is->seek_req = 0;
2978  is->queue_attachments_req = 1;
2979  is->eof = 0;
2980  if (is->paused)
2981  step_to_next_frame(is);
2982  }
2983  if (is->queue_attachments_req) {
2984  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
2985  AVPacket copy;
2986  if ((ret = av_copy_packet(&copy, &is->video_st->attached_pic)) < 0)
2987  goto fail;
2988  packet_queue_put(&is->videoq, &copy);
2989  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
2990  }
2991  is->queue_attachments_req = 0;
2992  }
2993 
2994  /* if the queue are full, no need to read more */
2995  if (infinite_buffer<1 &&
2996  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2997  || (stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq) &&
2998  stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq) &&
2999  stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq)))) {
3000  /* wait 10 ms */
3001  SDL_LockMutex(wait_mutex);
3002  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3003  SDL_UnlockMutex(wait_mutex);
3004  continue;
3005  }
3006  if (!is->paused &&
3007  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
3008  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
3009  if (loop != 1 && (!loop || --loop)) {
3010  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
3011  } else if (autoexit) {
3012  ret = AVERROR_EOF;
3013  goto fail;
3014  }
3015  }
3016  ret = av_read_frame(ic, pkt);
3017  if (ret < 0) {
3018  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
3019  if (is->video_stream >= 0)
3020  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
3021  if (is->audio_stream >= 0)
3022  packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
3023  if (is->subtitle_stream >= 0)
3024  packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
3025  is->eof = 1;
3026  }
3027  if (ic->pb && ic->pb->error)
3028  break;
3029  SDL_LockMutex(wait_mutex);
3030  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3031  SDL_UnlockMutex(wait_mutex);
3032  continue;
3033  } else {
3034  is->eof = 0;
3035  }
3036  /* check if packet is in play range specified by user, then queue, otherwise discard */
3037  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3038  pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
3039  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3040  (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3041  av_q2d(ic->streams[pkt->stream_index]->time_base) -
3042  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3043  <= ((double)duration / 1000000);
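 /* Example with illustrative options: for "-ss 60 -t 10" a packet is queued only while its
  * timestamp, converted to seconds with the stream time_base and measured from the stream
  * start, is no more than 10 s past the requested 60 s start point; with no -t given,
  * duration stays AV_NOPTS_VALUE and every packet is considered in range. */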
3044  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3045  packet_queue_put(&is->audioq, pkt);
3046  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3047  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3048  packet_queue_put(&is->videoq, pkt);
3049  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3050  packet_queue_put(&is->subtitleq, pkt);
3051  } else {
3052  av_packet_unref(pkt);
3053  }
3054  }
3055 
3056  ret = 0;
3057  fail:
3058  if (ic && !is->ic)
3059  avformat_close_input(&ic);
3060 
3061  if (ret != 0) {
3062  SDL_Event event;
3063 
3064  event.type = FF_QUIT_EVENT;
3065  event.user.data1 = is;
3066  SDL_PushEvent(&event);
3067  }
3068  SDL_DestroyMutex(wait_mutex);
3069  return 0;
3070 }
3071 
3072 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
3073 {
3074  VideoState *is;
3075 
3076  is = av_mallocz(sizeof(VideoState));
3077  if (!is)
3078  return NULL;
3079  is->filename = av_strdup(filename);
3080  if (!is->filename)
3081  goto fail;
3082  is->iformat = iformat;
3083  is->ytop = 0;
3084  is->xleft = 0;
3085 
3086  /* start video display */
3087  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3088  goto fail;
3089  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3090  goto fail;
3091  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3092  goto fail;
3093 
3094  if (packet_queue_init(&is->videoq) < 0 ||
3095  packet_queue_init(&is->audioq) < 0 ||
3096  packet_queue_init(&is->subtitleq) < 0)
3097  goto fail;
3098 
3099  if (!(is->continue_read_thread = SDL_CreateCond())) {
3100  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
3101  goto fail;
3102  }
3103 
3104  init_clock(&is->vidclk, &is->videoq.serial);
3105  init_clock(&is->audclk, &is->audioq.serial);
3106  init_clock(&is->extclk, &is->extclk.serial);
3107  is->audio_clock_serial = -1;
3108  is->audio_volume = SDL_MIX_MAXVOLUME;
3109  is->muted = 0;
3110  is->av_sync_type = av_sync_type;
3111  is->read_tid = SDL_CreateThread(read_thread, "read_thread", is);
3112  if (!is->read_tid) {
3113  av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
3114 fail:
3115  stream_close(is);
3116  return NULL;
3117  }
3118  return is;
3119 }
3120 
3121 static void stream_cycle_channel(VideoState *is, int codec_type)
3122 {
3123  AVFormatContext *ic = is->ic;
3124  int start_index, stream_index;
3125  int old_index;
3126  AVStream *st;
3127  AVProgram *p = NULL;
3128  int nb_streams = is->ic->nb_streams;
3129 
3130  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3131  start_index = is->last_video_stream;
3132  old_index = is->video_stream;
3133  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3134  start_index = is->last_audio_stream;
3135  old_index = is->audio_stream;
3136  } else {
3137  start_index = is->last_subtitle_stream;
3138  old_index = is->subtitle_stream;
3139  }
3140  stream_index = start_index;
3141 
3142  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3143  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3144  if (p) {
3145  nb_streams = p->nb_stream_indexes;
3146  for (start_index = 0; start_index < nb_streams; start_index++)
3147  if (p->stream_index[start_index] == stream_index)
3148  break;
3149  if (start_index == nb_streams)
3150  start_index = -1;
3151  stream_index = start_index;
3152  }
3153  }
3154 
3155  for (;;) {
3156  if (++stream_index >= nb_streams)
3157  {
3158  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3159  {
3160  stream_index = -1;
3161  is->last_subtitle_stream = -1;
3162  goto the_end;
3163  }
3164  if (start_index == -1)
3165  return;
3166  stream_index = 0;
3167  }
3168  if (stream_index == start_index)
3169  return;
3170  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3171  if (st->codecpar->codec_type == codec_type) {
3172  /* check that parameters are OK */
3173  switch (codec_type) {
3174  case AVMEDIA_TYPE_AUDIO:
3175  if (st->codecpar->sample_rate != 0 &&
3176  st->codecpar->channels != 0)
3177  goto the_end;
3178  break;
3179  case AVMEDIA_TYPE_VIDEO:
3180  case AVMEDIA_TYPE_SUBTITLE:
3181  goto the_end;
3182  default:
3183  break;
3184  }
3185  }
3186  }
3187  the_end:
3188  if (p && stream_index != -1)
3189  stream_index = p->stream_index[stream_index];
3190  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3191  av_get_media_type_string(codec_type),
3192  old_index,
3193  stream_index);
3194 
3195  stream_component_close(is, old_index);
3196  stream_component_open(is, stream_index);
3197 }
3198 
3199 
3200 static void toggle_full_screen(VideoState *is)
3201 {
3202  is_full_screen = !is_full_screen;
3203  SDL_SetWindowFullscreen(window, is_full_screen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0);
3204 }
3205 
3206 static void toggle_audio_display(VideoState *is)
3207 {
3208  int next = is->show_mode;
3209  do {
3210  next = (next + 1) % SHOW_MODE_NB;
3211  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3212  if (is->show_mode != next) {
3213  is->force_refresh = 1;
3214  is->show_mode = next;
3215  }
3216 }
3217 
3218 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3219  double remaining_time = 0.0;
3220  SDL_PumpEvents();
3221  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
3222  if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3223  SDL_ShowCursor(0);
3224  cursor_hidden = 1;
3225  }
3226  if (remaining_time > 0.0)
3227  av_usleep((int64_t)(remaining_time * 1000000.0));
3228  remaining_time = REFRESH_RATE;
3229  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3230  video_refresh(is, &remaining_time);
3231  SDL_PumpEvents();
3232  }
3233 }
3234 
3235 static void seek_chapter(VideoState *is, int incr)
3236 {
3237  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3238  int i;
3239 
3240  if (!is->ic->nb_chapters)
3241  return;
3242 
3243  /* find the current chapter */
3244  for (i = 0; i < is->ic->nb_chapters; i++) {
3245  AVChapter *ch = is->ic->chapters[i];
3246  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3247  i--;
3248  break;
3249  }
3250  }
3251 
3252  i += incr;
3253  i = FFMAX(i, 0);
3254  if (i >= is->ic->nb_chapters)
3255  return;
3256 
3257  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3258  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3259  AV_TIME_BASE_Q), 0, 0);
3260 }
3261 
3262 /* handle an event sent by the GUI */
3263 static void event_loop(VideoState *cur_stream)
3264 {
3265  SDL_Event event;
3266  double incr, pos, frac;
3267 
3268  for (;;) {
3269  double x;
3270  refresh_loop_wait_event(cur_stream, &event);
3271  switch (event.type) {
3272  case SDL_KEYDOWN:
3273  if (exit_on_keydown) {
3274  do_exit(cur_stream);
3275  break;
3276  }
3277  switch (event.key.keysym.sym) {
3278  case SDLK_ESCAPE:
3279  case SDLK_q:
3280  do_exit(cur_stream);
3281  break;
3282  case SDLK_f:
3283  toggle_full_screen(cur_stream);
3284  cur_stream->force_refresh = 1;
3285  break;
3286  case SDLK_p:
3287  case SDLK_SPACE:
3288  toggle_pause(cur_stream);
3289  break;
3290  case SDLK_m:
3291  toggle_mute(cur_stream);
3292  break;
3293  case SDLK_KP_MULTIPLY:
3294  case SDLK_0:
3295  update_volume(cur_stream, 1, SDL_VOLUME_STEP);
3296  break;
3297  case SDLK_KP_DIVIDE:
3298  case SDLK_9:
3299  update_volume(cur_stream, -1, SDL_VOLUME_STEP);
3300  break;
3301  case SDLK_s: // S: Step to next frame
3302  step_to_next_frame(cur_stream);
3303  break;
3304  case SDLK_a:
3305  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3306  break;
3307  case SDLK_v:
3308  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3309  break;
3310  case SDLK_c:
3311  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3312  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3313  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3314  break;
3315  case SDLK_t:
3316  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3317  break;
3318  case SDLK_w:
3319 #if CONFIG_AVFILTER
3320  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3321  if (++cur_stream->vfilter_idx >= nb_vfilters)
3322  cur_stream->vfilter_idx = 0;
3323  } else {
3324  cur_stream->vfilter_idx = 0;
3325  toggle_audio_display(cur_stream);
3326  }
3327 #else
3328  toggle_audio_display(cur_stream);
3329 #endif
3330  break;
3331  case SDLK_PAGEUP:
3332  if (cur_stream->ic->nb_chapters <= 1) {
3333  incr = 600.0;
3334  goto do_seek;
3335  }
3336  seek_chapter(cur_stream, 1);
3337  break;
3338  case SDLK_PAGEDOWN:
3339  if (cur_stream->ic->nb_chapters <= 1) {
3340  incr = -600.0;
3341  goto do_seek;
3342  }
3343  seek_chapter(cur_stream, -1);
3344  break;
3345  case SDLK_LEFT:
3346  incr = -10.0;
3347  goto do_seek;
3348  case SDLK_RIGHT:
3349  incr = 10.0;
3350  goto do_seek;
3351  case SDLK_UP:
3352  incr = 60.0;
3353  goto do_seek;
3354  case SDLK_DOWN:
3355  incr = -60.0;
3356  do_seek:
3357  if (seek_by_bytes) {
3358  pos = -1;
3359  if (pos < 0 && cur_stream->video_stream >= 0)
3360  pos = frame_queue_last_pos(&cur_stream->pictq);
3361  if (pos < 0 && cur_stream->audio_stream >= 0)
3362  pos = frame_queue_last_pos(&cur_stream->sampq);
3363  if (pos < 0)
3364  pos = avio_tell(cur_stream->ic->pb);
3365  if (cur_stream->ic->bit_rate)
3366  incr *= cur_stream->ic->bit_rate / 8.0;
3367  else
3368  incr *= 180000.0;
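 /* Illustrative figures for the byte-based seek above: at a container bit rate of 8 Mbit/s a
  * 10 second step becomes 10 * 8000000 / 8 = 10 MB, while the 180000 bytes/s fallback
  * (about 1.44 Mbit/s) turns the same keypress into a step of roughly 1.8 MB. */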
3369  pos += incr;
3370  stream_seek(cur_stream, pos, incr, 1);
3371  } else {
3372  pos = get_master_clock(cur_stream);
3373  if (isnan(pos))
3374  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3375  pos += incr;
3376  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3377  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3378  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3379  }
3380  break;
3381  default:
3382  break;
3383  }
3384  break;
3385  case SDL_MOUSEBUTTONDOWN:
3386  if (exit_on_mousedown) {
3387  do_exit(cur_stream);
3388  break;
3389  }
3390  if (event.button.button == SDL_BUTTON_LEFT) {
3391  static int64_t last_mouse_left_click = 0;
3392  if (av_gettime_relative() - last_mouse_left_click <= 500000) {
3393  toggle_full_screen(cur_stream);
3394  cur_stream->force_refresh = 1;
3395  last_mouse_left_click = 0;
3396  } else {
3397  last_mouse_left_click = av_gettime_relative();
3398  }
3399  }
3400  case SDL_MOUSEMOTION:
3401  if (cursor_hidden) {
3402  SDL_ShowCursor(1);
3403  cursor_hidden = 0;
3404  }
3405  cursor_last_shown = av_gettime_relative();
3406  if (event.type == SDL_MOUSEBUTTONDOWN) {
3407  if (event.button.button != SDL_BUTTON_RIGHT)
3408  break;
3409  x = event.button.x;
3410  } else {
3411  if (!(event.motion.state & SDL_BUTTON_RMASK))
3412  break;
3413  x = event.motion.x;
3414  }
3415  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3416  uint64_t size = avio_size(cur_stream->ic->pb);
3417  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3418  } else {
3419  int64_t ts;
3420  int ns, hh, mm, ss;
3421  int tns, thh, tmm, tss;
3422  tns = cur_stream->ic->duration / 1000000LL;
3423  thh = tns / 3600;
3424  tmm = (tns % 3600) / 60;
3425  tss = (tns % 60);
3426  frac = x / cur_stream->width;
3427  ns = frac * tns;
3428  hh = ns / 3600;
3429  mm = (ns % 3600) / 60;
3430  ss = (ns % 60);
3431  av_log(NULL, AV_LOG_INFO,
3432  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3433  hh, mm, ss, thh, tmm, tss);
3434  ts = frac * cur_stream->ic->duration;
3435  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3436  ts += cur_stream->ic->start_time;
3437  stream_seek(cur_stream, ts, 0, 0);
3438  }
3439  break;
3440  case SDL_WINDOWEVENT:
3441  switch (event.window.event) {
3442  case SDL_WINDOWEVENT_RESIZED:
3443  screen_width = cur_stream->width = event.window.data1;
3444  screen_height = cur_stream->height = event.window.data2;
3445  if (cur_stream->vis_texture) {
3446  SDL_DestroyTexture(cur_stream->vis_texture);
3447  cur_stream->vis_texture = NULL;
3448  }
3449  case SDL_WINDOWEVENT_EXPOSED:
3450  cur_stream->force_refresh = 1;
3451  }
3452  break;
3453  case SDL_QUIT:
3454  case FF_QUIT_EVENT:
3455  do_exit(cur_stream);
3456  break;
3457  case FF_ALLOC_EVENT:
3458  alloc_picture(event.user.data1);
3459  break;
3460  default:
3461  break;
3462  }
3463  }
3464 }
3465 
3466 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3467 {
3468  av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3469  return opt_default(NULL, "video_size", arg);
3470 }
3471 
3472 static int opt_width(void *optctx, const char *opt, const char *arg)
3473 {
3474  screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3475  return 0;
3476 }
3477 
3478 static int opt_height(void *optctx, const char *opt, const char *arg)
3479 {
3480  screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3481  return 0;
3482 }
3483 
3484 static int opt_format(void *optctx, const char *opt, const char *arg)
3485 {
3486  file_iformat = av_find_input_format(arg);
3487  if (!file_iformat) {
3488  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3489  return AVERROR(EINVAL);
3490  }
3491  return 0;
3492 }
3493 
3494 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3495 {
3496  av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3497  return opt_default(NULL, "pixel_format", arg);
3498 }
3499 
3500 static int opt_sync(void *optctx, const char *opt, const char *arg)
3501 {
3502  if (!strcmp(arg, "audio"))
3503  av_sync_type = AV_SYNC_AUDIO_MASTER;
3504  else if (!strcmp(arg, "video"))
3505  av_sync_type = AV_SYNC_VIDEO_MASTER;
3506  else if (!strcmp(arg, "ext"))
3507  av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3508  else {
3509  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3510  exit(1);
3511  }
3512  return 0;
3513 }
3514 
3515 static int opt_seek(void *optctx, const char *opt, const char *arg)
3516 {
3517  start_time = parse_time_or_die(opt, arg, 1);
3518  return 0;
3519 }
3520 
3521 static int opt_duration(void *optctx, const char *opt, const char *arg)
3522 {
3523  duration = parse_time_or_die(opt, arg, 1);
3524  return 0;
3525 }
3526 
3527 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3528 {
3529  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3530  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3531  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3532  parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3533  return 0;
3534 }
3535 
3536 static void opt_input_file(void *optctx, const char *filename)
3537 {
3538  if (input_filename) {
3539  av_log(NULL, AV_LOG_FATAL,
3540  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3541  filename, input_filename);
3542  exit(1);
3543  }
3544  if (!strcmp(filename, "-"))
3545  filename = "pipe:";
3546  input_filename = filename;
3547 }
3548 
3549 static int opt_codec(void *optctx, const char *opt, const char *arg)
3550 {
3551  const char *spec = strchr(opt, ':');
3552  if (!spec) {
3553  av_log(NULL, AV_LOG_ERROR,
3554  "No media specifier was specified in '%s' in option '%s'\n",
3555  arg, opt);
3556  return AVERROR(EINVAL);
3557  }
3558  spec++;
3559  switch (spec[0]) {
3560  case 'a' : audio_codec_name = arg; break;
3561  case 's' : subtitle_codec_name = arg; break;
3562  case 'v' : video_codec_name = arg; break;
3563  default:
3564  av_log(NULL, AV_LOG_ERROR,
3565  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3566  return AVERROR(EINVAL);
3567  }
3568  return 0;
3569 }
3570 
3571 static int dummy;
3572 
3573 static const OptionDef options[] = {
3574 #include "cmdutils_common_opts.h"
3575  { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3576  { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3577  { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3578  { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3579  { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3580  { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3581  { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3582  { "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3583  { "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3584  { "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3585  { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3586  { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3587  { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3588  { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3589  { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3590  { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3591  { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3592  { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3593  { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3594  { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3595  { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3596  { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3597  { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3598  { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3599  { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3600  { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3601  { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3602  { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3603  { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3604 #if CONFIG_AVFILTER
3605  { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3606  { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3607 #endif
3608  { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3609  { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3610  { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3611  { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3612  { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3613  { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3614  { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3615  { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3616  { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
3617  { NULL, },
3618 };
3619 
3620 static void show_usage(void)
3621 {
3622  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3623  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3624  av_log(NULL, AV_LOG_INFO, "\n");
3625 }
3626 
3627 void show_help_default(const char *opt, const char *arg)
3628 {
3629  av_log_set_callback(log_callback_help);
3630  show_usage();
3631  show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3632  show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3633  printf("\n");
3634  show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3635  show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3636 #if !CONFIG_AVFILTER
3637  show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3638 #else
3639  show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3640 #endif
3641  printf("\nWhile playing:\n"
3642  "q, ESC quit\n"
3643  "f toggle full screen\n"
3644  "p, SPC pause\n"
3645  "m toggle mute\n"
3646  "9, 0 decrease and increase volume respectively\n"
3647  "/, * decrease and increase volume respectively\n"
3648  "a cycle audio channel in the current program\n"
3649  "v cycle video channel\n"
3650  "t cycle subtitle channel in the current program\n"
3651  "c cycle program\n"
3652  "w cycle video filters or show modes\n"
3653  "s activate frame-step mode\n"
3654  "left/right seek backward/forward 10 seconds\n"
3655  "down/up seek backward/forward 1 minute\n"
3656  "page down/page up seek backward/forward 10 minutes\n"
3657  "right mouse click seek to percentage in file corresponding to fraction of width\n"
3658  "left double-click toggle full screen\n"
3659  );
3660 }
3661 
3662 static int lockmgr(void **mtx, enum AVLockOp op)
3663 {
3664  switch(op) {
3665  case AV_LOCK_CREATE:
3666  *mtx = SDL_CreateMutex();
3667  if(!*mtx) {
3668  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
3669  return 1;
3670  }
3671  return 0;
3672  case AV_LOCK_OBTAIN:
3673  return !!SDL_LockMutex(*mtx);
3674  case AV_LOCK_RELEASE:
3675  return !!SDL_UnlockMutex(*mtx);
3676  case AV_LOCK_DESTROY:
3677  SDL_DestroyMutex(*mtx);
3678  return 0;
3679  }
3680  return 1;
3681 }
3682 
3683 /* Called from the main */
3684 int main(int argc, char **argv)
3685 {
3686  int flags;
3687  VideoState *is;
3688 
3689  init_dynload();
3690 
3691  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3692  parse_loglevel(argc, argv, options);
3693 
3694  /* register all codecs, demux and protocols */
3695 #if CONFIG_AVDEVICE
3696  avdevice_register_all();
3697 #endif
3698 #if CONFIG_AVFILTER
3699  avfilter_register_all();
3700 #endif
3701  av_register_all();
3702  avformat_network_init();
3703 
3704  init_opts();
3705 
3706  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3707  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3708 
3709  show_banner(argc, argv, options);
3710 
3711  parse_options(NULL, argc, argv, options, opt_input_file);
3712 
3713  if (!input_filename) {
3714  show_usage();
3715  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3716  av_log(NULL, AV_LOG_FATAL,
3717  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3718  exit(1);
3719  }
3720 
3721  if (display_disable) {
3722  video_disable = 1;
3723  }
3724  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3725  if (audio_disable)
3726  flags &= ~SDL_INIT_AUDIO;
3727  else {
3728  /* Try to work around an occasional ALSA buffer underflow issue when the
3729  * period size is NPOT due to ALSA resampling by forcing the buffer size. */
3730  if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
3731  SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1);
3732  }
3733  if (display_disable)
3734  flags &= ~SDL_INIT_VIDEO;
3735  if (SDL_Init (flags)) {
3736  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3737  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3738  exit(1);
3739  }
3740 
3741  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3742  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3743 
3744  if (av_lockmgr_register(lockmgr)) {
3745  av_log(NULL, AV_LOG_FATAL, "Could not initialize lock manager!\n");
3746  do_exit(NULL);
3747  }
3748 
3749  av_init_packet(&flush_pkt);
3750  flush_pkt.data = (uint8_t *)&flush_pkt;
3751 
3752  is = stream_open(input_filename, file_iformat);
3753  if (!is) {
3754  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3755  do_exit(NULL);
3756  }
3757 
3758  event_loop(is);
3759 
3760  /* never returns */
3761 
3762  return 0;
3763 }
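
The flush packet prepared near the end of main() (flush_pkt.data pointing back at flush_pkt itself) acts as a sentinel: the packet queues recognize a post-seek flush request by pointer identity and never hand the packet to a decoder. A minimal, self-contained sketch of that pattern is given below; it is not part of ffplay.c, and the helper names init_flush_packet and is_flush_packet are illustrative only.

#include <libavcodec/avcodec.h>

/* Illustrative sketch (not part of ffplay.c): a self-referential sentinel
 * packet that downstream code detects by comparing pointers. */
static AVPacket flush_pkt;

static void init_flush_packet(void)
{
    av_init_packet(&flush_pkt);
    flush_pkt.data = (uint8_t *)&flush_pkt; /* unique marker: the packet points at itself */
    flush_pkt.size = 0;                     /* carries no payload to decode */
}

static int is_flush_packet(const AVPacket *pkt)
{
    /* Compare the data pointer, not the contents. */
    return pkt->data == (uint8_t *)&flush_pkt;
}

A queue consumer would call is_flush_packet() on each dequeued packet and, when it returns nonzero, reset its decoder (for example with avcodec_flush_buffers()) instead of decoding.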
Definition: samplefmt.c:119