FFmpeg
ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include <stdint.h>
32 
33 #include "libavutil/avstring.h"
34 #include "libavutil/colorspace.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/dict.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/samplefmt.h"
41 #include "libavutil/avassert.h"
42 #include "libavutil/time.h"
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswscale/swscale.h"
46 #include "libavutil/opt.h"
47 #include "libavcodec/avfft.h"
48 #include "libswresample/swresample.h"
49 
50 #if CONFIG_AVFILTER
51 # include "libavfilter/avcodec.h"
52 # include "libavfilter/avfilter.h"
53 # include "libavfilter/buffersink.h"
54 # include "libavfilter/buffersrc.h"
55 #endif
56 
57 #include <SDL.h>
58 #include <SDL_thread.h>
59 
60 #include "cmdutils.h"
61 
62 #include <assert.h>
63 
64 const char program_name[] = "ffplay";
65 const int program_birth_year = 2003;
66 
67 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
68 #define MIN_FRAMES 5
69 
70 /* Minimum SDL audio buffer size, in samples. */
71 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
72 /* Calculate the actual buffer size, keeping in mind not to cause too frequent audio callbacks */
73 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
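/* Note: elsewhere in this file (not shown in this excerpt) the SDL buffer size is
 * derived from these two constants, roughly as
 *   samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
 * e.g. at 48 kHz: 48000 / 30 = 1600 -> 2 << av_log2(1600) = 2048 samples, about 23 callbacks/s. */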
74 
75 /* no AV sync correction is done if below the minimum AV sync threshold */
76 #define AV_SYNC_THRESHOLD_MIN 0.04
77 /* AV sync correction is done if above the maximum AV sync threshold */
78 #define AV_SYNC_THRESHOLD_MAX 0.1
79 /* If a frame duration is longer than this, it will not be duplicated to compensate for AV sync */
80 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
81 /* no AV correction is done if the error is too big */
82 #define AV_NOSYNC_THRESHOLD 10.0
83 
84 /* maximum audio speed change to get correct sync */
85 #define SAMPLE_CORRECTION_PERCENT_MAX 10
86 
87 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
88 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
89 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
90 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
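/* Note: check_external_clock_speed() below uses these bounds: when a realtime stream's
 * packet queues run low, the external clock speed is stepped down towards
 * EXTERNAL_CLOCK_SPEED_MIN; when they are well filled it is stepped up towards
 * EXTERNAL_CLOCK_SPEED_MAX; otherwise it decays back towards 1.0 in
 * EXTERNAL_CLOCK_SPEED_STEP increments. */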
91 
92 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
93 #define AUDIO_DIFF_AVG_NB 20
94 
95 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
96 #define REFRESH_RATE 0.01
97 
98 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
99 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
100 #define SAMPLE_ARRAY_SIZE (8 * 65536)
101 
102 #define CURSOR_HIDE_DELAY 1000000
103 
104 static int64_t sws_flags = SWS_BICUBIC;
105 
106 typedef struct MyAVPacketList {
107  AVPacket pkt;
108  struct MyAVPacketList *next;
109  int serial;
110 } MyAVPacketList;
111 
112 typedef struct PacketQueue {
113  MyAVPacketList *first_pkt, *last_pkt;
114  int nb_packets;
115  int size;
116  int abort_request;
117  int serial;
118  SDL_mutex *mutex;
119  SDL_cond *cond;
120 } PacketQueue;
121 
122 #define VIDEO_PICTURE_QUEUE_SIZE 3
123 #define SUBPICTURE_QUEUE_SIZE 16
124 #define SAMPLE_QUEUE_SIZE 9
125 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
126 
127 typedef struct AudioParams {
128  int freq;
129  int channels;
130  int64_t channel_layout;
131  enum AVSampleFormat fmt;
132  int frame_size;
133  int bytes_per_sec;
134 } AudioParams;
135 
136 typedef struct Clock {
137  double pts; /* clock base */
138  double pts_drift; /* clock base minus time at which we updated the clock */
139  double last_updated;
140  double speed;
141  int serial; /* clock is based on a packet with this serial */
142  int paused;
143  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
144 } Clock;
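/* Note: the clock stores pts_drift = pts - last_updated, so get_clock() can
 * reconstruct the current value from the wall clock alone:
 *   get_clock() = pts_drift + now - (now - last_updated) * (1.0 - speed)
 * which reduces to pts + (now - last_updated) when speed == 1.0. */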
145 
146 /* Common struct for handling all types of decoded data and allocated render buffers. */
147 typedef struct Frame {
148  AVFrame *frame;
149  AVSubtitle sub;
150  int serial;
151  double pts; /* presentation timestamp for the frame */
152  double duration; /* estimated duration of the frame */
153  int64_t pos; /* byte position of the frame in the input file */
154  SDL_Overlay *bmp;
155  int allocated;
156  int reallocate;
157  int width;
158  int height;
159  AVRational sar;
160 } Frame;
161 
162 typedef struct FrameQueue {
163  Frame queue[FRAME_QUEUE_SIZE];
164  int rindex;
165  int windex;
166  int size;
167  int max_size;
168  int keep_last;
169  int rindex_shown;
170  SDL_mutex *mutex;
171  SDL_cond *cond;
172  PacketQueue *pktq;
173 } FrameQueue;
174 
175 enum {
176  AV_SYNC_AUDIO_MASTER, /* default choice */
177  AV_SYNC_VIDEO_MASTER,
178  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
179 };
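/* Note: get_master_sync_type() below falls back when the requested master is
 * unavailable: video master without a video stream becomes audio master, and audio
 * master without an audio stream becomes the external clock. */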
180 
181 typedef struct Decoder {
182  AVPacket pkt;
183  AVPacket pkt_temp;
184  PacketQueue *queue;
185  AVCodecContext *avctx;
186  int pkt_serial;
187  int finished;
188  int packet_pending;
189  SDL_cond *empty_queue_cond;
190  int64_t start_pts;
191  AVRational start_pts_tb;
192  int64_t next_pts;
193  AVRational next_pts_tb;
194 } Decoder;
195 
196 typedef struct VideoState {
197  SDL_Thread *read_tid;
198  SDL_Thread *video_tid;
199  SDL_Thread *audio_tid;
203  int paused;
206  int seek_req;
208  int64_t seek_pos;
209  int64_t seek_rel;
212  int realtime;
213 
217 
221 
225 
227 
229 
230  double audio_clock;
232  double audio_diff_cum; /* used for AV difference average computation */
242  unsigned int audio_buf_size; /* in bytes */
243  unsigned int audio_buf1_size;
244  int audio_buf_index; /* in bytes */
247 #if CONFIG_AVFILTER
248  struct AudioParams audio_filter_src;
249 #endif
254 
255  enum ShowMode {
256  SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
257  } show_mode;
264  int xpos;
266 
267  SDL_Thread *subtitle_tid;
271 
272  double frame_timer;
278  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
279 #if !CONFIG_AVFILTER
280  struct SwsContext *img_convert_ctx;
281 #endif
283 
284  char filename[1024];
286  int step;
287 
288 #if CONFIG_AVFILTER
289  int vfilter_idx;
290  AVFilterContext *in_video_filter; // the first filter in the video chain
291  AVFilterContext *out_video_filter; // the last filter in the video chain
292  AVFilterContext *in_audio_filter; // the first filter in the audio chain
293  AVFilterContext *out_audio_filter; // the last filter in the audio chain
294  AVFilterGraph *agraph; // audio filter graph
295 #endif
296 
298 
300 } VideoState;
301 
302 /* options specified by the user */
304 static const char *input_filename;
305 static const char *window_title;
306 static int fs_screen_width;
307 static int fs_screen_height;
308 static int default_width = 640;
309 static int default_height = 480;
310 static int screen_width = 0;
311 static int screen_height = 0;
312 static int audio_disable;
313 static int video_disable;
314 static int subtitle_disable;
315 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
316 static int seek_by_bytes = -1;
317 static int display_disable;
318 static int show_status = 1;
319 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
320 static int64_t start_time = AV_NOPTS_VALUE;
321 static int64_t duration = AV_NOPTS_VALUE;
322 static int fast = 0;
323 static int genpts = 0;
324 static int lowres = 0;
325 static int decoder_reorder_pts = -1;
326 static int autoexit;
327 static int exit_on_keydown;
328 static int exit_on_mousedown;
329 static int loop = 1;
330 static int framedrop = -1;
331 static int infinite_buffer = -1;
332 static enum ShowMode show_mode = SHOW_MODE_NONE;
333 static const char *audio_codec_name;
334 static const char *subtitle_codec_name;
335 static const char *video_codec_name;
336 double rdftspeed = 0.02;
337 static int64_t cursor_last_shown;
338 static int cursor_hidden = 0;
339 #if CONFIG_AVFILTER
340 static const char **vfilters_list = NULL;
341 static int nb_vfilters = 0;
342 static char *afilters = NULL;
343 #endif
344 static int autorotate = 1;
345 
346 /* current context */
347 static int is_full_screen;
348 static int64_t audio_callback_time;
349 
350 static AVPacket flush_pkt;
351 
352 #define FF_ALLOC_EVENT (SDL_USEREVENT)
353 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
354 
355 static SDL_Surface *screen;
356 
357 #if CONFIG_AVFILTER
358 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
359 {
360  GROW_ARRAY(vfilters_list, nb_vfilters);
361  vfilters_list[nb_vfilters - 1] = arg;
362  return 0;
363 }
364 #endif
365 
366 static inline
367 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
368  enum AVSampleFormat fmt2, int64_t channel_count2)
369 {
370  /* If channel count == 1, planar and non-planar formats are the same */
371  if (channel_count1 == 1 && channel_count2 == 1)
372  return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
373  else
374  return channel_count1 != channel_count2 || fmt1 != fmt2;
375 }
376 
377 static inline
378 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
379 {
380  if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
381  return channel_layout;
382  else
383  return 0;
384 }
385 
386 static void free_picture(Frame *vp);
387 
388 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
389 {
390  MyAVPacketList *pkt1;
391 
392  if (q->abort_request)
393  return -1;
394 
395  pkt1 = av_malloc(sizeof(MyAVPacketList));
396  if (!pkt1)
397  return -1;
398  pkt1->pkt = *pkt;
399  pkt1->next = NULL;
400  if (pkt == &flush_pkt)
401  q->serial++;
402  pkt1->serial = q->serial;
403 
404  if (!q->last_pkt)
405  q->first_pkt = pkt1;
406  else
407  q->last_pkt->next = pkt1;
408  q->last_pkt = pkt1;
409  q->nb_packets++;
410  q->size += pkt1->pkt.size + sizeof(*pkt1);
411  /* XXX: should duplicate packet data in DV case */
412  SDL_CondSignal(q->cond);
413  return 0;
414 }
415 
416 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
417 {
418  int ret;
419 
420  /* duplicate the packet */
421  if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
422  return -1;
423 
424  SDL_LockMutex(q->mutex);
425  ret = packet_queue_put_private(q, pkt);
426  SDL_UnlockMutex(q->mutex);
427 
428  if (pkt != &flush_pkt && ret < 0)
429  av_free_packet(pkt);
430 
431  return ret;
432 }
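/* Note: pushing flush_pkt bumps q->serial (see packet_queue_put_private above);
 * every queued packet carries the serial it was added with, so decoders and clocks
 * can recognise and discard data belonging to a pre-seek/pre-flush generation. */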
433 
434 static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
435 {
436  AVPacket pkt1, *pkt = &pkt1;
437  av_init_packet(pkt);
438  pkt->data = NULL;
439  pkt->size = 0;
440  pkt->stream_index = stream_index;
441  return packet_queue_put(q, pkt);
442 }
443 
444 /* packet queue handling */
445 static void packet_queue_init(PacketQueue *q)
446 {
447  memset(q, 0, sizeof(PacketQueue));
448  q->mutex = SDL_CreateMutex();
449  q->cond = SDL_CreateCond();
450  q->abort_request = 1;
451 }
452 
453 static void packet_queue_flush(PacketQueue *q)
454 {
455  MyAVPacketList *pkt, *pkt1;
456 
457  SDL_LockMutex(q->mutex);
458  for (pkt = q->first_pkt; pkt; pkt = pkt1) {
459  pkt1 = pkt->next;
460  av_free_packet(&pkt->pkt);
461  av_freep(&pkt);
462  }
463  q->last_pkt = NULL;
464  q->first_pkt = NULL;
465  q->nb_packets = 0;
466  q->size = 0;
467  SDL_UnlockMutex(q->mutex);
468 }
469 
470 static void packet_queue_destroy(PacketQueue *q)
471 {
472  packet_queue_flush(q);
473  SDL_DestroyMutex(q->mutex);
474  SDL_DestroyCond(q->cond);
475 }
476 
477 static void packet_queue_abort(PacketQueue *q)
478 {
479  SDL_LockMutex(q->mutex);
480 
481  q->abort_request = 1;
482 
483  SDL_CondSignal(q->cond);
484 
485  SDL_UnlockMutex(q->mutex);
486 }
487 
488 static void packet_queue_start(PacketQueue *q)
489 {
490  SDL_LockMutex(q->mutex);
491  q->abort_request = 0;
492  packet_queue_put_private(q, &flush_pkt);
493  SDL_UnlockMutex(q->mutex);
494 }
495 
496 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
497 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
498 {
499  MyAVPacketList *pkt1;
500  int ret;
501 
502  SDL_LockMutex(q->mutex);
503 
504  for (;;) {
505  if (q->abort_request) {
506  ret = -1;
507  break;
508  }
509 
510  pkt1 = q->first_pkt;
511  if (pkt1) {
512  q->first_pkt = pkt1->next;
513  if (!q->first_pkt)
514  q->last_pkt = NULL;
515  q->nb_packets--;
516  q->size -= pkt1->pkt.size + sizeof(*pkt1);
517  *pkt = pkt1->pkt;
518  if (serial)
519  *serial = pkt1->serial;
520  av_free(pkt1);
521  ret = 1;
522  break;
523  } else if (!block) {
524  ret = 0;
525  break;
526  } else {
527  SDL_CondWait(q->cond, q->mutex);
528  }
529  }
530  SDL_UnlockMutex(q->mutex);
531  return ret;
532 }
533 
534 static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
535  memset(d, 0, sizeof(Decoder));
536  d->avctx = avctx;
537  d->queue = queue;
538  d->empty_queue_cond = empty_queue_cond;
540 }
541 
542 static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
543  int got_frame = 0;
544 
545  do {
546  int ret = -1;
547 
548  if (d->queue->abort_request)
549  return -1;
550 
551  if (!d->packet_pending || d->queue->serial != d->pkt_serial) {
552  AVPacket pkt;
553  do {
554  if (d->queue->nb_packets == 0)
555  SDL_CondSignal(d->empty_queue_cond);
556  if (packet_queue_get(d->queue, &pkt, 1, &d->pkt_serial) < 0)
557  return -1;
558  if (pkt.data == flush_pkt.data) {
559  avcodec_flush_buffers(d->avctx);
560  d->finished = 0;
561  d->next_pts = d->start_pts;
562  d->next_pts_tb = d->start_pts_tb;
563  }
564  } while (pkt.data == flush_pkt.data || d->queue->serial != d->pkt_serial);
565  av_free_packet(&d->pkt);
566  d->pkt_temp = d->pkt = pkt;
567  d->packet_pending = 1;
568  }
569 
570  switch (d->avctx->codec_type) {
571  case AVMEDIA_TYPE_VIDEO:
572  ret = avcodec_decode_video2(d->avctx, frame, &got_frame, &d->pkt_temp);
573  if (got_frame) {
574  if (decoder_reorder_pts == -1) {
575  frame->pts = av_frame_get_best_effort_timestamp(frame);
576  } else if (decoder_reorder_pts) {
577  frame->pts = frame->pkt_pts;
578  } else {
579  frame->pts = frame->pkt_dts;
580  }
581  }
582  break;
583  case AVMEDIA_TYPE_AUDIO:
584  ret = avcodec_decode_audio4(d->avctx, frame, &got_frame, &d->pkt_temp);
585  if (got_frame) {
586  AVRational tb = (AVRational){1, frame->sample_rate};
587  if (frame->pts != AV_NOPTS_VALUE)
588  frame->pts = av_rescale_q(frame->pts, d->avctx->time_base, tb);
589  else if (frame->pkt_pts != AV_NOPTS_VALUE)
590  frame->pts = av_rescale_q(frame->pkt_pts, av_codec_get_pkt_timebase(d->avctx), tb);
591  else if (d->next_pts != AV_NOPTS_VALUE)
592  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
593  if (frame->pts != AV_NOPTS_VALUE) {
594  d->next_pts = frame->pts + frame->nb_samples;
595  d->next_pts_tb = tb;
596  }
597  }
598  break;
599  case AVMEDIA_TYPE_SUBTITLE:
600  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &d->pkt_temp);
601  break;
602  }
603 
604  if (ret < 0) {
605  d->packet_pending = 0;
606  } else {
607  d->pkt_temp.dts =
608  d->pkt_temp.pts = AV_NOPTS_VALUE;
609  if (d->pkt_temp.data) {
610  if (d->avctx->codec_type != AVMEDIA_TYPE_AUDIO)
611  ret = d->pkt_temp.size;
612  d->pkt_temp.data += ret;
613  d->pkt_temp.size -= ret;
614  if (d->pkt_temp.size <= 0)
615  d->packet_pending = 0;
616  } else {
617  if (!got_frame) {
618  d->packet_pending = 0;
619  d->finished = d->pkt_serial;
620  }
621  }
622  }
623  } while (!got_frame && !d->finished);
624 
625  return got_frame;
626 }
627 
628 static void decoder_destroy(Decoder *d) {
629  av_free_packet(&d->pkt);
630 }
631 
632 static void frame_queue_unref_item(Frame *vp)
633 {
634  av_frame_unref(vp->frame);
635  avsubtitle_free(&vp->sub);
636 }
637 
638 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
639 {
640  int i;
641  memset(f, 0, sizeof(FrameQueue));
642  if (!(f->mutex = SDL_CreateMutex()))
643  return AVERROR(ENOMEM);
644  if (!(f->cond = SDL_CreateCond()))
645  return AVERROR(ENOMEM);
646  f->pktq = pktq;
647  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
648  f->keep_last = !!keep_last;
649  for (i = 0; i < f->max_size; i++)
650  if (!(f->queue[i].frame = av_frame_alloc()))
651  return AVERROR(ENOMEM);
652  return 0;
653 }
654 
655 static void frame_queue_destory(FrameQueue *f)
656 {
657  int i;
658  for (i = 0; i < f->max_size; i++) {
659  Frame *vp = &f->queue[i];
660  frame_queue_unref_item(vp);
661  av_frame_free(&vp->frame);
662  free_picture(vp);
663  }
664  SDL_DestroyMutex(f->mutex);
665  SDL_DestroyCond(f->cond);
666 }
667 
668 static void frame_queue_signal(FrameQueue *f)
669 {
670  SDL_LockMutex(f->mutex);
671  SDL_CondSignal(f->cond);
672  SDL_UnlockMutex(f->mutex);
673 }
674 
675 static Frame *frame_queue_peek(FrameQueue *f)
676 {
677  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
678 }
679 
680 static Frame *frame_queue_peek_next(FrameQueue *f)
681 {
682  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
683 }
684 
685 static Frame *frame_queue_peek_last(FrameQueue *f)
686 {
687  return &f->queue[f->rindex];
688 }
689 
690 static Frame *frame_queue_peek_writable(FrameQueue *f)
691 {
692  /* wait until we have space to put a new frame */
693  SDL_LockMutex(f->mutex);
694  while (f->size >= f->max_size &&
695  !f->pktq->abort_request) {
696  SDL_CondWait(f->cond, f->mutex);
697  }
698  SDL_UnlockMutex(f->mutex);
699 
700  if (f->pktq->abort_request)
701  return NULL;
702 
703  return &f->queue[f->windex];
704 }
705 
706 static Frame *frame_queue_peek_readable(FrameQueue *f)
707 {
708  /* wait until we have a readable new frame */
709  SDL_LockMutex(f->mutex);
710  while (f->size - f->rindex_shown <= 0 &&
711  !f->pktq->abort_request) {
712  SDL_CondWait(f->cond, f->mutex);
713  }
714  SDL_UnlockMutex(f->mutex);
715 
716  if (f->pktq->abort_request)
717  return NULL;
718 
719  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
720 }
721 
722 static void frame_queue_push(FrameQueue *f)
723 {
724  if (++f->windex == f->max_size)
725  f->windex = 0;
726  SDL_LockMutex(f->mutex);
727  f->size++;
728  SDL_CondSignal(f->cond);
729  SDL_UnlockMutex(f->mutex);
730 }
731 
732 static void frame_queue_next(FrameQueue *f)
733 {
734  if (f->keep_last && !f->rindex_shown) {
735  f->rindex_shown = 1;
736  return;
737  }
738  frame_queue_unref_item(&f->queue[f->rindex]);
739  if (++f->rindex == f->max_size)
740  f->rindex = 0;
741  SDL_LockMutex(f->mutex);
742  f->size--;
743  SDL_CondSignal(f->cond);
744  SDL_UnlockMutex(f->mutex);
745 }
746 
747 /* jump back to the previous frame if available by resetting rindex_shown */
748 static int frame_queue_prev(FrameQueue *f)
749 {
750  int ret = f->rindex_shown;
751  f->rindex_shown = 0;
752  return ret;
753 }
754 
755 /* return the number of undisplayed frames in the queue */
756 static int frame_queue_nb_remaining(FrameQueue *f)
757 {
758  return f->size - f->rindex_shown;
759 }
760 
761 /* return last shown position */
762 static int64_t frame_queue_last_pos(FrameQueue *f)
763 {
764  Frame *fp = &f->queue[f->rindex];
765  if (f->rindex_shown && fp->serial == f->pktq->serial)
766  return fp->pos;
767  else
768  return -1;
769 }
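/* Note: the frame queue is a ring buffer of max_size entries. With keep_last set,
 * the most recently displayed frame stays at rindex with rindex_shown == 1 so it
 * can be redisplayed (see frame_queue_prev), while frame_queue_nb_remaining()
 * counts only the size - rindex_shown frames that have not been shown yet. */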
770 
771 static inline void fill_rectangle(SDL_Surface *screen,
772  int x, int y, int w, int h, int color, int update)
773 {
774  SDL_Rect rect;
775  rect.x = x;
776  rect.y = y;
777  rect.w = w;
778  rect.h = h;
779  SDL_FillRect(screen, &rect, color);
780  if (update && w > 0 && h > 0)
781  SDL_UpdateRect(screen, x, y, w, h);
782 }
783 
784 /* draw only the border of a rectangle */
785 static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
786 {
787  int w1, w2, h1, h2;
788 
789  /* fill the background */
790  w1 = x;
791  if (w1 < 0)
792  w1 = 0;
793  w2 = width - (x + w);
794  if (w2 < 0)
795  w2 = 0;
796  h1 = y;
797  if (h1 < 0)
798  h1 = 0;
799  h2 = height - (y + h);
800  if (h2 < 0)
801  h2 = 0;
802  fill_rectangle(screen,
803  xleft, ytop,
804  w1, height,
805  color, update);
806  fill_rectangle(screen,
807  xleft + width - w2, ytop,
808  w2, height,
809  color, update);
810  fill_rectangle(screen,
811  xleft + w1, ytop,
812  width - w1 - w2, h1,
813  color, update);
814  fill_rectangle(screen,
815  xleft + w1, ytop + height - h2,
816  width - w1 - w2, h2,
817  color, update);
818 }
819 
820 #define ALPHA_BLEND(a, oldp, newp, s)\
821 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
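/* Note: with s == 0 this is a plain alpha blend: a == 0 keeps oldp, a == 255 yields
 * newp, and a == 128 gives roughly the midpoint. The shift s allows several source
 * samples to be accumulated first (e.g. chroma averaged over 2 or 4 pixels below,
 * where s is 1 or 2) before the division normalizes the result. */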
822 
823 #define RGBA_IN(r, g, b, a, s)\
824 {\
825  unsigned int v = ((const uint32_t *)(s))[0];\
826  a = (v >> 24) & 0xff;\
827  r = (v >> 16) & 0xff;\
828  g = (v >> 8) & 0xff;\
829  b = v & 0xff;\
830 }
831 
832 #define YUVA_IN(y, u, v, a, s, pal)\
833 {\
834  unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
835  a = (val >> 24) & 0xff;\
836  y = (val >> 16) & 0xff;\
837  u = (val >> 8) & 0xff;\
838  v = val & 0xff;\
839 }
840 
841 #define YUVA_OUT(d, y, u, v, a)\
842 {\
843  ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
844 }
845 
846 
847 #define BPP 1
848 
849 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
850 {
851  int wrap, wrap3, width2, skip2;
852  int y, u, v, a, u1, v1, a1, w, h;
853  uint8_t *lum, *cb, *cr;
854  const uint8_t *p;
855  const uint32_t *pal;
856  int dstx, dsty, dstw, dsth;
857 
858  dstw = av_clip(rect->w, 0, imgw);
859  dsth = av_clip(rect->h, 0, imgh);
860  dstx = av_clip(rect->x, 0, imgw - dstw);
861  dsty = av_clip(rect->y, 0, imgh - dsth);
862  lum = dst->data[0] + dsty * dst->linesize[0];
863  cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
864  cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
865 
866  width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
867  skip2 = dstx >> 1;
868  wrap = dst->linesize[0];
869  wrap3 = rect->pict.linesize[0];
870  p = rect->pict.data[0];
871  pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
872 
873  if (dsty & 1) {
874  lum += dstx;
875  cb += skip2;
876  cr += skip2;
877 
878  if (dstx & 1) {
879  YUVA_IN(y, u, v, a, p, pal);
880  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
881  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
882  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
883  cb++;
884  cr++;
885  lum++;
886  p += BPP;
887  }
888  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
889  YUVA_IN(y, u, v, a, p, pal);
890  u1 = u;
891  v1 = v;
892  a1 = a;
893  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
894 
895  YUVA_IN(y, u, v, a, p + BPP, pal);
896  u1 += u;
897  v1 += v;
898  a1 += a;
899  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
900  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
901  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
902  cb++;
903  cr++;
904  p += 2 * BPP;
905  lum += 2;
906  }
907  if (w) {
908  YUVA_IN(y, u, v, a, p, pal);
909  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
910  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
911  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
912  p++;
913  lum++;
914  }
915  p += wrap3 - dstw * BPP;
916  lum += wrap - dstw - dstx;
917  cb += dst->linesize[1] - width2 - skip2;
918  cr += dst->linesize[2] - width2 - skip2;
919  }
920  for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
921  lum += dstx;
922  cb += skip2;
923  cr += skip2;
924 
925  if (dstx & 1) {
926  YUVA_IN(y, u, v, a, p, pal);
927  u1 = u;
928  v1 = v;
929  a1 = a;
930  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
931  p += wrap3;
932  lum += wrap;
933  YUVA_IN(y, u, v, a, p, pal);
934  u1 += u;
935  v1 += v;
936  a1 += a;
937  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
938  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
939  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
940  cb++;
941  cr++;
942  p += -wrap3 + BPP;
943  lum += -wrap + 1;
944  }
945  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
946  YUVA_IN(y, u, v, a, p, pal);
947  u1 = u;
948  v1 = v;
949  a1 = a;
950  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
951 
952  YUVA_IN(y, u, v, a, p + BPP, pal);
953  u1 += u;
954  v1 += v;
955  a1 += a;
956  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
957  p += wrap3;
958  lum += wrap;
959 
960  YUVA_IN(y, u, v, a, p, pal);
961  u1 += u;
962  v1 += v;
963  a1 += a;
964  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
965 
966  YUVA_IN(y, u, v, a, p + BPP, pal);
967  u1 += u;
968  v1 += v;
969  a1 += a;
970  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
971 
972  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
973  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
974 
975  cb++;
976  cr++;
977  p += -wrap3 + 2 * BPP;
978  lum += -wrap + 2;
979  }
980  if (w) {
981  YUVA_IN(y, u, v, a, p, pal);
982  u1 = u;
983  v1 = v;
984  a1 = a;
985  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
986  p += wrap3;
987  lum += wrap;
988  YUVA_IN(y, u, v, a, p, pal);
989  u1 += u;
990  v1 += v;
991  a1 += a;
992  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
993  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
994  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
995  cb++;
996  cr++;
997  p += -wrap3 + BPP;
998  lum += -wrap + 1;
999  }
1000  p += wrap3 + (wrap3 - dstw * BPP);
1001  lum += wrap + (wrap - dstw - dstx);
1002  cb += dst->linesize[1] - width2 - skip2;
1003  cr += dst->linesize[2] - width2 - skip2;
1004  }
1005  /* handle odd height */
1006  if (h) {
1007  lum += dstx;
1008  cb += skip2;
1009  cr += skip2;
1010 
1011  if (dstx & 1) {
1012  YUVA_IN(y, u, v, a, p, pal);
1013  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
1014  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
1015  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
1016  cb++;
1017  cr++;
1018  lum++;
1019  p += BPP;
1020  }
1021  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
1022  YUVA_IN(y, u, v, a, p, pal);
1023  u1 = u;
1024  v1 = v;
1025  a1 = a;
1026  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
1027 
1028  YUVA_IN(y, u, v, a, p + BPP, pal);
1029  u1 += u;
1030  v1 += v;
1031  a1 += a;
1032  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
1033  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
1034  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
1035  cb++;
1036  cr++;
1037  p += 2 * BPP;
1038  lum += 2;
1039  }
1040  if (w) {
1041  YUVA_IN(y, u, v, a, p, pal);
1042  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
1043  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
1044  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
1045  }
1046  }
1047 }
1048 
1049 static void free_picture(Frame *vp)
1050 {
1051  if (vp->bmp) {
1052  SDL_FreeYUVOverlay(vp->bmp);
1053  vp->bmp = NULL;
1054  }
1055 }
1056 
1057 static void calculate_display_rect(SDL_Rect *rect,
1058  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
1059  int pic_width, int pic_height, AVRational pic_sar)
1060 {
1061  float aspect_ratio;
1062  int width, height, x, y;
1063 
1064  if (pic_sar.num == 0)
1065  aspect_ratio = 0;
1066  else
1067  aspect_ratio = av_q2d(pic_sar);
1068 
1069  if (aspect_ratio <= 0.0)
1070  aspect_ratio = 1.0;
1071  aspect_ratio *= (float)pic_width / (float)pic_height;
1072 
1073  /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
1074  height = scr_height;
1075  width = ((int)rint(height * aspect_ratio)) & ~1;
1076  if (width > scr_width) {
1077  width = scr_width;
1078  height = ((int)rint(width / aspect_ratio)) & ~1;
1079  }
1080  x = (scr_width - width) / 2;
1081  y = (scr_height - height) / 2;
1082  rect->x = scr_xleft + x;
1083  rect->y = scr_ytop + y;
1084  rect->w = FFMAX(width, 1);
1085  rect->h = FFMAX(height, 1);
1086 }
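/* Note: for example, a 1440x1080 picture with a 4:3 sample aspect ratio has a display
 * aspect of (4/3) * (1440/1080) = 16:9, so on a 1280x720 screen the rect becomes
 * 1280x720 centered at (0,0); widths and heights are rounded down to even values by
 * the "& ~1". */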
1087 
1088 static void video_image_display(VideoState *is)
1089 {
1090  Frame *vp;
1091  Frame *sp;
1092  AVPicture pict;
1093  SDL_Rect rect;
1094  int i;
1095 
1096  vp = frame_queue_peek(&is->pictq);
1097  if (vp->bmp) {
1098  if (is->subtitle_st) {
1099  if (frame_queue_nb_remaining(&is->subpq) > 0) {
1100  sp = frame_queue_peek(&is->subpq);
1101 
1102  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
1103  SDL_LockYUVOverlay (vp->bmp);
1104 
1105  pict.data[0] = vp->bmp->pixels[0];
1106  pict.data[1] = vp->bmp->pixels[2];
1107  pict.data[2] = vp->bmp->pixels[1];
1108 
1109  pict.linesize[0] = vp->bmp->pitches[0];
1110  pict.linesize[1] = vp->bmp->pitches[2];
1111  pict.linesize[2] = vp->bmp->pitches[1];
1112 
1113  for (i = 0; i < sp->sub.num_rects; i++)
1114  blend_subrect(&pict, sp->sub.rects[i],
1115  vp->bmp->w, vp->bmp->h);
1116 
1117  SDL_UnlockYUVOverlay (vp->bmp);
1118  }
1119  }
1120  }
1121 
1122  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
1123 
1124  SDL_DisplayYUVOverlay(vp->bmp, &rect);
1125 
1126  if (rect.x != is->last_display_rect.x || rect.y != is->last_display_rect.y || rect.w != is->last_display_rect.w || rect.h != is->last_display_rect.h || is->force_refresh) {
1127  int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
1128  fill_border(is->xleft, is->ytop, is->width, is->height, rect.x, rect.y, rect.w, rect.h, bgcolor, 1);
1129  is->last_display_rect = rect;
1130  }
1131  }
1132 }
1133 
1134 static inline int compute_mod(int a, int b)
1135 {
1136  return a < 0 ? a%b + b : a%b;
1137 }
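/* Note: compute_mod() is a modulo that always returns a value in [0, b): in C,
 * -3 % 10 == -3, so compute_mod(-3, 10) == 7. It is used below to wrap indices
 * into sample_array. */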
1138 
1139 static void video_audio_display(VideoState *s)
1140 {
1141  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1142  int ch, channels, h, h2, bgcolor, fgcolor;
1143  int64_t time_diff;
1144  int rdft_bits, nb_freq;
1145 
1146  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1147  ;
1148  nb_freq = 1 << (rdft_bits - 1);
1149 
1150  /* compute display index : center on currently output samples */
1151  channels = s->audio_tgt.channels;
1152  nb_display_channels = channels;
1153  if (!s->paused) {
1154  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1155  n = 2 * channels;
1156  delay = s->audio_write_buf_size;
1157  delay /= n;
1158 
1159  /* to be more precise, we take into account the time spent since
1160  the last buffer computation */
1161  if (audio_callback_time) {
1162  time_diff = av_gettime_relative() - audio_callback_time;
1163  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1164  }
1165 
1166  delay += 2 * data_used;
1167  if (delay < data_used)
1168  delay = data_used;
1169 
1170  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1171  if (s->show_mode == SHOW_MODE_WAVES) {
1172  h = INT_MIN;
1173  for (i = 0; i < 1000; i += channels) {
1174  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1175  int a = s->sample_array[idx];
1176  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1177  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1178  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1179  int score = a - d;
1180  if (h < score && (b ^ c) < 0) {
1181  h = score;
1182  i_start = idx;
1183  }
1184  }
1185  }
1186 
1187  s->last_i_start = i_start;
1188  } else {
1189  i_start = s->last_i_start;
1190  }
1191 
1192  bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
1193  if (s->show_mode == SHOW_MODE_WAVES) {
1194  fill_rectangle(screen,
1195  s->xleft, s->ytop, s->width, s->height,
1196  bgcolor, 0);
1197 
1198  fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
1199 
1200  /* total height for one channel */
1201  h = s->height / nb_display_channels;
1202  /* graph height / 2 */
1203  h2 = (h * 9) / 20;
1204  for (ch = 0; ch < nb_display_channels; ch++) {
1205  i = i_start + ch;
1206  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1207  for (x = 0; x < s->width; x++) {
1208  y = (s->sample_array[i] * h2) >> 15;
1209  if (y < 0) {
1210  y = -y;
1211  ys = y1 - y;
1212  } else {
1213  ys = y1;
1214  }
1215  fill_rectangle(screen,
1216  s->xleft + x, ys, 1, y,
1217  fgcolor, 0);
1218  i += channels;
1219  if (i >= SAMPLE_ARRAY_SIZE)
1220  i -= SAMPLE_ARRAY_SIZE;
1221  }
1222  }
1223 
1224  fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
1225 
1226  for (ch = 1; ch < nb_display_channels; ch++) {
1227  y = s->ytop + ch * h;
1228  fill_rectangle(screen,
1229  s->xleft, y, s->width, 1,
1230  fgcolor, 0);
1231  }
1232  SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
1233  } else {
1234  nb_display_channels= FFMIN(nb_display_channels, 2);
1235  if (rdft_bits != s->rdft_bits) {
1236  av_rdft_end(s->rdft);
1237  av_free(s->rdft_data);
1238  s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
1239  s->rdft_bits = rdft_bits;
1240  s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data));
1241  }
1242  {
1243  FFTSample *data[2];
1244  for (ch = 0; ch < nb_display_channels; ch++) {
1245  data[ch] = s->rdft_data + 2 * nb_freq * ch;
1246  i = i_start + ch;
1247  for (x = 0; x < 2 * nb_freq; x++) {
1248  double w = (x-nb_freq) * (1.0 / nb_freq);
1249  data[ch][x] = s->sample_array[i] * (1.0 - w * w);
1250  i += channels;
1251  if (i >= SAMPLE_ARRAY_SIZE)
1252  i -= SAMPLE_ARRAY_SIZE;
1253  }
1254  av_rdft_calc(s->rdft, data[ch]);
1255  }
1256  /* Least efficient way to do this, we should of course
1257  * directly access it but it is more than fast enough. */
1258  for (y = 0; y < s->height; y++) {
1259  double w = 1 / sqrt(nb_freq);
1260  int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
1261  int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
1262  + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
1263  a = FFMIN(a, 255);
1264  b = FFMIN(b, 255);
1265  fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
1266 
1267  fill_rectangle(screen,
1268  s->xpos, s->height-y, 1, 1,
1269  fgcolor, 0);
1270  }
1271  }
1272  SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
1273  if (!s->paused)
1274  s->xpos++;
1275  if (s->xpos >= s->width)
1276  s->xpos= s->xleft;
1277  }
1278 }
1279 
1280 static void stream_close(VideoState *is)
1281 {
1282  /* XXX: use a special url_shutdown call to abort parse cleanly */
1283  is->abort_request = 1;
1284  SDL_WaitThread(is->read_tid, NULL);
1285  packet_queue_destroy(&is->videoq);
1286  packet_queue_destroy(&is->audioq);
1287  packet_queue_destroy(&is->subtitleq);
1288 
1289  /* free all pictures */
1290  frame_queue_destory(&is->pictq);
1291  frame_queue_destory(&is->sampq);
1292  frame_queue_destory(&is->subpq);
1293  SDL_DestroyCond(is->continue_read_thread);
1294 #if !CONFIG_AVFILTER
1295  sws_freeContext(is->img_convert_ctx);
1296 #endif
1297  av_free(is);
1298 }
1299 
1300 static void do_exit(VideoState *is)
1301 {
1302  if (is) {
1303  stream_close(is);
1304  }
1305  av_lockmgr_register(NULL);
1306  uninit_opts();
1307 #if CONFIG_AVFILTER
1308  av_freep(&vfilters_list);
1309 #endif
1310  avformat_network_deinit();
1311  if (show_status)
1312  printf("\n");
1313  SDL_Quit();
1314  av_log(NULL, AV_LOG_QUIET, "%s", "");
1315  exit(0);
1316 }
1317 
1318 static void sigterm_handler(int sig)
1319 {
1320  exit(123);
1321 }
1322 
1323 static void set_default_window_size(int width, int height, AVRational sar)
1324 {
1325  SDL_Rect rect;
1326  calculate_display_rect(&rect, 0, 0, INT_MAX, height, width, height, sar);
1327  default_width = rect.w;
1328  default_height = rect.h;
1329 }
1330 
1331 static int video_open(VideoState *is, int force_set_video_mode, Frame *vp)
1332 {
1333  int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
1334  int w,h;
1335 
1336  if (is_full_screen) flags |= SDL_FULLSCREEN;
1337  else flags |= SDL_RESIZABLE;
1338 
1339  if (vp && vp->width)
1340  set_default_window_size(vp->width, vp->height, vp->sar);
1341 
1342  if (is_full_screen && fs_screen_width) {
1343  w = fs_screen_width;
1344  h = fs_screen_height;
1345  } else if (!is_full_screen && screen_width) {
1346  w = screen_width;
1347  h = screen_height;
1348  } else {
1349  w = default_width;
1350  h = default_height;
1351  }
1352  w = FFMIN(16383, w);
1353  if (screen && is->width == screen->w && screen->w == w
1354  && is->height== screen->h && screen->h == h && !force_set_video_mode)
1355  return 0;
1356  screen = SDL_SetVideoMode(w, h, 0, flags);
1357  if (!screen) {
1358  av_log(NULL, AV_LOG_FATAL, "SDL: could not set video mode - exiting\n");
1359  do_exit(is);
1360  }
1361  if (!window_title)
1362  window_title = input_filename;
1363  SDL_WM_SetCaption(window_title, window_title);
1364 
1365  is->width = screen->w;
1366  is->height = screen->h;
1367 
1368  return 0;
1369 }
1370 
1371 /* display the current picture, if any */
1372 static void video_display(VideoState *is)
1373 {
1374  if (!screen)
1375  video_open(is, 0, NULL);
1376  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1377  video_audio_display(is);
1378  else if (is->video_st)
1379  video_image_display(is);
1380 }
1381 
1382 static double get_clock(Clock *c)
1383 {
1384  if (*c->queue_serial != c->serial)
1385  return NAN;
1386  if (c->paused) {
1387  return c->pts;
1388  } else {
1389  double time = av_gettime_relative() / 1000000.0;
1390  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1391  }
1392 }
1393 
1394 static void set_clock_at(Clock *c, double pts, int serial, double time)
1395 {
1396  c->pts = pts;
1397  c->last_updated = time;
1398  c->pts_drift = c->pts - time;
1399  c->serial = serial;
1400 }
1401 
1402 static void set_clock(Clock *c, double pts, int serial)
1403 {
1404  double time = av_gettime_relative() / 1000000.0;
1405  set_clock_at(c, pts, serial, time);
1406 }
1407 
1408 static void set_clock_speed(Clock *c, double speed)
1409 {
1410  set_clock(c, get_clock(c), c->serial);
1411  c->speed = speed;
1412 }
1413 
1414 static void init_clock(Clock *c, int *queue_serial)
1415 {
1416  c->speed = 1.0;
1417  c->paused = 0;
1418  c->queue_serial = queue_serial;
1419  set_clock(c, NAN, -1);
1420 }
1421 
1422 static void sync_clock_to_slave(Clock *c, Clock *slave)
1423 {
1424  double clock = get_clock(c);
1425  double slave_clock = get_clock(slave);
1426  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1427  set_clock(c, slave_clock, slave->serial);
1428 }
1429 
1430 static int get_master_sync_type(VideoState *is) {
1431  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1432  if (is->video_st)
1433  return AV_SYNC_VIDEO_MASTER;
1434  else
1435  return AV_SYNC_AUDIO_MASTER;
1436  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1437  if (is->audio_st)
1438  return AV_SYNC_AUDIO_MASTER;
1439  else
1440  return AV_SYNC_EXTERNAL_CLOCK;
1441  } else {
1442  return AV_SYNC_EXTERNAL_CLOCK;
1443  }
1444 }
1445 
1446 /* get the current master clock value */
1447 static double get_master_clock(VideoState *is)
1448 {
1449  double val;
1450 
1451  switch (get_master_sync_type(is)) {
1452  case AV_SYNC_VIDEO_MASTER:
1453  val = get_clock(&is->vidclk);
1454  break;
1455  case AV_SYNC_AUDIO_MASTER:
1456  val = get_clock(&is->audclk);
1457  break;
1458  default:
1459  val = get_clock(&is->extclk);
1460  break;
1461  }
1462  return val;
1463 }
1464 
1465 static void check_external_clock_speed(VideoState *is) {
1466  if (is->video_stream >= 0 && is->videoq.nb_packets <= MIN_FRAMES / 2 ||
1467  is->audio_stream >= 0 && is->audioq.nb_packets <= MIN_FRAMES / 2) {
1468  set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
1469  } else if ((is->video_stream < 0 || is->videoq.nb_packets > MIN_FRAMES * 2) &&
1470  (is->audio_stream < 0 || is->audioq.nb_packets > MIN_FRAMES * 2)) {
1471  set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
1472  } else {
1473  double speed = is->extclk.speed;
1474  if (speed != 1.0)
1475  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1476  }
1477 }
1478 
1479 /* seek in the stream */
1480 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1481 {
1482  if (!is->seek_req) {
1483  is->seek_pos = pos;
1484  is->seek_rel = rel;
1485  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1486  if (seek_by_bytes)
1487  is->seek_flags |= AVSEEK_FLAG_BYTE;
1488  is->seek_req = 1;
1489  SDL_CondSignal(is->continue_read_thread);
1490  }
1491 }
1492 
1493 /* pause or resume the video */
1494 static void stream_toggle_pause(VideoState *is)
1495 {
1496  if (is->paused) {
1497  is->frame_timer += av_gettime_relative() / 1000000.0 + is->vidclk.pts_drift - is->vidclk.pts;
1498  if (is->read_pause_return != AVERROR(ENOSYS)) {
1499  is->vidclk.paused = 0;
1500  }
1501  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1502  }
1503  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1504  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1505 }
1506 
1507 static void toggle_pause(VideoState *is)
1508 {
1509  stream_toggle_pause(is);
1510  is->step = 0;
1511 }
1512 
1513 static void step_to_next_frame(VideoState *is)
1514 {
1515  /* if the stream is paused unpause it, then step */
1516  if (is->paused)
1517  stream_toggle_pause(is);
1518  is->step = 1;
1519 }
1520 
1521 static double compute_target_delay(double delay, VideoState *is)
1522 {
1523  double sync_threshold, diff;
1524 
1525  /* update delay to follow master synchronisation source */
1526  if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1527  /* if video is slave, we try to correct big delays by
1528  duplicating or deleting a frame */
1529  diff = get_clock(&is->vidclk) - get_master_clock(is);
1530 
1531  /* skip or repeat frame. We take into account the
1532  delay to compute the threshold. I still don't know
1533  if it is the best guess */
1534  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1535  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1536  if (diff <= -sync_threshold)
1537  delay = FFMAX(0, delay + diff);
1538  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1539  delay = delay + diff;
1540  else if (diff >= sync_threshold)
1541  delay = 2 * delay;
1542  }
1543  }
1544 
1545  av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1546  delay, -diff);
1547 
1548  return delay;
1549 }
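/* Note: example with delay = 0.040s (so sync_threshold = 0.04): if the video clock is
 * 0.1s behind the master (diff = -0.1), the delay becomes max(0, 0.04 - 0.1) = 0 and the
 * next frame is shown as soon as possible; if it is 0.1s ahead (diff = +0.1), the delay
 * is doubled to 0.08s, holding the current frame on screen longer. */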
1550 
1551 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1552  if (vp->serial == nextvp->serial) {
1553  double duration = nextvp->pts - vp->pts;
1554  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1555  return vp->duration;
1556  else
1557  return duration;
1558  } else {
1559  return 0.0;
1560  }
1561 }
1562 
1563 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1564  /* update current video pts */
1565  set_clock(&is->vidclk, pts, serial);
1566  sync_clock_to_slave(&is->extclk, &is->vidclk);
1567 }
1568 
1569 /* called to display each frame */
1570 static void video_refresh(void *opaque, double *remaining_time)
1571 {
1572  VideoState *is = opaque;
1573  double time;
1574 
1575  Frame *sp, *sp2;
1576 
1577  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1578  check_external_clock_speed(is);
1579 
1580  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1581  time = av_gettime_relative() / 1000000.0;
1582  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1583  video_display(is);
1584  is->last_vis_time = time;
1585  }
1586  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1587  }
1588 
1589  if (is->video_st) {
1590  int redisplay = 0;
1591  if (is->force_refresh)
1592  redisplay = frame_queue_prev(&is->pictq);
1593 retry:
1594  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1595  // nothing to do, no picture to display in the queue
1596  } else {
1597  double last_duration, duration, delay;
1598  Frame *vp, *lastvp;
1599 
1600  /* dequeue the picture */
1601  lastvp = frame_queue_peek_last(&is->pictq);
1602  vp = frame_queue_peek(&is->pictq);
1603 
1604  if (vp->serial != is->videoq.serial) {
1605  frame_queue_next(&is->pictq);
1606  redisplay = 0;
1607  goto retry;
1608  }
1609 
1610  if (lastvp->serial != vp->serial && !redisplay)
1611  is->frame_timer = av_gettime_relative() / 1000000.0;
1612 
1613  if (is->paused)
1614  goto display;
1615 
1616  /* compute nominal last_duration */
1617  last_duration = vp_duration(is, lastvp, vp);
1618  if (redisplay)
1619  delay = 0.0;
1620  else
1621  delay = compute_target_delay(last_duration, is);
1622 
1623  time= av_gettime_relative()/1000000.0;
1624  if (time < is->frame_timer + delay && !redisplay) {
1625  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1626  return;
1627  }
1628 
1629  is->frame_timer += delay;
1630  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1631  is->frame_timer = time;
1632 
1633  SDL_LockMutex(is->pictq.mutex);
1634  if (!redisplay && !isnan(vp->pts))
1635  update_video_pts(is, vp->pts, vp->pos, vp->serial);
1636  SDL_UnlockMutex(is->pictq.mutex);
1637 
1638  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1639  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1640  duration = vp_duration(is, vp, nextvp);
1641  if(!is->step && (redisplay || framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1642  if (!redisplay)
1643  is->frame_drops_late++;
1644  frame_queue_next(&is->pictq);
1645  redisplay = 0;
1646  goto retry;
1647  }
1648  }
1649 
1650  if (is->subtitle_st) {
1651  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1652  sp = frame_queue_peek(&is->subpq);
1653 
1654  if (frame_queue_nb_remaining(&is->subpq) > 1)
1655  sp2 = frame_queue_peek_next(&is->subpq);
1656  else
1657  sp2 = NULL;
1658 
1659  if (sp->serial != is->subtitleq.serial
1660  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1661  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1662  {
1663  frame_queue_next(&is->subpq);
1664  } else {
1665  break;
1666  }
1667  }
1668  }
1669 
1670 display:
1671  /* display picture */
1672  if (!display_disable && is->show_mode == SHOW_MODE_VIDEO)
1673  video_display(is);
1674 
1675  frame_queue_next(&is->pictq);
1676 
1677  if (is->step && !is->paused)
1678  stream_toggle_pause(is);
1679  }
1680  }
1681  is->force_refresh = 0;
1682  if (show_status) {
1683  static int64_t last_time;
1684  int64_t cur_time;
1685  int aqsize, vqsize, sqsize;
1686  double av_diff;
1687 
1688  cur_time = av_gettime_relative();
1689  if (!last_time || (cur_time - last_time) >= 30000) {
1690  aqsize = 0;
1691  vqsize = 0;
1692  sqsize = 0;
1693  if (is->audio_st)
1694  aqsize = is->audioq.size;
1695  if (is->video_st)
1696  vqsize = is->videoq.size;
1697  if (is->subtitle_st)
1698  sqsize = is->subtitleq.size;
1699  av_diff = 0;
1700  if (is->audio_st && is->video_st)
1701  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1702  else if (is->video_st)
1703  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1704  else if (is->audio_st)
1705  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1706  av_log(NULL, AV_LOG_INFO,
1707  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1708  get_master_clock(is),
1709  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1710  av_diff,
1711  is->frame_drops_early + is->frame_drops_late,
1712  aqsize / 1024,
1713  vqsize / 1024,
1714  sqsize,
1715  is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1716  is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1717  fflush(stdout);
1718  last_time = cur_time;
1719  }
1720  }
1721 }
1722 
1723 /* allocate a picture (needs to be done in the main thread to avoid
1724  potential locking problems) */
1725 static void alloc_picture(VideoState *is)
1726 {
1727  Frame *vp;
1728  int64_t bufferdiff;
1729 
1730  vp = &is->pictq.queue[is->pictq.windex];
1731 
1732  free_picture(vp);
1733 
1734  video_open(is, 0, vp);
1735 
1736  vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1737  SDL_YV12_OVERLAY,
1738  screen);
1739  bufferdiff = vp->bmp ? FFMAX(vp->bmp->pixels[0], vp->bmp->pixels[1]) - FFMIN(vp->bmp->pixels[0], vp->bmp->pixels[1]) : 0;
1740  if (!vp->bmp || vp->bmp->pitches[0] < vp->width || bufferdiff < (int64_t)vp->height * vp->bmp->pitches[0]) {
1741  /* SDL allocates a buffer smaller than requested if the video
1742  * overlay hardware is unable to support the requested size. */
1743  av_log(NULL, AV_LOG_FATAL,
1744  "Error: the video system does not support an image\n"
1745  "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1746  "to reduce the image size.\n", vp->width, vp->height );
1747  do_exit(is);
1748  }
1749 
1750  SDL_LockMutex(is->pictq.mutex);
1751  vp->allocated = 1;
1752  SDL_CondSignal(is->pictq.cond);
1753  SDL_UnlockMutex(is->pictq.mutex);
1754 }
1755 
1756 static void duplicate_right_border_pixels(SDL_Overlay *bmp) {
1757  int i, width, height;
1758  Uint8 *p, *maxp;
1759  for (i = 0; i < 3; i++) {
1760  width = bmp->w;
1761  height = bmp->h;
1762  if (i > 0) {
1763  width >>= 1;
1764  height >>= 1;
1765  }
1766  if (bmp->pitches[i] > width) {
1767  maxp = bmp->pixels[i] + bmp->pitches[i] * height - 1;
1768  for (p = bmp->pixels[i] + width - 1; p < maxp; p += bmp->pitches[i])
1769  *(p+1) = *p;
1770  }
1771  }
1772 }
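/* Note: SDL may allocate overlay lines with a pitch larger than the requested width;
 * copying the last real pixel of each line into the first padding column is presumably
 * done to avoid stray colours at the right edge when the overlay is displayed. */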
1773 
1774 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1775 {
1776  Frame *vp;
1777 
1778 #if defined(DEBUG_SYNC) && 0
1779  printf("frame_type=%c pts=%0.3f\n",
1780  av_get_picture_type_char(src_frame->pict_type), pts);
1781 #endif
1782 
1783  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1784  return -1;
1785 
1786  vp->sar = src_frame->sample_aspect_ratio;
1787 
1788  /* alloc or resize hardware picture buffer */
1789  if (!vp->bmp || vp->reallocate || !vp->allocated ||
1790  vp->width != src_frame->width ||
1791  vp->height != src_frame->height) {
1792  SDL_Event event;
1793 
1794  vp->allocated = 0;
1795  vp->reallocate = 0;
1796  vp->width = src_frame->width;
1797  vp->height = src_frame->height;
1798 
1799  /* the allocation must be done in the main thread to avoid
1800  locking problems. */
1801  event.type = FF_ALLOC_EVENT;
1802  event.user.data1 = is;
1803  SDL_PushEvent(&event);
1804 
1805  /* wait until the picture is allocated */
1806  SDL_LockMutex(is->pictq.mutex);
1807  while (!vp->allocated && !is->videoq.abort_request) {
1808  SDL_CondWait(is->pictq.cond, is->pictq.mutex);
1809  }
1810  /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1811  if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1812  while (!vp->allocated && !is->abort_request) {
1813  SDL_CondWait(is->pictq.cond, is->pictq.mutex);
1814  }
1815  }
1816  SDL_UnlockMutex(is->pictq.mutex);
1817 
1818  if (is->videoq.abort_request)
1819  return -1;
1820  }
1821 
1822  /* if the frame is not skipped, then display it */
1823  if (vp->bmp) {
1824  AVPicture pict = { { 0 } };
1825 
1826  /* get a pointer on the bitmap */
1827  SDL_LockYUVOverlay (vp->bmp);
1828 
1829  pict.data[0] = vp->bmp->pixels[0];
1830  pict.data[1] = vp->bmp->pixels[2];
1831  pict.data[2] = vp->bmp->pixels[1];
1832 
1833  pict.linesize[0] = vp->bmp->pitches[0];
1834  pict.linesize[1] = vp->bmp->pitches[2];
1835  pict.linesize[2] = vp->bmp->pitches[1];
1836 
1837 #if CONFIG_AVFILTER
1838  // FIXME use direct rendering
1839  av_picture_copy(&pict, (AVPicture *)src_frame,
1840  src_frame->format, vp->width, vp->height);
1841 #else
1842  av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1843  is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1844  vp->width, vp->height, src_frame->format, vp->width, vp->height,
1845  AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1846  if (!is->img_convert_ctx) {
1847  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1848  exit(1);
1849  }
1850  sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1851  0, vp->height, pict.data, pict.linesize);
1852 #endif
1853  /* workaround SDL PITCH_WORKAROUND */
1854  duplicate_right_border_pixels(vp->bmp);
1855  /* update the bitmap content */
1856  SDL_UnlockYUVOverlay(vp->bmp);
1857 
1858  vp->pts = pts;
1859  vp->duration = duration;
1860  vp->pos = pos;
1861  vp->serial = serial;
1862 
1863  /* now we can update the picture count */
1864  frame_queue_push(&is->pictq);
1865  }
1866  return 0;
1867 }
1868 
1869 static int get_video_frame(VideoState *is, AVFrame *frame)
1870 {
1871  int got_picture;
1872 
1873  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1874  return -1;
1875 
1876  if (got_picture) {
1877  double dpts = NAN;
1878 
1879  if (frame->pts != AV_NOPTS_VALUE)
1880  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1881 
1882  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1883 
1884  if (framedrop > 0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1885  if (frame->pts != AV_NOPTS_VALUE) {
1886  double diff = dpts - get_master_clock(is);
1887  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1888  diff - is->frame_last_filter_delay < 0 &&
1889  is->viddec.pkt_serial == is->vidclk.serial &&
1890  is->videoq.nb_packets) {
1891  is->frame_drops_early++;
1892  av_frame_unref(frame);
1893  got_picture = 0;
1894  }
1895  }
1896  }
1897  }
1898 
1899  return got_picture;
1900 }
1901 
1902 #if CONFIG_AVFILTER
1903 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1904  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1905 {
1906  int ret, i;
1907  int nb_filters = graph->nb_filters;
1908  AVFilterInOut *outputs = NULL, *inputs = NULL;
1909 
1910  if (filtergraph) {
1911  outputs = avfilter_inout_alloc();
1912  inputs = avfilter_inout_alloc();
1913  if (!outputs || !inputs) {
1914  ret = AVERROR(ENOMEM);
1915  goto fail;
1916  }
1917 
1918  outputs->name = av_strdup("in");
1919  outputs->filter_ctx = source_ctx;
1920  outputs->pad_idx = 0;
1921  outputs->next = NULL;
1922 
1923  inputs->name = av_strdup("out");
1924  inputs->filter_ctx = sink_ctx;
1925  inputs->pad_idx = 0;
1926  inputs->next = NULL;
1927 
1928  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1929  goto fail;
1930  } else {
1931  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1932  goto fail;
1933  }
1934 
1935  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1936  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1937  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1938 
1939  ret = avfilter_graph_config(graph, NULL);
1940 fail:
1941  avfilter_inout_free(&outputs);
1942  avfilter_inout_free(&inputs);
1943  return ret;
1944 }
1945 
1946 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1947 {
1948  static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
1949  char sws_flags_str[128];
1950  char buffersrc_args[256];
1951  int ret;
1952  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1953  AVCodecContext *codec = is->video_st->codec;
1954  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1955 
1956  av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1957  snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1958  graph->scale_sws_opts = av_strdup(sws_flags_str);
1959 
1960  snprintf(buffersrc_args, sizeof(buffersrc_args),
1961  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1962  frame->width, frame->height, frame->format,
1963  is->video_st->time_base.num, is->video_st->time_base.den,
1964  codec->sample_aspect_ratio.num, FFMAX(codec->sample_aspect_ratio.den, 1));
1965  if (fr.num && fr.den)
1966  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1967 
1968  if ((ret = avfilter_graph_create_filter(&filt_src,
1969  avfilter_get_by_name("buffer"),
1970  "ffplay_buffer", buffersrc_args, NULL,
1971  graph)) < 0)
1972  goto fail;
1973 
1974  ret = avfilter_graph_create_filter(&filt_out,
1975  avfilter_get_by_name("buffersink"),
1976  "ffplay_buffersink", NULL, NULL, graph);
1977  if (ret < 0)
1978  goto fail;
1979 
1980  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1981  goto fail;
1982 
1983  last_filter = filt_out;
1984 
1985 /* Note: this macro adds a filter before the most recently added filter, so the
1986  * filters are processed in reverse order */
1987 #define INSERT_FILT(name, arg) do { \
1988  AVFilterContext *filt_ctx; \
1989  \
1990  ret = avfilter_graph_create_filter(&filt_ctx, \
1991  avfilter_get_by_name(name), \
1992  "ffplay_" name, arg, NULL, graph); \
1993  if (ret < 0) \
1994  goto fail; \
1995  \
1996  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1997  if (ret < 0) \
1998  goto fail; \
1999  \
2000  last_filter = filt_ctx; \
2001 } while (0)
2002 
2003  /* The SDL YUV code does not handle odd width/height for some driver
2004  * combinations, therefore we crop the picture to an even width/height. */
2005  INSERT_FILT("crop", "floor(in_w/2)*2:floor(in_h/2)*2");
2006 
2007  if (autorotate) {
2008  AVDictionaryEntry *rotate_tag = av_dict_get(is->video_st->metadata, "rotate", NULL, 0);
2009  if (rotate_tag && *rotate_tag->value && strcmp(rotate_tag->value, "0")) {
2010  if (!strcmp(rotate_tag->value, "90")) {
2011  INSERT_FILT("transpose", "clock");
2012  } else if (!strcmp(rotate_tag->value, "180")) {
2013  INSERT_FILT("hflip", NULL);
2014  INSERT_FILT("vflip", NULL);
2015  } else if (!strcmp(rotate_tag->value, "270")) {
2016  INSERT_FILT("transpose", "cclock");
2017  } else {
2018  char rotate_buf[64];
2019  snprintf(rotate_buf, sizeof(rotate_buf), "%s*PI/180", rotate_tag->value);
2020  INSERT_FILT("rotate", rotate_buf);
2021  }
2022  }
2023  }
2024 
2025  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
2026  goto fail;
2027 
2028  is->in_video_filter = filt_src;
2029  is->out_video_filter = filt_out;
2030 
2031 fail:
2032  return ret;
2033 }
2034 
2035 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
2036 {
2037  static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
2038  int sample_rates[2] = { 0, -1 };
2039  int64_t channel_layouts[2] = { 0, -1 };
2040  int channels[2] = { 0, -1 };
2041  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
2042  char aresample_swr_opts[512] = "";
2043  AVDictionaryEntry *e = NULL;
2044  char asrc_args[256];
2045  int ret;
2046 
2047  avfilter_graph_free(&is->agraph);
2048  if (!(is->agraph = avfilter_graph_alloc()))
2049  return AVERROR(ENOMEM);
2050 
2051  while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
2052  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
2053  if (strlen(aresample_swr_opts))
2054  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
2055  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
2056 
2057  ret = snprintf(asrc_args, sizeof(asrc_args),
2058  "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
2059  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
2060  is->audio_filter_src.channels,
2061  1, is->audio_filter_src.freq);
2062  if (is->audio_filter_src.channel_layout)
2063  snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
2064  ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
2065 
2066  ret = avfilter_graph_create_filter(&filt_asrc,
2067  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
2068  asrc_args, NULL, is->agraph);
2069  if (ret < 0)
2070  goto end;
2071 
2072 
2073  ret = avfilter_graph_create_filter(&filt_asink,
2074  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
2075  NULL, NULL, is->agraph);
2076  if (ret < 0)
2077  goto end;
2078 
2079  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
2080  goto end;
2081  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
2082  goto end;
2083 
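 /* When the SDL audio device is already open, pin the sink to the exact format the device
  * negotiated (is->audio_tgt) so no further conversion is needed after the filtergraph;
  * the arrays above are -1 terminated lists holding a single entry each. */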
2084  if (force_output_format) {
2085  channel_layouts[0] = is->audio_tgt.channel_layout;
2086  channels [0] = is->audio_tgt.channels;
2087  sample_rates [0] = is->audio_tgt.freq;
2088  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
2089  goto end;
2090  if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2091  goto end;
2092  if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2093  goto end;
2094  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2095  goto end;
2096  }
2097 
2098 
2099  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
2100  goto end;
2101 
2102  is->in_audio_filter = filt_asrc;
2103  is->out_audio_filter = filt_asink;
2104 
2105 end:
2106  if (ret < 0)
2107  avfilter_graph_free(&is->agraph);
2108  return ret;
2109 }
2110 #endif /* CONFIG_AVFILTER */
2111 
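 /* Audio decoding thread: pulls decoded frames from the audio decoder, rebuilds the audio
  * filtergraph whenever the source sample format, channel layout, sample rate or packet
  * serial (after a seek) changes, runs frames through the graph and queues the results in
  * is->sampq for the SDL audio callback to consume. */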
2112 static int audio_thread(void *arg)
2113 {
2114  VideoState *is = arg;
2115  AVFrame *frame = av_frame_alloc();
2116  Frame *af;
2117 #if CONFIG_AVFILTER
2118  int last_serial = -1;
2119  int64_t dec_channel_layout;
2120  int reconfigure;
2121 #endif
2122  int got_frame = 0;
2123  AVRational tb;
2124  int ret = 0;
2125 
2126  if (!frame)
2127  return AVERROR(ENOMEM);
2128 
2129  do {
2130  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2131  goto the_end;
2132 
2133  if (got_frame) {
2134  tb = (AVRational){1, frame->sample_rate};
2135 
2136 #if CONFIG_AVFILTER
2137  dec_channel_layout = get_valid_channel_layout(frame->channel_layout, av_frame_get_channels(frame));
2138 
2139  reconfigure =
2140  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2141  frame->format, av_frame_get_channels(frame)) ||
2142  is->audio_filter_src.channel_layout != dec_channel_layout ||
2143  is->audio_filter_src.freq != frame->sample_rate ||
2144  is->auddec.pkt_serial != last_serial;
2145 
2146  if (reconfigure) {
2147  char buf1[1024], buf2[1024];
2148  av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2149  av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2150  av_log(NULL, AV_LOG_DEBUG,
2151  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2152  is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2153  frame->sample_rate, av_frame_get_channels(frame), av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2154 
2155  is->audio_filter_src.fmt = frame->format;
2156  is->audio_filter_src.channels = av_frame_get_channels(frame);
2157  is->audio_filter_src.channel_layout = dec_channel_layout;
2158  is->audio_filter_src.freq = frame->sample_rate;
2159  last_serial = is->auddec.pkt_serial;
2160 
2161  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2162  goto the_end;
2163  }
2164 
2165  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2166  goto the_end;
2167 
2168  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2169  tb = is->out_audio_filter->inputs[0]->time_base;
2170 #endif
2171  if (!(af = frame_queue_peek_writable(&is->sampq)))
2172  goto the_end;
2173 
2174  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2175  af->pos = av_frame_get_pkt_pos(frame);
2176  af->serial = is->auddec.pkt_serial;
2177  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2178 
2179  av_frame_move_ref(af->frame, frame);
2180  frame_queue_push(&is->sampq);
2181 
2182 #if CONFIG_AVFILTER
2183  if (is->audioq.serial != is->auddec.pkt_serial)
2184  break;
2185  }
2186  if (ret == AVERROR_EOF)
2187  is->auddec.finished = is->auddec.pkt_serial;
2188 #endif
2189  }
2190  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2191  the_end:
2192 #if CONFIG_AVFILTER
2193  avfilter_graph_free(&is->agraph);
2194 #endif
2195  av_frame_free(&frame);
2196  return ret;
2197 }
2198 
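 /* Video decoding thread: fetches decoded frames, reconfigures the video filtergraph when
  * the frame size, pixel format or packet serial changes, then timestamps each filtered
  * frame and hands it to queue_picture() for display. */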
2199 static int video_thread(void *arg)
2200 {
2201  VideoState *is = arg;
2202  AVFrame *frame = av_frame_alloc();
2203  double pts;
2204  double duration;
2205  int ret;
2206  AVRational tb = is->video_st->time_base;
2207  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2208 
2209 #if CONFIG_AVFILTER
2210  AVFilterGraph *graph = avfilter_graph_alloc();
2211  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2212  int last_w = 0;
2213  int last_h = 0;
2214  enum AVPixelFormat last_format = -2;
2215  int last_serial = -1;
2216  int last_vfilter_idx = 0;
2217 #endif
2218 
2219  for (;;) {
2220  ret = get_video_frame(is, frame);
2221  if (ret < 0)
2222  goto the_end;
2223  if (!ret)
2224  continue;
2225 
2226 #if CONFIG_AVFILTER
2227  if ( last_w != frame->width
2228  || last_h != frame->height
2229  || last_format != frame->format
2230  || last_serial != is->viddec.pkt_serial
2231  || last_vfilter_idx != is->vfilter_idx) {
2232  av_log(NULL, AV_LOG_DEBUG,
2233  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2234  last_w, last_h,
2235  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2236  frame->width, frame->height,
2237  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2238  avfilter_graph_free(&graph);
2239  graph = avfilter_graph_alloc();
2240  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2241  SDL_Event event;
2242  event.type = FF_QUIT_EVENT;
2243  event.user.data1 = is;
2244  SDL_PushEvent(&event);
2245  goto the_end;
2246  }
2247  filt_in = is->in_video_filter;
2248  filt_out = is->out_video_filter;
2249  last_w = frame->width;
2250  last_h = frame->height;
2251  last_format = frame->format;
2252  last_serial = is->viddec.pkt_serial;
2253  last_vfilter_idx = is->vfilter_idx;
2254  frame_rate = filt_out->inputs[0]->frame_rate;
2255  }
2256 
2257  ret = av_buffersrc_add_frame(filt_in, frame);
2258  if (ret < 0)
2259  goto the_end;
2260 
2261  while (ret >= 0) {
2262  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2263 
2264  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2265  if (ret < 0) {
2266  if (ret == AVERROR_EOF)
2267  is->viddec.finished = is->viddec.pkt_serial;
2268  ret = 0;
2269  break;
2270  }
2271 
2272  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2273  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2274  is->frame_last_filter_delay = 0;
2275  tb = filt_out->inputs[0]->time_base;
2276 #endif
2277  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2278  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2279  ret = queue_picture(is, frame, pts, duration, av_frame_get_pkt_pos(frame), is->viddec.pkt_serial);
2280  av_frame_unref(frame);
2281 #if CONFIG_AVFILTER
2282  }
2283 #endif
2284 
2285  if (ret < 0)
2286  goto the_end;
2287  }
2288  the_end:
2289 #if CONFIG_AVFILTER
2290  avfilter_graph_free(&graph);
2291 #endif
2292  av_frame_free(&frame);
2293  return 0;
2294 }
2295 
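 /* Subtitle decoding thread: for bitmap subtitles (sub.format == 0) the palette of each
  * rect is converted in place from RGBA to YUVA with the CCIR coefficients, so the
  * blending code can work directly in the YUV overlay colorspace. */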
2296 static int subtitle_thread(void *arg)
2297 {
2298  VideoState *is = arg;
2299  Frame *sp;
2300  int got_subtitle;
2301  double pts;
2302  int i, j;
2303  int r, g, b, y, u, v, a;
2304 
2305  for (;;) {
2306  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2307  return 0;
2308 
2309  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2310  break;
2311 
2312  pts = 0;
2313 
2314  if (got_subtitle && sp->sub.format == 0) {
2315  if (sp->sub.pts != AV_NOPTS_VALUE)
2316  pts = sp->sub.pts / (double)AV_TIME_BASE;
2317  sp->pts = pts;
2318  sp->serial = is->subdec.pkt_serial;
2319 
2320  for (i = 0; i < sp->sub.num_rects; i++)
2321  {
2322  for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
2323  {
2324  RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
2325  y = RGB_TO_Y_CCIR(r, g, b);
2326  u = RGB_TO_U_CCIR(r, g, b, 0);
2327  v = RGB_TO_V_CCIR(r, g, b, 0);
2328  YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
2329  }
2330  }
2331 
2332  /* now we can update the picture count */
2333  frame_queue_push(&is->subpq);
2334  } else if (got_subtitle) {
2335  avsubtitle_free(&sp->sub);
2336  }
2337  }
2338  return 0;
2339 }
2340 
2341 /* copy samples for viewing in editor window */
2342 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2343 {
2344  int size, len;
2345 
2346  size = samples_size / sizeof(short);
2347  while (size > 0) {
2348  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2349  if (len > size)
2350  len = size;
2351  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2352  samples += len;
2353  is->sample_array_index += len;
2354  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2355  is->sample_array_index = 0;
2356  size -= len;
2357  }
2358 }
2359 
2360 /* return the wanted number of samples to get better sync if sync_type is video
2361  * or external master clock */
2362 static int synchronize_audio(VideoState *is, int nb_samples)
2363 {
2364  int wanted_nb_samples = nb_samples;
2365 
2366  /* if not master, then we try to remove or add samples to correct the clock */
2367  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2368  double diff, avg_diff;
2369  int min_nb_samples, max_nb_samples;
2370 
2371  diff = get_clock(&is->audclk) - get_master_clock(is);
2372 
2373  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2374  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2375  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2376  /* not enough measures to have a correct estimate */
2377  is->audio_diff_avg_count++;
2378  } else {
2379  /* estimate the A-V difference */
2380  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2381 
2382  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2383  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2384  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2385  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2386  wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
2387  }
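 /* Worked example: at 48 kHz with diff = +0.02 s the request grows by
  * 0.02 * 48000 = 960 samples, but for a 1024-sample frame the clamp above limits
  * the change to +/- 10% (SAMPLE_CORRECTION_PERCENT_MAX), i.e. about 102 samples. */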
2388  av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2389  diff, avg_diff, wanted_nb_samples - nb_samples,
2390  is->audio_clock, is->audio_diff_threshold);
2391  }
2392  } else {
2393  /* too big difference : may be initial PTS errors, so
2394  reset A-V filter */
2395  is->audio_diff_avg_count = 0;
2396  is->audio_diff_cum = 0;
2397  }
2398  }
2399 
2400  return wanted_nb_samples;
2401 }
2402 
2403 /**
2404  * Decode one audio frame and return its uncompressed size.
2405  *
2406  * The processed audio frame is decoded, converted if required, and
2407  * stored in is->audio_buf, with size in bytes given by the return
2408  * value.
2409  */
2410 static int audio_decode_frame(VideoState *is)
2411 {
2412  int data_size, resampled_data_size;
2413  int64_t dec_channel_layout;
2414  av_unused double audio_clock0;
2415  int wanted_nb_samples;
2416  Frame *af;
2417 
2418  if (is->paused)
2419  return -1;
2420 
2421  do {
2422  if (!(af = frame_queue_peek_readable(&is->sampq)))
2423  return -1;
2424  frame_queue_next(&is->sampq);
2425  } while (af->serial != is->audioq.serial);
2426 
2427  data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(af->frame),
2428  af->frame->nb_samples,
2429  af->frame->format, 1);
2430 
2431  dec_channel_layout =
2432  (af->frame->channel_layout && av_frame_get_channels(af->frame) == av_get_channel_layout_nb_channels(af->frame->channel_layout)) ?
2433  af->frame->channel_layout : av_get_default_channel_layout(av_frame_get_channels(af->frame));
2434  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2435 
2436  if (af->frame->format != is->audio_src.fmt ||
2437  dec_channel_layout != is->audio_src.channel_layout ||
2438  af->frame->sample_rate != is->audio_src.freq ||
2439  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2440  swr_free(&is->swr_ctx);
2441  is->swr_ctx = swr_alloc_set_opts(NULL,
2442  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2443  dec_channel_layout, af->frame->format, af->frame->sample_rate,
2444  0, NULL);
2445  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2446  av_log(NULL, AV_LOG_ERROR,
2447  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2448  af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), av_frame_get_channels(af->frame),
2449  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2450  swr_free(&is->swr_ctx);
2451  return -1;
2452  }
2453  is->audio_src.channel_layout = dec_channel_layout;
2454  is->audio_src.channels = av_frame_get_channels(af->frame);
2455  is->audio_src.freq = af->frame->sample_rate;
2456  is->audio_src.fmt = af->frame->format;
2457  }
2458 
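 /* If a resampler is active, convert into is->audio_buf1; out_count leaves headroom
  * (+256 samples) for swresample's internal delay and sample compensation, so
  * swr_convert() never runs out of output space. Otherwise the decoded frame data
  * is used directly. */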
2459  if (is->swr_ctx) {
2460  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2461  uint8_t **out = &is->audio_buf1;
2462  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2463  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2464  int len2;
2465  if (out_size < 0) {
2466  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2467  return -1;
2468  }
2469  if (wanted_nb_samples != af->frame->nb_samples) {
2470  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2471  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2472  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2473  return -1;
2474  }
2475  }
2476  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2477  if (!is->audio_buf1)
2478  return AVERROR(ENOMEM);
2479  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2480  if (len2 < 0) {
2481  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2482  return -1;
2483  }
2484  if (len2 == out_count) {
2485  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2486  if (swr_init(is->swr_ctx) < 0)
2487  swr_free(&is->swr_ctx);
2488  }
2489  is->audio_buf = is->audio_buf1;
2490  resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2491  } else {
2492  is->audio_buf = af->frame->data[0];
2493  resampled_data_size = data_size;
2494  }
2495 
2496  audio_clock0 = is->audio_clock;
2497  /* update the audio clock with the pts */
2498  if (!isnan(af->pts))
2499  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2500  else
2501  is->audio_clock = NAN;
2502  is->audio_clock_serial = af->serial;
2503 #ifdef DEBUG
2504  {
2505  static double last_clock;
2506  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2507  is->audio_clock - last_clock,
2508  is->audio_clock, audio_clock0);
2509  last_clock = is->audio_clock;
2510  }
2511 #endif
2512  return resampled_data_size;
2513 }
2514 
2515 /* prepare a new audio buffer */
2516 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2517 {
2518  VideoState *is = opaque;
2519  int audio_size, len1;
2520 
2521  audio_callback_time = av_gettime_relative();
2522 
2523  while (len > 0) {
2524  if (is->audio_buf_index >= is->audio_buf_size) {
2525  audio_size = audio_decode_frame(is);
2526  if (audio_size < 0) {
2527  /* if error, just output silence */
2528  is->audio_buf = is->silence_buf;
2529  is->audio_buf_size = sizeof(is->silence_buf) / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2530  } else {
2531  if (is->show_mode != SHOW_MODE_VIDEO)
2532  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2533  is->audio_buf_size = audio_size;
2534  }
2535  is->audio_buf_index = 0;
2536  }
2537  len1 = is->audio_buf_size - is->audio_buf_index;
2538  if (len1 > len)
2539  len1 = len;
2540  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2541  len -= len1;
2542  stream += len1;
2543  is->audio_buf_index += len1;
2544  }
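 /* Estimate how much already-decoded audio is still queued in front of the speaker:
  * the unplayed part of audio_buf plus (per the assumption below) two hardware buffer
  * periods. That latency is subtracted from audio_clock when the audio clock is set. */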
2545  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2546  /* Let's assume the audio driver that is used by SDL has two periods. */
2547  if (!isnan(is->audio_clock)) {
2548  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2549  sync_clock_to_slave(&is->extclk, &is->audclk);
2550  }
2551 }
2552 
2553 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2554 {
2555  SDL_AudioSpec wanted_spec, spec;
2556  const char *env;
2557  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2558  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2559  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2560 
2561  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2562  if (env) {
2563  wanted_nb_channels = atoi(env);
2564  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2565  }
2566  if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2567  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2568  wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2569  }
2570  wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2571  wanted_spec.channels = wanted_nb_channels;
2572  wanted_spec.freq = wanted_sample_rate;
2573  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2574  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2575  return -1;
2576  }
2577  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2578  next_sample_rate_idx--;
2579  wanted_spec.format = AUDIO_S16SYS;
2580  wanted_spec.silence = 0;
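 /* Pick a power-of-two callback size that keeps the callback rate at or below
  * SDL_AUDIO_MAX_CALLBACKS_PER_SEC: e.g. 44100 / 30 = 1470 -> 2 << av_log2(1470) = 2048
  * samples, and never less than SDL_AUDIO_MIN_BUFFER_SIZE (512). */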
2581  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
2582  wanted_spec.callback = sdl_audio_callback;
2583  wanted_spec.userdata = opaque;
2584  while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2585  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2586  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2587  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2588  if (!wanted_spec.channels) {
2589  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2590  wanted_spec.channels = wanted_nb_channels;
2591  if (!wanted_spec.freq) {
2592  av_log(NULL, AV_LOG_ERROR,
2593  "No more combinations to try, audio open failed\n");
2594  return -1;
2595  }
2596  }
2597  wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2598  }
2599  if (spec.format != AUDIO_S16SYS) {
2600  av_log(NULL, AV_LOG_ERROR,
2601  "SDL advised audio format %d is not supported!\n", spec.format);
2602  return -1;
2603  }
2604  if (spec.channels != wanted_spec.channels) {
2605  wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2606  if (!wanted_channel_layout) {
2607  av_log(NULL, AV_LOG_ERROR,
2608  "SDL advised channel count %d is not supported!\n", spec.channels);
2609  return -1;
2610  }
2611  }
2612 
2613  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2614  audio_hw_params->freq = spec.freq;
2615  audio_hw_params->channel_layout = wanted_channel_layout;
2616  audio_hw_params->channels = spec.channels;
2617  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
2618  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2619  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2620  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2621  return -1;
2622  }
2623  return spec.size;
2624 }
2625 
2626 /* open a given stream. Return 0 if OK */
2627 static int stream_component_open(VideoState *is, int stream_index)
2628 {
2629  AVFormatContext *ic = is->ic;
2630  AVCodecContext *avctx;
2631  AVCodec *codec;
2632  const char *forced_codec_name = NULL;
2633  AVDictionary *opts;
2634  AVDictionaryEntry *t = NULL;
2635  int sample_rate, nb_channels;
2636  int64_t channel_layout;
2637  int ret = 0;
2638  int stream_lowres = lowres;
2639 
2640  if (stream_index < 0 || stream_index >= ic->nb_streams)
2641  return -1;
2642  avctx = ic->streams[stream_index]->codec;
2643 
2644  codec = avcodec_find_decoder(avctx->codec_id);
2645 
2646  switch(avctx->codec_type){
2647  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2648  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2649  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2650  }
2651  if (forced_codec_name)
2652  codec = avcodec_find_decoder_by_name(forced_codec_name);
2653  if (!codec) {
2654  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2655  "No codec could be found with name '%s'\n", forced_codec_name);
2656  else av_log(NULL, AV_LOG_WARNING,
2657  "No codec could be found with id %d\n", avctx->codec_id);
2658  return -1;
2659  }
2660 
2661  avctx->codec_id = codec->id;
2662  if(stream_lowres > av_codec_get_max_lowres(codec)){
2663  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2664  av_codec_get_max_lowres(codec));
2665  stream_lowres = av_codec_get_max_lowres(codec);
2666  }
2667  av_codec_set_lowres(avctx, stream_lowres);
2668 
2669  if(stream_lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2670  if (fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2671  if(codec->capabilities & CODEC_CAP_DR1)
2672  avctx->flags |= CODEC_FLAG_EMU_EDGE;
2673 
2674  opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2675  if (!av_dict_get(opts, "threads", NULL, 0))
2676  av_dict_set(&opts, "threads", "auto", 0);
2677  if (stream_lowres)
2678  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2679  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2680  av_dict_set(&opts, "refcounted_frames", "1", 0);
2681  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2682  goto fail;
2683  }
2684  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2685  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2686  ret = AVERROR_OPTION_NOT_FOUND;
2687  goto fail;
2688  }
2689 
2690  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2691  switch (avctx->codec_type) {
2692  case AVMEDIA_TYPE_AUDIO:
2693 #if CONFIG_AVFILTER
2694  {
2695  AVFilterLink *link;
2696 
2697  is->audio_filter_src.freq = avctx->sample_rate;
2698  is->audio_filter_src.channels = avctx->channels;
2699  is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2700  is->audio_filter_src.fmt = avctx->sample_fmt;
2701  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2702  goto fail;
2703  link = is->out_audio_filter->inputs[0];
2704  sample_rate = link->sample_rate;
2705  nb_channels = link->channels;
2706  channel_layout = link->channel_layout;
2707  }
2708 #else
2709  sample_rate = avctx->sample_rate;
2710  nb_channels = avctx->channels;
2711  channel_layout = avctx->channel_layout;
2712 #endif
2713 
2714  /* prepare audio output */
2715  if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2716  goto fail;
2717  is->audio_hw_buf_size = ret;
2718  is->audio_src = is->audio_tgt;
2719  is->audio_buf_size = 0;
2720  is->audio_buf_index = 0;
2721 
2722  /* init averaging filter */
2723  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2724  is->audio_diff_avg_count = 0;
2725  /* since we do not have a precise enough audio fifo fullness,
2726  we correct audio sync only if larger than this threshold */
2727  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2728 
2729  is->audio_stream = stream_index;
2730  is->audio_st = ic->streams[stream_index];
2731 
2732  packet_queue_start(&is->audioq);
2733  decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
2734  if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
2735  is->auddec.start_pts = is->audio_st->start_time;
2736  is->auddec.start_pts_tb = is->audio_st->time_base;
2737  }
2738  is->audio_tid = SDL_CreateThread(audio_thread, is);
2739  SDL_PauseAudio(0);
2740  break;
2741  case AVMEDIA_TYPE_VIDEO:
2742  is->video_stream = stream_index;
2743  is->video_st = ic->streams[stream_index];
2744 
2745  packet_queue_start(&is->videoq);
2746  decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
2747  is->video_tid = SDL_CreateThread(video_thread, is);
2748  is->queue_attachments_req = 1;
2749  break;
2750  case AVMEDIA_TYPE_SUBTITLE:
2751  is->subtitle_stream = stream_index;
2752  is->subtitle_st = ic->streams[stream_index];
2753 
2754  packet_queue_start(&is->subtitleq);
2755  decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread);
2756  is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2757  break;
2758  default:
2759  break;
2760  }
2761 
2762 fail:
2763  av_dict_free(&opts);
2764 
2765  return ret;
2766 }
2767 
2768 static void stream_component_close(VideoState *is, int stream_index)
2769 {
2770  AVFormatContext *ic = is->ic;
2771  AVCodecContext *avctx;
2772 
2773  if (stream_index < 0 || stream_index >= ic->nb_streams)
2774  return;
2775  avctx = ic->streams[stream_index]->codec;
2776 
2777  switch (avctx->codec_type) {
2778  case AVMEDIA_TYPE_AUDIO:
2779  packet_queue_abort(&is->audioq);
2780  frame_queue_signal(&is->sampq);
2781  SDL_CloseAudio();
2782  SDL_WaitThread(is->audio_tid, NULL);
2783 
2784  decoder_destroy(&is->auddec);
2785  packet_queue_flush(&is->audioq);
2786  swr_free(&is->swr_ctx);
2787  av_freep(&is->audio_buf1);
2788  is->audio_buf1_size = 0;
2789  is->audio_buf = NULL;
2790 
2791  if (is->rdft) {
2792  av_rdft_end(is->rdft);
2793  av_freep(&is->rdft_data);
2794  is->rdft = NULL;
2795  is->rdft_bits = 0;
2796  }
2797  break;
2798  case AVMEDIA_TYPE_VIDEO:
2799  packet_queue_abort(&is->videoq);
2800 
2801  /* note: we also signal this mutex to make sure we deblock the
2802  video thread in all cases */
2803  frame_queue_signal(&is->pictq);
2804 
2805  SDL_WaitThread(is->video_tid, NULL);
2806 
2807  decoder_destroy(&is->viddec);
2808  packet_queue_flush(&is->videoq);
2809  break;
2810  case AVMEDIA_TYPE_SUBTITLE:
2811  packet_queue_abort(&is->subtitleq);
2812 
2813  /* note: we also signal this mutex to make sure we deblock the
2814  subtitle thread in all cases */
2815  frame_queue_signal(&is->subpq);
2816 
2817  SDL_WaitThread(is->subtitle_tid, NULL);
2818 
2819  decoder_destroy(&is->subdec);
2820  packet_queue_flush(&is->subtitleq);
2821  break;
2822  default:
2823  break;
2824  }
2825 
2826  ic->streams[stream_index]->discard = AVDISCARD_ALL;
2827  avcodec_close(avctx);
2828  switch (avctx->codec_type) {
2829  case AVMEDIA_TYPE_AUDIO:
2830  is->audio_st = NULL;
2831  is->audio_stream = -1;
2832  break;
2833  case AVMEDIA_TYPE_VIDEO:
2834  is->video_st = NULL;
2835  is->video_stream = -1;
2836  break;
2837  case AVMEDIA_TYPE_SUBTITLE:
2838  is->subtitle_st = NULL;
2839  is->subtitle_stream = -1;
2840  break;
2841  default:
2842  break;
2843  }
2844 }
2845 
2846 static int decode_interrupt_cb(void *ctx)
2847 {
2848  VideoState *is = ctx;
2849  return is->abort_request;
2850 }
2851 
2852 static int is_realtime(AVFormatContext *s)
2853 {
2854  if( !strcmp(s->iformat->name, "rtp")
2855  || !strcmp(s->iformat->name, "rtsp")
2856  || !strcmp(s->iformat->name, "sdp")
2857  )
2858  return 1;
2859 
2860  if(s->pb && ( !strncmp(s->filename, "rtp:", 4)
2861  || !strncmp(s->filename, "udp:", 4)
2862  )
2863  )
2864  return 1;
2865  return 0;
2866 }
2867 
2868 /* this thread gets the stream from the disk or the network */
2869 static int read_thread(void *arg)
2870 {
2871  VideoState *is = arg;
2872  AVFormatContext *ic = NULL;
2873  int err, i, ret;
2874  int st_index[AVMEDIA_TYPE_NB];
2875  AVPacket pkt1, *pkt = &pkt1;
2876  int eof = 0;
2877  int64_t stream_start_time;
2878  int pkt_in_play_range = 0;
2879  AVDictionaryEntry *t;
2880  AVDictionary **opts;
2881  int orig_nb_streams;
2882  SDL_mutex *wait_mutex = SDL_CreateMutex();
2883  int scan_all_pmts_set = 0;
2884 
2885  memset(st_index, -1, sizeof(st_index));
2886  is->last_video_stream = is->video_stream = -1;
2887  is->last_audio_stream = is->audio_stream = -1;
2888  is->last_subtitle_stream = is->subtitle_stream = -1;
2889 
2890  ic = avformat_alloc_context();
2891  ic->interrupt_callback.callback = decode_interrupt_cb;
2892  ic->interrupt_callback.opaque = is;
2893  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2894  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2895  scan_all_pmts_set = 1;
2896  }
2897  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2898  if (err < 0) {
2899  print_error(is->filename, err);
2900  ret = -1;
2901  goto fail;
2902  }
2903  if (scan_all_pmts_set)
2904  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2905 
2906  if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2907  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2908  ret = AVERROR_OPTION_NOT_FOUND;
2909  goto fail;
2910  }
2911  is->ic = ic;
2912 
2913  if (genpts)
2914  ic->flags |= AVFMT_FLAG_GENPTS;
2915 
2916  av_format_inject_global_side_data(ic);
2917 
2918  opts = setup_find_stream_info_opts(ic, codec_opts);
2919  orig_nb_streams = ic->nb_streams;
2920 
2921  err = avformat_find_stream_info(ic, opts);
2922 
2923  for (i = 0; i < orig_nb_streams; i++)
2924  av_dict_free(&opts[i]);
2925  av_freep(&opts);
2926 
2927  if (err < 0) {
2928  av_log(NULL, AV_LOG_WARNING,
2929  "%s: could not find codec parameters\n", is->filename);
2930  ret = -1;
2931  goto fail;
2932  }
2933 
2934  if (ic->pb)
2935  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2936 
2937  if (seek_by_bytes < 0)
2938  seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
2939 
2940  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2941 
2942  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2943  window_title = av_asprintf("%s - %s", t->value, input_filename);
2944 
2945  /* if seeking requested, we execute it */
2946  if (start_time != AV_NOPTS_VALUE) {
2947  int64_t timestamp;
2948 
2949  timestamp = start_time;
2950  /* add the stream start time */
2951  if (ic->start_time != AV_NOPTS_VALUE)
2952  timestamp += ic->start_time;
2953  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2954  if (ret < 0) {
2955  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2956  is->filename, (double)timestamp / AV_TIME_BASE);
2957  }
2958  }
2959 
2960  is->realtime = is_realtime(ic);
2961 
2962  if (show_status)
2963  av_dump_format(ic, 0, is->filename, 0);
2964 
2965  for (i = 0; i < ic->nb_streams; i++) {
2966  AVStream *st = ic->streams[i];
2967  enum AVMediaType type = st->codec->codec_type;
2968  st->discard = AVDISCARD_ALL;
2969  if (wanted_stream_spec[type] && st_index[type] == -1)
2970  if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
2971  st_index[type] = i;
2972  }
2973  for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
2974  if (wanted_stream_spec[i] && st_index[i] == -1) {
2975  av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
2976  st_index[i] = INT_MAX;
2977  }
2978  }
2979 
2980  if (!video_disable)
2981  st_index[AVMEDIA_TYPE_VIDEO] =
2982  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2983  st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2984  if (!audio_disable)
2985  st_index[AVMEDIA_TYPE_AUDIO] =
2986  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2987  st_index[AVMEDIA_TYPE_AUDIO],
2988  st_index[AVMEDIA_TYPE_VIDEO],
2989  NULL, 0);
2990  if (!video_disable && !subtitle_disable)
2991  st_index[AVMEDIA_TYPE_SUBTITLE] =
2992  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2993  st_index[AVMEDIA_TYPE_SUBTITLE],
2994  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2995  st_index[AVMEDIA_TYPE_AUDIO] :
2996  st_index[AVMEDIA_TYPE_VIDEO]),
2997  NULL, 0);
2998 
2999  is->show_mode = show_mode;
3000  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
3001  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
3002  AVCodecContext *avctx = st->codec;
3003  AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
3004  if (avctx->width)
3005  set_default_window_size(avctx->width, avctx->height, sar);
3006  }
3007 
3008  /* open the streams */
3009  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
3010  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
3011  }
3012 
3013  ret = -1;
3014  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
3015  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
3016  }
3017  if (is->show_mode == SHOW_MODE_NONE)
3018  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
3019 
3020  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
3021  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
3022  }
3023 
3024  if (is->video_stream < 0 && is->audio_stream < 0) {
3025  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
3026  is->filename);
3027  ret = -1;
3028  goto fail;
3029  }
3030 
3031  if (infinite_buffer < 0 && is->realtime)
3032  infinite_buffer = 1;
3033 
3034  for (;;) {
3035  if (is->abort_request)
3036  break;
3037  if (is->paused != is->last_paused) {
3038  is->last_paused = is->paused;
3039  if (is->paused)
3040  is->read_pause_return = av_read_pause(ic);
3041  else
3042  av_read_play(ic);
3043  }
3044 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
3045  if (is->paused &&
3046  (!strcmp(ic->iformat->name, "rtsp") ||
3047  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
3048  /* wait 10 ms to avoid trying to get another packet */
3049  /* XXX: horrible */
3050  SDL_Delay(10);
3051  continue;
3052  }
3053 #endif
3054  if (is->seek_req) {
3055  int64_t seek_target = is->seek_pos;
3056  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
3057  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
3058 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
3059 // of the seek_pos/seek_rel variables
3060 
3061  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
3062  if (ret < 0) {
3063  av_log(NULL, AV_LOG_ERROR,
3064  "%s: error while seeking\n", is->ic->filename);
3065  } else {
3066  if (is->audio_stream >= 0) {
3067  packet_queue_flush(&is->audioq);
3068  packet_queue_put(&is->audioq, &flush_pkt);
3069  }
3070  if (is->subtitle_stream >= 0) {
3071  packet_queue_flush(&is->subtitleq);
3072  packet_queue_put(&is->subtitleq, &flush_pkt);
3073  }
3074  if (is->video_stream >= 0) {
3075  packet_queue_flush(&is->videoq);
3076  packet_queue_put(&is->videoq, &flush_pkt);
3077  }
3078  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
3079  set_clock(&is->extclk, NAN, 0);
3080  } else {
3081  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
3082  }
3083  }
3084  is->seek_req = 0;
3085  is->queue_attachments_req = 1;
3086  eof = 0;
3087  if (is->paused)
3088  step_to_next_frame(is);
3089  }
3090  if (is->queue_attachments_req) {
3091  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
3092  AVPacket copy;
3093  if ((ret = av_copy_packet(&copy, &is->video_st->attached_pic)) < 0)
3094  goto fail;
3095  packet_queue_put(&is->videoq, &copy);
3096  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
3097  }
3098  is->queue_attachments_req = 0;
3099  }
3100 
3101  /* if the queues are full, no need to read more */
3102  if (infinite_buffer<1 &&
3103  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
3104  || ( (is->audioq .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
3105  && (is->videoq .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request
3106  || (is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC))
3107  && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
3108  /* wait 10 ms */
3109  SDL_LockMutex(wait_mutex);
3110  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3111  SDL_UnlockMutex(wait_mutex);
3112  continue;
3113  }
3114  if (!is->paused &&
3115  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
3116  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
3117  if (loop != 1 && (!loop || --loop)) {
3118  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
3119  } else if (autoexit) {
3120  ret = AVERROR_EOF;
3121  goto fail;
3122  }
3123  }
3124  ret = av_read_frame(ic, pkt);
3125  if (ret < 0) {
3126  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !eof) {
3127  if (is->video_stream >= 0)
3128  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
3129  if (is->audio_stream >= 0)
3130  packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
3131  if (is->subtitle_stream >= 0)
3132  packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
3133  eof = 1;
3134  }
3135  if (ic->pb && ic->pb->error)
3136  break;
3137  SDL_LockMutex(wait_mutex);
3138  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3139  SDL_UnlockMutex(wait_mutex);
3140  continue;
3141  } else {
3142  eof = 0;
3143  }
3144  /* check if packet is in play range specified by user, then queue, otherwise discard */
3145  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3146  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3147  (pkt->pts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3148  av_q2d(ic->streams[pkt->stream_index]->time_base) -
3149  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3150  <= ((double)duration / 1000000);
3151  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3152  packet_queue_put(&is->audioq, pkt);
3153  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3154  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3155  packet_queue_put(&is->videoq, pkt);
3156  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3157  packet_queue_put(&is->subtitleq, pkt);
3158  } else {
3159  av_free_packet(pkt);
3160  }
3161  }
3162  /* wait until the end */
3163  while (!is->abort_request) {
3164  SDL_Delay(100);
3165  }
3166 
3167  ret = 0;
3168  fail:
3169  /* close each stream */
3170  if (is->audio_stream >= 0)
3171  stream_component_close(is, is->audio_stream);
3172  if (is->video_stream >= 0)
3173  stream_component_close(is, is->video_stream);
3174  if (is->subtitle_stream >= 0)
3175  stream_component_close(is, is->subtitle_stream);
3176  if (ic) {
3177  avformat_close_input(&ic);
3178  is->ic = NULL;
3179  }
3180 
3181  if (ret != 0) {
3182  SDL_Event event;
3183 
3184  event.type = FF_QUIT_EVENT;
3185  event.user.data1 = is;
3186  SDL_PushEvent(&event);
3187  }
3188  SDL_DestroyMutex(wait_mutex);
3189  return 0;
3190 }
3191 
3192 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
3193 {
3194  VideoState *is;
3195 
3196  is = av_mallocz(sizeof(VideoState));
3197  if (!is)
3198  return NULL;
3199  av_strlcpy(is->filename, filename, sizeof(is->filename));
3200  is->iformat = iformat;
3201  is->ytop = 0;
3202  is->xleft = 0;
3203 
3204  /* start video display */
3205  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3206  goto fail;
3207  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3208  goto fail;
3209  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3210  goto fail;
3211 
3212  packet_queue_init(&is->videoq);
3213  packet_queue_init(&is->audioq);
3214  packet_queue_init(&is->subtitleq);
3215 
3216  is->continue_read_thread = SDL_CreateCond();
3217 
3218  init_clock(&is->vidclk, &is->videoq.serial);
3219  init_clock(&is->audclk, &is->audioq.serial);
3220  init_clock(&is->extclk, &is->extclk.serial);
3221  is->audio_clock_serial = -1;
3222  is->av_sync_type = av_sync_type;
3223  is->read_tid = SDL_CreateThread(read_thread, is);
3224  if (!is->read_tid) {
3225 fail:
3226  stream_close(is);
3227  return NULL;
3228  }
3229  return is;
3230 }
3231 
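 /* Cycle to the next stream of the given type (the 'a'/'v'/'t' keys). When a video stream
  * belongs to a program, only streams of that same program are considered; subtitles may
  * also cycle to "no stream" (-1). */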
3232 static void stream_cycle_channel(VideoState *is, int codec_type)
3233 {
3234  AVFormatContext *ic = is->ic;
3235  int start_index, stream_index;
3236  int old_index;
3237  AVStream *st;
3238  AVProgram *p = NULL;
3239  int nb_streams = is->ic->nb_streams;
3240 
3241  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3242  start_index = is->last_video_stream;
3243  old_index = is->video_stream;
3244  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3245  start_index = is->last_audio_stream;
3246  old_index = is->audio_stream;
3247  } else {
3248  start_index = is->last_subtitle_stream;
3249  old_index = is->subtitle_stream;
3250  }
3251  stream_index = start_index;
3252 
3253  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3254  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3255  if (p) {
3256  nb_streams = p->nb_stream_indexes;
3257  for (start_index = 0; start_index < nb_streams; start_index++)
3258  if (p->stream_index[start_index] == stream_index)
3259  break;
3260  if (start_index == nb_streams)
3261  start_index = -1;
3262  stream_index = start_index;
3263  }
3264  }
3265 
3266  for (;;) {
3267  if (++stream_index >= nb_streams)
3268  {
3269  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3270  {
3271  stream_index = -1;
3272  is->last_subtitle_stream = -1;
3273  goto the_end;
3274  }
3275  if (start_index == -1)
3276  return;
3277  stream_index = 0;
3278  }
3279  if (stream_index == start_index)
3280  return;
3281  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3282  if (st->codec->codec_type == codec_type) {
3283  /* check that parameters are OK */
3284  switch (codec_type) {
3285  case AVMEDIA_TYPE_AUDIO:
3286  if (st->codec->sample_rate != 0 &&
3287  st->codec->channels != 0)
3288  goto the_end;
3289  break;
3290  case AVMEDIA_TYPE_VIDEO:
3291  case AVMEDIA_TYPE_SUBTITLE:
3292  goto the_end;
3293  default:
3294  break;
3295  }
3296  }
3297  }
3298  the_end:
3299  if (p && stream_index != -1)
3300  stream_index = p->stream_index[stream_index];
3301  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3302  av_get_media_type_string(codec_type),
3303  old_index,
3304  stream_index);
3305 
3306  stream_component_close(is, old_index);
3307  stream_component_open(is, stream_index);
3308 }
3309 
3310 
3311 static void toggle_full_screen(VideoState *is)
3312 {
3313 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
3314  /* OS X needs to reallocate the SDL overlays */
3315  int i;
3316  for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
3317  is->pictq.queue[i].reallocate = 1;
3318 #endif
3319  is_full_screen = !is_full_screen;
3320  video_open(is, 1, NULL);
3321 }
3322 
3323 static void toggle_audio_display(VideoState *is)
3324 {
3325  int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
3326  int next = is->show_mode;
3327  do {
3328  next = (next + 1) % SHOW_MODE_NB;
3329  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3330  if (is->show_mode != next) {
3331  fill_rectangle(screen,
3332  is->xleft, is->ytop, is->width, is->height,
3333  bgcolor, 1);
3334  is->force_refresh = 1;
3335  is->show_mode = next;
3336  }
3337 }
3338 
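 /* Pump SDL events and, while none are pending, keep the display alive: sleep for the
  * remainder of the current refresh interval, hide the mouse cursor after a delay, and
  * call video_refresh() roughly every REFRESH_RATE seconds. */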
3339 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3340  double remaining_time = 0.0;
3341  SDL_PumpEvents();
3342  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_ALLEVENTS)) {
3343  if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3344  SDL_ShowCursor(0);
3345  cursor_hidden = 1;
3346  }
3347  if (remaining_time > 0.0)
3348  av_usleep((int64_t)(remaining_time * 1000000.0));
3349  remaining_time = REFRESH_RATE;
3350  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3351  video_refresh(is, &remaining_time);
3352  SDL_PumpEvents();
3353  }
3354 }
3355 
3356 static void seek_chapter(VideoState *is, int incr)
3357 {
3358  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3359  int i;
3360 
3361  if (!is->ic->nb_chapters)
3362  return;
3363 
3364  /* find the current chapter */
3365  for (i = 0; i < is->ic->nb_chapters; i++) {
3366  AVChapter *ch = is->ic->chapters[i];
3367  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3368  i--;
3369  break;
3370  }
3371  }
3372 
3373  i += incr;
3374  i = FFMAX(i, 0);
3375  if (i >= is->ic->nb_chapters)
3376  return;
3377 
3378  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3379  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3380  AV_TIME_BASE_Q), 0, 0);
3381 }
3382 
3383 /* handle an event sent by the GUI */
3384 static void event_loop(VideoState *cur_stream)
3385 {
3386  SDL_Event event;
3387  double incr, pos, frac;
3388 
3389  for (;;) {
3390  double x;
3391  refresh_loop_wait_event(cur_stream, &event);
3392  switch (event.type) {
3393  case SDL_KEYDOWN:
3394  if (exit_on_keydown) {
3395  do_exit(cur_stream);
3396  break;
3397  }
3398  switch (event.key.keysym.sym) {
3399  case SDLK_ESCAPE:
3400  case SDLK_q:
3401  do_exit(cur_stream);
3402  break;
3403  case SDLK_f:
3404  toggle_full_screen(cur_stream);
3405  cur_stream->force_refresh = 1;
3406  break;
3407  case SDLK_p:
3408  case SDLK_SPACE:
3409  toggle_pause(cur_stream);
3410  break;
3411  case SDLK_s: // S: Step to next frame
3412  step_to_next_frame(cur_stream);
3413  break;
3414  case SDLK_a:
3415  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3416  break;
3417  case SDLK_v:
3418  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3419  break;
3420  case SDLK_c:
3421  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3422  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3423  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3424  break;
3425  case SDLK_t:
3426  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3427  break;
3428  case SDLK_w:
3429 #if CONFIG_AVFILTER
3430  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3431  if (++cur_stream->vfilter_idx >= nb_vfilters)
3432  cur_stream->vfilter_idx = 0;
3433  } else {
3434  cur_stream->vfilter_idx = 0;
3435  toggle_audio_display(cur_stream);
3436  }
3437 #else
3438  toggle_audio_display(cur_stream);
3439 #endif
3440  break;
3441  case SDLK_PAGEUP:
3442  if (cur_stream->ic->nb_chapters <= 1) {
3443  incr = 600.0;
3444  goto do_seek;
3445  }
3446  seek_chapter(cur_stream, 1);
3447  break;
3448  case SDLK_PAGEDOWN:
3449  if (cur_stream->ic->nb_chapters <= 1) {
3450  incr = -600.0;
3451  goto do_seek;
3452  }
3453  seek_chapter(cur_stream, -1);
3454  break;
3455  case SDLK_LEFT:
3456  incr = -10.0;
3457  goto do_seek;
3458  case SDLK_RIGHT:
3459  incr = 10.0;
3460  goto do_seek;
3461  case SDLK_UP:
3462  incr = 60.0;
3463  goto do_seek;
3464  case SDLK_DOWN:
3465  incr = -60.0;
3466  do_seek:
3467  if (seek_by_bytes) {
3468  pos = -1;
3469  if (pos < 0 && cur_stream->video_stream >= 0)
3470  pos = frame_queue_last_pos(&cur_stream->pictq);
3471  if (pos < 0 && cur_stream->audio_stream >= 0)
3472  pos = frame_queue_last_pos(&cur_stream->sampq);
3473  if (pos < 0)
3474  pos = avio_tell(cur_stream->ic->pb);
3475  if (cur_stream->ic->bit_rate)
3476  incr *= cur_stream->ic->bit_rate / 8.0;
3477  else
3478  incr *= 180000.0;
3479  pos += incr;
3480  stream_seek(cur_stream, pos, incr, 1);
3481  } else {
3482  pos = get_master_clock(cur_stream);
3483  if (isnan(pos))
3484  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3485  pos += incr;
3486  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3487  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3488  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3489  }
3490  break;
3491  default:
3492  break;
3493  }
3494  break;
3495  case SDL_VIDEOEXPOSE:
3496  cur_stream->force_refresh = 1;
3497  break;
3498  case SDL_MOUSEBUTTONDOWN:
3499  if (exit_on_mousedown) {
3500  do_exit(cur_stream);
3501  break;
3502  }
3503  case SDL_MOUSEMOTION:
3504  if (cursor_hidden) {
3505  SDL_ShowCursor(1);
3506  cursor_hidden = 0;
3507  }
3508  cursor_last_shown = av_gettime_relative();
3509  if (event.type == SDL_MOUSEBUTTONDOWN) {
3510  x = event.button.x;
3511  } else {
3512  if (event.motion.state != SDL_PRESSED)
3513  break;
3514  x = event.motion.x;
3515  }
3516  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3517  uint64_t size = avio_size(cur_stream->ic->pb);
3518  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3519  } else {
3520  int64_t ts;
3521  int ns, hh, mm, ss;
3522  int tns, thh, tmm, tss;
3523  tns = cur_stream->ic->duration / 1000000LL;
3524  thh = tns / 3600;
3525  tmm = (tns % 3600) / 60;
3526  tss = (tns % 60);
3527  frac = x / cur_stream->width;
3528  ns = frac * tns;
3529  hh = ns / 3600;
3530  mm = (ns % 3600) / 60;
3531  ss = (ns % 60);
3532  av_log(NULL, AV_LOG_INFO,
3533  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3534  hh, mm, ss, thh, tmm, tss);
3535  ts = frac * cur_stream->ic->duration;
3536  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3537  ts += cur_stream->ic->start_time;
3538  stream_seek(cur_stream, ts, 0, 0);
3539  }
3540  break;
3541  case SDL_VIDEORESIZE:
3542  screen = SDL_SetVideoMode(FFMIN(16383, event.resize.w), event.resize.h, 0,
3543  SDL_HWSURFACE|(is_full_screen?SDL_FULLSCREEN:SDL_RESIZABLE)|SDL_ASYNCBLIT|SDL_HWACCEL);
3544  if (!screen) {
3545  av_log(NULL, AV_LOG_FATAL, "Failed to set video mode\n");
3546  do_exit(cur_stream);
3547  }
3548  screen_width = cur_stream->width = screen->w;
3549  screen_height = cur_stream->height = screen->h;
3550  cur_stream->force_refresh = 1;
3551  break;
3552  case SDL_QUIT:
3553  case FF_QUIT_EVENT:
3554  do_exit(cur_stream);
3555  break;
3556  case FF_ALLOC_EVENT:
3557  alloc_picture(event.user.data1);
3558  break;
3559  default:
3560  break;
3561  }
3562  }
3563 }
3564 
3565 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3566 {
3567  av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3568  return opt_default(NULL, "video_size", arg);
3569 }
3570 
3571 static int opt_width(void *optctx, const char *opt, const char *arg)
3572 {
3573  screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3574  return 0;
3575 }
3576 
3577 static int opt_height(void *optctx, const char *opt, const char *arg)
3578 {
3579  screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3580  return 0;
3581 }
3582 
3583 static int opt_format(void *optctx, const char *opt, const char *arg)
3584 {
3585  file_iformat = av_find_input_format(arg);
3586  if (!file_iformat) {
3587  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3588  return AVERROR(EINVAL);
3589  }
3590  return 0;
3591 }
3592 
3593 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3594 {
3595  av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3596  return opt_default(NULL, "pixel_format", arg);
3597 }
3598 
3599 static int opt_sync(void *optctx, const char *opt, const char *arg)
3600 {
3601  if (!strcmp(arg, "audio"))
3602  av_sync_type = AV_SYNC_AUDIO_MASTER;
3603  else if (!strcmp(arg, "video"))
3604  av_sync_type = AV_SYNC_VIDEO_MASTER;
3605  else if (!strcmp(arg, "ext"))
3606  av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3607  else {
3608  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3609  exit(1);
3610  }
3611  return 0;
3612 }
3613 
3614 static int opt_seek(void *optctx, const char *opt, const char *arg)
3615 {
3616  start_time = parse_time_or_die(opt, arg, 1);
3617  return 0;
3618 }
3619 
3620 static int opt_duration(void *optctx, const char *opt, const char *arg)
3621 {
3622  duration = parse_time_or_die(opt, arg, 1);
3623  return 0;
3624 }
3625 
3626 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3627 {
3628  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3629  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3630  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3631  parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3632  return 0;
3633 }
3634 
3635 static void opt_input_file(void *optctx, const char *filename)
3636 {
3637  if (input_filename) {
3638  av_log(NULL, AV_LOG_FATAL,
3639  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3640  filename, input_filename);
3641  exit(1);
3642  }
3643  if (!strcmp(filename, "-"))
3644  filename = "pipe:";
3645  input_filename = filename;
3646 }
3647 
3648 static int opt_codec(void *optctx, const char *opt, const char *arg)
3649 {
3650  const char *spec = strchr(opt, ':');
3651  if (!spec) {
3652  av_log(NULL, AV_LOG_ERROR,
3653  "No media specifier was specified in '%s' in option '%s'\n",
3654  arg, opt);
3655  return AVERROR(EINVAL);
3656  }
3657  spec++;
3658  switch (spec[0]) {
3659  case 'a' : audio_codec_name = arg; break;
3660  case 's' : subtitle_codec_name = arg; break;
3661  case 'v' : video_codec_name = arg; break;
3662  default:
3663  av_log(NULL, AV_LOG_ERROR,
3664  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3665  return AVERROR(EINVAL);
3666  }
3667  return 0;
3668 }
3669 
3670 static int dummy;
3671 
3672 static const OptionDef options[] = {
3673 #include "cmdutils_common_opts.h"
3674  { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3675  { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3676  { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3677  { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3678  { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3679  { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3680  { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3681  { "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3682  { "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3683  { "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3684  { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3685  { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3686  { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3687  { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3688  { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3689  { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3690  { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3691  { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3692  { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3693  { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3694  { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3695  { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3696  { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3697  { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3698  { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3699  { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3700  { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3701  { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3702  { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3703 #if CONFIG_AVFILTER
3704  { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3705  { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3706 #endif
3707  { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3708  { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3709  { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3710  { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3711  { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3712  { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3713  { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3714  { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3715  { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
3716  { NULL, },
3717 };
3718 
3719 static void show_usage(void)
3720 {
3721  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3722  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3723  av_log(NULL, AV_LOG_INFO, "\n");
3724 }
3725 
3726 void show_help_default(const char *opt, const char *arg)
3727 {
3728  av_log_set_callback(log_callback_help);
3729  show_usage();
3730  show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3731  show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3732  printf("\n");
3733  show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3734  show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3735 #if !CONFIG_AVFILTER
3736  show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3737 #else
3738  show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3739 #endif
3740  printf("\nWhile playing:\n"
3741  "q, ESC quit\n"
3742  "f toggle full screen\n"
3743  "p, SPC pause\n"
3744  "a cycle audio channel in the current program\n"
3745  "v cycle video channel\n"
3746  "t cycle subtitle channel in the current program\n"
3747  "c cycle program\n"
3748  "w cycle video filters or show modes\n"
3749  "s activate frame-step mode\n"
3750  "left/right seek backward/forward 10 seconds\n"
3751  "down/up seek backward/forward 1 minute\n"
3752  "page down/page up seek backward/forward 10 minutes\n"
3753  "mouse click seek to percentage in file corresponding to fraction of width\n"
3754  );
3755 }
3756 
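 /* Lock manager registered with av_lockmgr_register() in main(); it maps the libav*
  * lock operations onto SDL mutexes so codec-level locking works with SDL threads. */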
3757 static int lockmgr(void **mtx, enum AVLockOp op)
3758 {
3759  switch(op) {
3760  case AV_LOCK_CREATE:
3761  *mtx = SDL_CreateMutex();
3762  if(!*mtx)
3763  return 1;
3764  return 0;
3765  case AV_LOCK_OBTAIN:
3766  return !!SDL_LockMutex(*mtx);
3767  case AV_LOCK_RELEASE:
3768  return !!SDL_UnlockMutex(*mtx);
3769  case AV_LOCK_DESTROY:
3770  SDL_DestroyMutex(*mtx);
3771  return 0;
3772  }
3773  return 1;
3774 }
3775 
3776 /* Called from the main */
3777 int main(int argc, char **argv)
3778 {
3779  int flags;
3780  VideoState *is;
3781  char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3782 
3783  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3784  parse_loglevel(argc, argv, options);
3785 
3786  /* register all codecs, demux and protocols */
3787 #if CONFIG_AVDEVICE
3788  avdevice_register_all();
3789 #endif
3790 #if CONFIG_AVFILTER
3791  avfilter_register_all();
3792 #endif
3793  av_register_all();
3794  avformat_network_init();
3795 
3796  init_opts();
3797 
3798  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3799  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3800 
3801  show_banner(argc, argv, options);
3802 
3803  parse_options(NULL, argc, argv, options, opt_input_file);
3804 
3805  if (!input_filename) {
3806  show_usage();
3807  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3808  av_log(NULL, AV_LOG_FATAL,
3809  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3810  exit(1);
3811  }
3812 
3813  if (display_disable) {
3814  video_disable = 1;
3815  }
3816  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3817  if (audio_disable)
3818  flags &= ~SDL_INIT_AUDIO;
3819  if (display_disable)
3820  SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3821 #if !defined(_WIN32) && !defined(__APPLE__)
3822  flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3823 #endif
3824  if (SDL_Init (flags)) {
3825  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3826  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3827  exit(1);
3828  }
3829 
3830  if (!display_disable) {
3831  const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3832  fs_screen_width = vi->current_w;
3833  fs_screen_height = vi->current_h;
3834  }
3835 
3836  SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3837  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3838  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3839 
3840  if (av_lockmgr_register(lockmgr)) {
3841  av_log(NULL, AV_LOG_FATAL, "Could not initialize lock manager!\n");
3842  do_exit(NULL);
3843  }
3844 
3845  av_init_packet(&flush_pkt);
3846  flush_pkt.data = (uint8_t *)&flush_pkt;
3847 
3848  is = stream_open(input_filename, file_iformat);
3849  if (!is) {
3850  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3851  do_exit(NULL);
3852  }
3853 
3854  event_loop(is);
3855 
3856  /* never returns */
3857 
3858  return 0;
3859 }