FFmpeg
ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include "libavutil/avstring.h"
32 #include "libavutil/colorspace.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/dict.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/avassert.h"
40 #include "libavutil/time.h"
41 #include "libavformat/avformat.h"
42 #include "libavdevice/avdevice.h"
43 #include "libswscale/swscale.h"
44 #include "libavutil/opt.h"
45 #include "libavcodec/avfft.h"
46 #include "libswresample/swresample.h"
47 
48 #if CONFIG_AVFILTER
49 # include "libavfilter/avcodec.h"
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/avfiltergraph.h"
52 # include "libavfilter/buffersink.h"
53 # include "libavfilter/buffersrc.h"
54 #endif
55 
56 #include <SDL.h>
57 #include <SDL_thread.h>
58 
59 #include "cmdutils.h"
60 
61 #include <assert.h>
62 
63 const char program_name[] = "ffplay";
64 const int program_birth_year = 2003;
65 
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_FRAMES 5
68 
69 /* SDL audio buffer size, in samples. Should be small to have precise
70  A/V sync as SDL does not have hardware buffer fullness info. */
71 #define SDL_AUDIO_BUFFER_SIZE 1024
72 
73 /* no AV sync correction is done if below the AV sync threshold */
74 #define AV_SYNC_THRESHOLD 0.01
75 /* no AV sync correction is done if the error is too big */
76 #define AV_NOSYNC_THRESHOLD 10.0
77 
78 /* maximum audio speed change to get correct sync */
79 #define SAMPLE_CORRECTION_PERCENT_MAX 10
80 
81 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
82 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
83 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
84 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
85 
86 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
87 #define AUDIO_DIFF_AVG_NB 20
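The A-V difference used for audio sync below is smoothed with an exponential moving average before any correction is applied (see synchronize_audio() further down). A minimal standalone sketch of that recurrence, not part of ffplay.c, with the coefficient value chosen purely for illustration:

/* Sketch only: running average of the audio-video clock error.
 * ffplay derives its own audio_diff_avg_coef so that roughly the last
 * AUDIO_DIFF_AVG_NB differences dominate the estimate; ~0.79 is an
 * illustrative value for AUDIO_DIFF_AVG_NB = 20. */
static double sketch_diff_cum = 0.0;

static double sketch_update_avg_diff(double diff, double coef)
{
    sketch_diff_cum = diff + coef * sketch_diff_cum; /* same recurrence as synchronize_audio() */
    return sketch_diff_cum * (1.0 - coef);           /* normalize the geometric series */
}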
88 
89 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
90 #define REFRESH_RATE 0.01
91 
92 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
93 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
94 #define SAMPLE_ARRAY_SIZE (8 * 65536)
95 
96 #define CURSOR_HIDE_DELAY 1000000
97 
98 static int64_t sws_flags = SWS_BICUBIC;
99 
100 typedef struct MyAVPacketList {
101  AVPacket pkt;
102  struct MyAVPacketList *next;
103  int serial;
104 } MyAVPacketList;
105 
106 typedef struct PacketQueue {
107  MyAVPacketList *first_pkt, *last_pkt;
108  int nb_packets;
109  int size;
110  int abort_request;
111  int serial;
112  SDL_mutex *mutex;
113  SDL_cond *cond;
114 } PacketQueue;
115 
116 #define VIDEO_PICTURE_QUEUE_SIZE 4
117 #define SUBPICTURE_QUEUE_SIZE 4
118 
119 typedef struct VideoPicture {
120  double pts; // presentation timestamp for this picture
121  int64_t pos; // byte position in file
122  SDL_Overlay *bmp;
123  int width, height; /* source height & width */
124  AVRational sample_aspect_ratio;
125  int allocated;
126  int reallocate;
127  int serial;
128 
129 #if CONFIG_AVFILTER
130  AVFilterBufferRef *picref;
131 #endif
132 } VideoPicture;
133 
134 typedef struct SubPicture {
135  double pts; /* presentation time stamp for this picture */
136  AVSubtitle sub;
137 } SubPicture;
138 
139 typedef struct AudioParams {
140  int freq;
141  int channels;
142  int64_t channel_layout;
143  enum AVSampleFormat fmt;
144 } AudioParams;
145 
146 enum {
147  AV_SYNC_AUDIO_MASTER, /* default choice */
148  AV_SYNC_VIDEO_MASTER, /* synchronize to the video clock */
149  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
150 };
151 
152 typedef struct VideoState {
153  SDL_Thread *read_tid;
154  SDL_Thread *video_tid;
159  int paused;
162  int seek_req;
164  int64_t seek_pos;
165  int64_t seek_rel;
168  int realtime;
169 
171 
173  double external_clock; ///< external clock base
174  double external_clock_drift; ///< external clock base - time (av_gettime) at which we updated external_clock
175  int64_t external_clock_time; ///< last reference time
176  double external_clock_speed; ///< speed of the external clock
177 
178  double audio_clock;
180  double audio_diff_cum; /* used for AV difference average computation */
190  unsigned int audio_buf_size; /* in bytes */
191  unsigned int audio_buf1_size;
192  int audio_buf_index; /* in bytes */
205 
206  enum ShowMode {
208  } show_mode;
215  int xpos;
217 
218  SDL_Thread *subtitle_tid;
225  SDL_mutex *subpq_mutex;
226  SDL_cond *subpq_cond;
227 
228  double frame_timer;
238  double video_current_pts; // current displayed pts
239  double video_current_pts_drift; // video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
240  int64_t video_current_pos; // current displayed file pos
241  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
245  SDL_mutex *pictq_mutex;
246  SDL_cond *pictq_cond;
247 #if !CONFIG_AVFILTER
249 #endif
251 
252  char filename[1024];
254  int step;
255 
256 #if CONFIG_AVFILTER
257  AVFilterContext *in_video_filter; // the first filter in the video chain
258  AVFilterContext *out_video_filter; // the last filter in the video chain
259  int use_dr1;
260  FrameBuffer *buffer_pool;
261 #endif
262 
264 
266 } VideoState;
267 
268 /* options specified by the user */
270 static const char *input_filename;
271 static const char *window_title;
272 static int fs_screen_width;
273 static int fs_screen_height;
274 static int default_width = 640;
275 static int default_height = 480;
276 static int screen_width = 0;
277 static int screen_height = 0;
278 static int audio_disable;
279 static int video_disable;
280 static int subtitle_disable;
282  [AVMEDIA_TYPE_AUDIO] = -1,
283  [AVMEDIA_TYPE_VIDEO] = -1,
284  [AVMEDIA_TYPE_SUBTITLE] = -1,
285 };
286 static int seek_by_bytes = -1;
287 static int display_disable;
288 static int show_status = 1;
290 static int64_t start_time = AV_NOPTS_VALUE;
291 static int64_t duration = AV_NOPTS_VALUE;
292 static int workaround_bugs = 1;
293 static int fast = 0;
294 static int genpts = 0;
295 static int lowres = 0;
296 static int idct = FF_IDCT_AUTO;
300 static int error_concealment = 3;
301 static int decoder_reorder_pts = -1;
302 static int autoexit;
303 static int exit_on_keydown;
304 static int exit_on_mousedown;
305 static int loop = 1;
306 static int framedrop = -1;
307 static int infinite_buffer = -1;
308 static enum ShowMode show_mode = SHOW_MODE_NONE;
309 static const char *audio_codec_name;
310 static const char *subtitle_codec_name;
311 static const char *video_codec_name;
312 double rdftspeed = 0.02;
313 static int64_t cursor_last_shown;
314 static int cursor_hidden = 0;
315 #if CONFIG_AVFILTER
316 static char *vfilters = NULL;
317 #endif
318 
319 /* current context */
320 static int is_full_screen;
321 static int64_t audio_callback_time;
322 
323 static AVPacket flush_pkt;
324 
325 #define FF_ALLOC_EVENT (SDL_USEREVENT)
326 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
327 
328 static SDL_Surface *screen;
329 
330 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
331 
332 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
333 {
334  MyAVPacketList *pkt1;
335 
336  if (q->abort_request)
337  return -1;
338 
339  pkt1 = av_malloc(sizeof(MyAVPacketList));
340  if (!pkt1)
341  return -1;
342  pkt1->pkt = *pkt;
343  pkt1->next = NULL;
344  if (pkt == &flush_pkt)
345  q->serial++;
346  pkt1->serial = q->serial;
347 
348  if (!q->last_pkt)
349  q->first_pkt = pkt1;
350  else
351  q->last_pkt->next = pkt1;
352  q->last_pkt = pkt1;
353  q->nb_packets++;
354  q->size += pkt1->pkt.size + sizeof(*pkt1);
355  /* XXX: should duplicate packet data in DV case */
356  SDL_CondSignal(q->cond);
357  return 0;
358 }
359 
360 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
361 {
362  int ret;
363 
364  /* duplicate the packet */
365  if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
366  return -1;
367 
368  SDL_LockMutex(q->mutex);
369  ret = packet_queue_put_private(q, pkt);
370  SDL_UnlockMutex(q->mutex);
371 
372  if (pkt != &flush_pkt && ret < 0)
373  av_free_packet(pkt);
374 
375  return ret;
376 }
377 
378 /* packet queue handling */
379 static void packet_queue_init(PacketQueue *q)
380 {
381  memset(q, 0, sizeof(PacketQueue));
382  q->mutex = SDL_CreateMutex();
383  q->cond = SDL_CreateCond();
384  q->abort_request = 1;
385 }
386 
387 static void packet_queue_flush(PacketQueue *q)
388 {
389  MyAVPacketList *pkt, *pkt1;
390 
391  SDL_LockMutex(q->mutex);
392  for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
393  pkt1 = pkt->next;
394  av_free_packet(&pkt->pkt);
395  av_freep(&pkt);
396  }
397  q->last_pkt = NULL;
398  q->first_pkt = NULL;
399  q->nb_packets = 0;
400  q->size = 0;
401  SDL_UnlockMutex(q->mutex);
402 }
403 
404 static void packet_queue_destroy(PacketQueue *q)
405 {
406  packet_queue_flush(q);
407  SDL_DestroyMutex(q->mutex);
408  SDL_DestroyCond(q->cond);
409 }
410 
411 static void packet_queue_abort(PacketQueue *q)
412 {
413  SDL_LockMutex(q->mutex);
414 
415  q->abort_request = 1;
416 
417  SDL_CondSignal(q->cond);
418 
419  SDL_UnlockMutex(q->mutex);
420 }
421 
422 static void packet_queue_start(PacketQueue *q)
423 {
424  SDL_LockMutex(q->mutex);
425  q->abort_request = 0;
426  packet_queue_put_private(q, &flush_pkt);
427  SDL_UnlockMutex(q->mutex);
428 }
429 
430 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
431 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
432 {
433  MyAVPacketList *pkt1;
434  int ret;
435 
436  SDL_LockMutex(q->mutex);
437 
438  for (;;) {
439  if (q->abort_request) {
440  ret = -1;
441  break;
442  }
443 
444  pkt1 = q->first_pkt;
445  if (pkt1) {
446  q->first_pkt = pkt1->next;
447  if (!q->first_pkt)
448  q->last_pkt = NULL;
449  q->nb_packets--;
450  q->size -= pkt1->pkt.size + sizeof(*pkt1);
451  *pkt = pkt1->pkt;
452  if (serial)
453  *serial = pkt1->serial;
454  av_free(pkt1);
455  ret = 1;
456  break;
457  } else if (!block) {
458  ret = 0;
459  break;
460  } else {
461  SDL_CondWait(q->cond, q->mutex);
462  }
463  }
464  SDL_UnlockMutex(q->mutex);
465  return ret;
466 }
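A minimal consumer sketch, not part of ffplay.c, showing how the serial returned by packet_queue_get() above is meant to be used: packets queued before the most recent flush carry an older serial and can simply be discarded.

/* Sketch only: drain a queue non-blockingly, dropping pre-flush packets. */
static void sketch_drain_stale(PacketQueue *q)
{
    AVPacket pkt;
    int serial;

    while (packet_queue_get(q, &pkt, 0, &serial) > 0) {
        if (pkt.data == flush_pkt.data)
            continue;                 /* marker packet: nothing to decode or free */
        if (serial != q->serial) {    /* queued before the last flush (e.g. a seek) */
            av_free_packet(&pkt);
            continue;
        }
        /* ... hand pkt to the decoder here ... */
        av_free_packet(&pkt);
    }
}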
467 
468 static inline void fill_rectangle(SDL_Surface *screen,
469  int x, int y, int w, int h, int color, int update)
470 {
471  SDL_Rect rect;
472  rect.x = x;
473  rect.y = y;
474  rect.w = w;
475  rect.h = h;
476  SDL_FillRect(screen, &rect, color);
477  if (update && w > 0 && h > 0)
478  SDL_UpdateRect(screen, x, y, w, h);
479 }
480 
481 /* draw only the border of a rectangle */
482 static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
483 {
484  int w1, w2, h1, h2;
485 
486  /* fill the background */
487  w1 = x;
488  if (w1 < 0)
489  w1 = 0;
490  w2 = width - (x + w);
491  if (w2 < 0)
492  w2 = 0;
493  h1 = y;
494  if (h1 < 0)
495  h1 = 0;
496  h2 = height - (y + h);
497  if (h2 < 0)
498  h2 = 0;
499  fill_rectangle(screen,
500  xleft, ytop,
501  w1, height,
502  color, update);
503  fill_rectangle(screen,
504  xleft + width - w2, ytop,
505  w2, height,
506  color, update);
507  fill_rectangle(screen,
508  xleft + w1, ytop,
509  width - w1 - w2, h1,
510  color, update);
511  fill_rectangle(screen,
512  xleft + w1, ytop + height - h2,
513  width - w1 - w2, h2,
514  color, update);
515 }
516 
517 #define ALPHA_BLEND(a, oldp, newp, s)\
518 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
519 
520 #define RGBA_IN(r, g, b, a, s)\
521 {\
522  unsigned int v = ((const uint32_t *)(s))[0];\
523  a = (v >> 24) & 0xff;\
524  r = (v >> 16) & 0xff;\
525  g = (v >> 8) & 0xff;\
526  b = v & 0xff;\
527 }
528 
529 #define YUVA_IN(y, u, v, a, s, pal)\
530 {\
531  unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
532  a = (val >> 24) & 0xff;\
533  y = (val >> 16) & 0xff;\
534  u = (val >> 8) & 0xff;\
535  v = val & 0xff;\
536 }
537 
538 #define YUVA_OUT(d, y, u, v, a)\
539 {\
540  ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
541 }
542 
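A worked example of ALPHA_BLEND above with s = 0 (illustrative numbers only): blending an old luma of 100 with a new luma of 200 at alpha 128 gives ((100 << 0) * (255 - 128) + 200 * 128) / (255 << 0) = (12700 + 25600) / 255 = 150 under integer division, i.e. roughly the halfway mix expected at about 50% opacity.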
543 
544 #define BPP 1
545 
546 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
547 {
548  int wrap, wrap3, width2, skip2;
549  int y, u, v, a, u1, v1, a1, w, h;
550  uint8_t *lum, *cb, *cr;
551  const uint8_t *p;
552  const uint32_t *pal;
553  int dstx, dsty, dstw, dsth;
554 
555  dstw = av_clip(rect->w, 0, imgw);
556  dsth = av_clip(rect->h, 0, imgh);
557  dstx = av_clip(rect->x, 0, imgw - dstw);
558  dsty = av_clip(rect->y, 0, imgh - dsth);
559  lum = dst->data[0] + dsty * dst->linesize[0];
560  cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
561  cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
562 
563  width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
564  skip2 = dstx >> 1;
565  wrap = dst->linesize[0];
566  wrap3 = rect->pict.linesize[0];
567  p = rect->pict.data[0];
568  pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
569 
570  if (dsty & 1) {
571  lum += dstx;
572  cb += skip2;
573  cr += skip2;
574 
575  if (dstx & 1) {
576  YUVA_IN(y, u, v, a, p, pal);
577  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
578  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
579  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
580  cb++;
581  cr++;
582  lum++;
583  p += BPP;
584  }
585  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
586  YUVA_IN(y, u, v, a, p, pal);
587  u1 = u;
588  v1 = v;
589  a1 = a;
590  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
591 
592  YUVA_IN(y, u, v, a, p + BPP, pal);
593  u1 += u;
594  v1 += v;
595  a1 += a;
596  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
597  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
598  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
599  cb++;
600  cr++;
601  p += 2 * BPP;
602  lum += 2;
603  }
604  if (w) {
605  YUVA_IN(y, u, v, a, p, pal);
606  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
607  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
608  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
609  p++;
610  lum++;
611  }
612  p += wrap3 - dstw * BPP;
613  lum += wrap - dstw - dstx;
614  cb += dst->linesize[1] - width2 - skip2;
615  cr += dst->linesize[2] - width2 - skip2;
616  }
617  for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
618  lum += dstx;
619  cb += skip2;
620  cr += skip2;
621 
622  if (dstx & 1) {
623  YUVA_IN(y, u, v, a, p, pal);
624  u1 = u;
625  v1 = v;
626  a1 = a;
627  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
628  p += wrap3;
629  lum += wrap;
630  YUVA_IN(y, u, v, a, p, pal);
631  u1 += u;
632  v1 += v;
633  a1 += a;
634  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
635  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
636  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
637  cb++;
638  cr++;
639  p += -wrap3 + BPP;
640  lum += -wrap + 1;
641  }
642  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
643  YUVA_IN(y, u, v, a, p, pal);
644  u1 = u;
645  v1 = v;
646  a1 = a;
647  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
648 
649  YUVA_IN(y, u, v, a, p + BPP, pal);
650  u1 += u;
651  v1 += v;
652  a1 += a;
653  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
654  p += wrap3;
655  lum += wrap;
656 
657  YUVA_IN(y, u, v, a, p, pal);
658  u1 += u;
659  v1 += v;
660  a1 += a;
661  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
662 
663  YUVA_IN(y, u, v, a, p + BPP, pal);
664  u1 += u;
665  v1 += v;
666  a1 += a;
667  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
668 
669  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
670  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
671 
672  cb++;
673  cr++;
674  p += -wrap3 + 2 * BPP;
675  lum += -wrap + 2;
676  }
677  if (w) {
678  YUVA_IN(y, u, v, a, p, pal);
679  u1 = u;
680  v1 = v;
681  a1 = a;
682  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
683  p += wrap3;
684  lum += wrap;
685  YUVA_IN(y, u, v, a, p, pal);
686  u1 += u;
687  v1 += v;
688  a1 += a;
689  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
690  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
691  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
692  cb++;
693  cr++;
694  p += -wrap3 + BPP;
695  lum += -wrap + 1;
696  }
697  p += wrap3 + (wrap3 - dstw * BPP);
698  lum += wrap + (wrap - dstw - dstx);
699  cb += dst->linesize[1] - width2 - skip2;
700  cr += dst->linesize[2] - width2 - skip2;
701  }
702  /* handle odd height */
703  if (h) {
704  lum += dstx;
705  cb += skip2;
706  cr += skip2;
707 
708  if (dstx & 1) {
709  YUVA_IN(y, u, v, a, p, pal);
710  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
711  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
712  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
713  cb++;
714  cr++;
715  lum++;
716  p += BPP;
717  }
718  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
719  YUVA_IN(y, u, v, a, p, pal);
720  u1 = u;
721  v1 = v;
722  a1 = a;
723  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
724 
725  YUVA_IN(y, u, v, a, p + BPP, pal);
726  u1 += u;
727  v1 += v;
728  a1 += a;
729  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
730  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
731  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
732  cb++;
733  cr++;
734  p += 2 * BPP;
735  lum += 2;
736  }
737  if (w) {
738  YUVA_IN(y, u, v, a, p, pal);
739  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
740  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
741  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
742  }
743  }
744 }
745 
746 static void free_subpicture(SubPicture *sp)
747 {
748  avsubtitle_free(&sp->sub);
749 }
750 
751 static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, VideoPicture *vp)
752 {
753  float aspect_ratio;
754  int width, height, x, y;
755 
756  if (vp->sample_aspect_ratio.num == 0)
757  aspect_ratio = 0;
758  else
759  aspect_ratio = av_q2d(vp->sample_aspect_ratio);
760 
761  if (aspect_ratio <= 0.0)
762  aspect_ratio = 1.0;
763  aspect_ratio *= (float)vp->width / (float)vp->height;
764 
765  /* XXX: we suppose the screen has a 1.0 pixel ratio */
766  height = scr_height;
767  width = ((int)rint(height * aspect_ratio)) & ~1;
768  if (width > scr_width) {
769  width = scr_width;
770  height = ((int)rint(width / aspect_ratio)) & ~1;
771  }
772  x = (scr_width - width) / 2;
773  y = (scr_height - height) / 2;
774  rect->x = scr_xleft + x;
775  rect->y = scr_ytop + y;
776  rect->w = FFMAX(width, 1);
777  rect->h = FFMAX(height, 1);
778 }
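A worked example of the letterboxing math above (illustrative numbers): a 1920x1080 picture with a 1:1 sample aspect ratio shown in an 800x600 area first tries width = rint(600 * 16/9) & ~1 = 1066, which exceeds 800, so it falls back to width = 800 and height = rint(800 * 9/16) & ~1 = 450, centered at x = 0, y = 75.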
779 
780 static void video_image_display(VideoState *is)
781 {
782  VideoPicture *vp;
783  SubPicture *sp;
784  AVPicture pict;
785  SDL_Rect rect;
786  int i;
787 
788  vp = &is->pictq[is->pictq_rindex];
789  if (vp->bmp) {
790  if (is->subtitle_st) {
791  if (is->subpq_size > 0) {
792  sp = &is->subpq[is->subpq_rindex];
793 
794  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
795  SDL_LockYUVOverlay (vp->bmp);
796 
797  pict.data[0] = vp->bmp->pixels[0];
798  pict.data[1] = vp->bmp->pixels[2];
799  pict.data[2] = vp->bmp->pixels[1];
800 
801  pict.linesize[0] = vp->bmp->pitches[0];
802  pict.linesize[1] = vp->bmp->pitches[2];
803  pict.linesize[2] = vp->bmp->pitches[1];
804 
805  for (i = 0; i < sp->sub.num_rects; i++)
806  blend_subrect(&pict, sp->sub.rects[i],
807  vp->bmp->w, vp->bmp->h);
808 
809  SDL_UnlockYUVOverlay (vp->bmp);
810  }
811  }
812  }
813 
814  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp);
815 
816  SDL_DisplayYUVOverlay(vp->bmp, &rect);
817 
818  if (rect.x != is->last_display_rect.x || rect.y != is->last_display_rect.y || rect.w != is->last_display_rect.w || rect.h != is->last_display_rect.h || is->force_refresh) {
819  int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
820  fill_border(is->xleft, is->ytop, is->width, is->height, rect.x, rect.y, rect.w, rect.h, bgcolor, 1);
821  is->last_display_rect = rect;
822  }
823  }
824 }
825 
826 static inline int compute_mod(int a, int b)
827 {
828  return a < 0 ? a%b + b : a%b;
829 }
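For example, compute_mod(-3, 10) returns 7 whereas the C expression -3 % 10 yields -3; the helper therefore always produces a valid non-negative index when the audio display below walks backwards through the circular sample_array.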
830 
831 static void video_audio_display(VideoState *s)
832 {
833  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
834  int ch, channels, h, h2, bgcolor, fgcolor;
835  int64_t time_diff;
836  int rdft_bits, nb_freq;
837 
838  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
839  ;
840  nb_freq = 1 << (rdft_bits - 1);
841 
842  /* compute display index : center on currently output samples */
843  channels = s->audio_tgt.channels;
844  nb_display_channels = channels;
845  if (!s->paused) {
846  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
847  n = 2 * channels;
848  delay = s->audio_write_buf_size;
849  delay /= n;
850 
851  /* to be more precise, we take into account the time spent since
852  the last buffer computation */
853  if (audio_callback_time) {
854  time_diff = av_gettime() - audio_callback_time;
855  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
856  }
857 
858  delay += 2 * data_used;
859  if (delay < data_used)
860  delay = data_used;
861 
862  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
863  if (s->show_mode == SHOW_MODE_WAVES) {
864  h = INT_MIN;
865  for (i = 0; i < 1000; i += channels) {
866  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
867  int a = s->sample_array[idx];
868  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
869  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
870  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
871  int score = a - d;
872  if (h < score && (b ^ c) < 0) {
873  h = score;
874  i_start = idx;
875  }
876  }
877  }
878 
879  s->last_i_start = i_start;
880  } else {
881  i_start = s->last_i_start;
882  }
883 
884  bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
885  if (s->show_mode == SHOW_MODE_WAVES) {
886  fill_rectangle(screen,
887  s->xleft, s->ytop, s->width, s->height,
888  bgcolor, 0);
889 
890  fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
891 
892  /* total height for one channel */
893  h = s->height / nb_display_channels;
894  /* graph height / 2 */
895  h2 = (h * 9) / 20;
896  for (ch = 0; ch < nb_display_channels; ch++) {
897  i = i_start + ch;
898  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
899  for (x = 0; x < s->width; x++) {
900  y = (s->sample_array[i] * h2) >> 15;
901  if (y < 0) {
902  y = -y;
903  ys = y1 - y;
904  } else {
905  ys = y1;
906  }
907  fill_rectangle(screen,
908  s->xleft + x, ys, 1, y,
909  fgcolor, 0);
910  i += channels;
911  if (i >= SAMPLE_ARRAY_SIZE)
912  i -= SAMPLE_ARRAY_SIZE;
913  }
914  }
915 
916  fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
917 
918  for (ch = 1; ch < nb_display_channels; ch++) {
919  y = s->ytop + ch * h;
920  fill_rectangle(screen,
921  s->xleft, y, s->width, 1,
922  fgcolor, 0);
923  }
924  SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
925  } else {
926  nb_display_channels= FFMIN(nb_display_channels, 2);
927  if (rdft_bits != s->rdft_bits) {
928  av_rdft_end(s->rdft);
929  av_free(s->rdft_data);
930  s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
931  s->rdft_bits = rdft_bits;
932  s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
933  }
934  {
935  FFTSample *data[2];
936  for (ch = 0; ch < nb_display_channels; ch++) {
937  data[ch] = s->rdft_data + 2 * nb_freq * ch;
938  i = i_start + ch;
939  for (x = 0; x < 2 * nb_freq; x++) {
940  double w = (x-nb_freq) * (1.0 / nb_freq);
941  data[ch][x] = s->sample_array[i] * (1.0 - w * w);
942  i += channels;
943  if (i >= SAMPLE_ARRAY_SIZE)
944  i -= SAMPLE_ARRAY_SIZE;
945  }
946  av_rdft_calc(s->rdft, data[ch]);
947  }
948  // least efficient way to do this, we should of course directly access it, but it's more than fast enough
949  for (y = 0; y < s->height; y++) {
950  double w = 1 / sqrt(nb_freq);
951  int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
952  int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
953  + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
954  a = FFMIN(a, 255);
955  b = FFMIN(b, 255);
956  fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
957 
958  fill_rectangle(screen,
959  s->xpos, s->height-y, 1, 1,
960  fgcolor, 0);
961  }
962  }
963  SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
964  if (!s->paused)
965  s->xpos++;
966  if (s->xpos >= s->width)
967  s->xpos= s->xleft;
968  }
969 }
970 
971 static void stream_close(VideoState *is)
972 {
973  VideoPicture *vp;
974  int i;
975  /* XXX: use a special url_shutdown call to abort parse cleanly */
976  is->abort_request = 1;
977  SDL_WaitThread(is->read_tid, NULL);
981 
982  /* free all pictures */
983  for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
984  vp = &is->pictq[i];
985 #if CONFIG_AVFILTER
986  avfilter_unref_bufferp(&vp->picref);
987 #endif
988  if (vp->bmp) {
989  SDL_FreeYUVOverlay(vp->bmp);
990  vp->bmp = NULL;
991  }
992  }
993  SDL_DestroyMutex(is->pictq_mutex);
994  SDL_DestroyCond(is->pictq_cond);
995  SDL_DestroyMutex(is->subpq_mutex);
996  SDL_DestroyCond(is->subpq_cond);
997  SDL_DestroyCond(is->continue_read_thread);
998 #if !CONFIG_AVFILTER
1000 #endif
1001  av_free(is);
1002 }
1003 
1004 static void do_exit(VideoState *is)
1005 {
1006  if (is) {
1007  stream_close(is);
1008  }
1010  uninit_opts();
1011 #if CONFIG_AVFILTER
1012  avfilter_uninit();
1013  av_freep(&vfilters);
1014 #endif
1016  if (show_status)
1017  printf("\n");
1018  SDL_Quit();
1019  av_log(NULL, AV_LOG_QUIET, "%s", "");
1020  exit(0);
1021 }
1022 
1023 static void sigterm_handler(int sig)
1024 {
1025  exit(123);
1026 }
1027 
1028 static int video_open(VideoState *is, int force_set_video_mode, VideoPicture *vp)
1029 {
1030  int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
1031  int w,h;
1032  SDL_Rect rect;
1033 
1034  if (is_full_screen) flags |= SDL_FULLSCREEN;
1035  else flags |= SDL_RESIZABLE;
1036 
1037  if (vp && vp->width) {
1038  calculate_display_rect(&rect, 0, 0, INT_MAX, vp->height, vp);
1039  default_width = rect.w;
1040  default_height = rect.h;
1041  }
1042 
1043  if (is_full_screen && fs_screen_width) {
1044  w = fs_screen_width;
1045  h = fs_screen_height;
1046  } else if (!is_full_screen && screen_width) {
1047  w = screen_width;
1048  h = screen_height;
1049  } else {
1050  w = default_width;
1051  h = default_height;
1052  }
1053  if (screen && is->width == screen->w && screen->w == w
1054  && is->height== screen->h && screen->h == h && !force_set_video_mode)
1055  return 0;
1056  screen = SDL_SetVideoMode(w, h, 0, flags);
1057  if (!screen) {
1058  fprintf(stderr, "SDL: could not set video mode - exiting\n");
1059  do_exit(is);
1060  }
1061  if (!window_title)
1063  SDL_WM_SetCaption(window_title, window_title);
1064 
1065  is->width = screen->w;
1066  is->height = screen->h;
1067 
1068  return 0;
1069 }
1070 
1071 /* display the current picture, if any */
1072 static void video_display(VideoState *is)
1073 {
1074  if (!screen)
1075  video_open(is, 0, NULL);
1076  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1077  video_audio_display(is);
1078  else if (is->video_st)
1079  video_image_display(is);
1080 }
1081 
1082 /* get the current audio clock value */
1083 static double get_audio_clock(VideoState *is)
1084 {
1085  if (is->audio_clock_serial != is->audioq.serial)
1086  return NAN;
1087  if (is->paused) {
1088  return is->audio_current_pts;
1089  } else {
1090  return is->audio_current_pts_drift + av_gettime() / 1000000.0;
1091  }
1092 }
1093 
1094 /* get the current video clock value */
1095 static double get_video_clock(VideoState *is)
1096 {
1097  if (is->video_clock_serial != is->videoq.serial)
1098  return NAN;
1099  if (is->paused) {
1100  return is->video_current_pts;
1101  } else {
1102  return is->video_current_pts_drift + av_gettime() / 1000000.0;
1103  }
1104 }
1105 
1106 /* get the current external clock value */
1107 static double get_external_clock(VideoState *is)
1108 {
1109  if (is->paused) {
1110  return is->external_clock;
1111  } else {
1112  double time = av_gettime() / 1000000.0;
1113  return is->external_clock_drift + time - (time - is->external_clock_time / 1000000.0) * (1.0 - is->external_clock_speed);
1114  }
1115 }
1116 
1117 static int get_master_sync_type(VideoState *is) {
1118  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1119  if (is->video_st)
1120  return AV_SYNC_VIDEO_MASTER;
1121  else
1122  return AV_SYNC_AUDIO_MASTER;
1123  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1124  if (is->audio_st)
1125  return AV_SYNC_AUDIO_MASTER;
1126  else
1127  return AV_SYNC_EXTERNAL_CLOCK;
1128  } else {
1129  return AV_SYNC_EXTERNAL_CLOCK;
1130  }
1131 }
1132 
1133 /* get the current master clock value */
1134 static double get_master_clock(VideoState *is)
1135 {
1136  double val;
1137 
1138  switch (get_master_sync_type(is)) {
1139  case AV_SYNC_VIDEO_MASTER:
1140  val = get_video_clock(is);
1141  break;
1142  case AV_SYNC_AUDIO_MASTER:
1143  val = get_audio_clock(is);
1144  break;
1145  default:
1146  val = get_external_clock(is);
1147  break;
1148  }
1149  return val;
1150 }
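All three clock getters above rely on the same drift idiom: store pts minus the wall-clock time at update, then read it back as drift plus the current time so the clock keeps advancing between updates. A minimal sketch of the pattern (the struct and names here are illustrative, not ffplay's):

/* Sketch only: the pts_drift clock pattern used by the getters above. */
typedef struct SketchClock { double pts_drift; } SketchClock;

static void sketch_clock_set(SketchClock *c, double pts, double now_sec)
{
    c->pts_drift = pts - now_sec;  /* remember pts relative to wall time */
}

static double sketch_clock_get(const SketchClock *c, double now_sec)
{
    return c->pts_drift + now_sec; /* keeps advancing in real time between updates */
}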
1151 
1152 static void update_external_clock_pts(VideoState *is, double pts)
1153 {
1154  is->external_clock_time = av_gettime();
1155  is->external_clock = pts;
1156  is->external_clock_drift = pts - is->external_clock_time / 1000000.0;
1157 }
1158 
1159 static void check_external_clock_sync(VideoState *is, double pts) {
1160  double ext_clock = get_external_clock(is);
1161  if (isnan(ext_clock) || fabs(ext_clock - pts) > AV_NOSYNC_THRESHOLD) {
1162  update_external_clock_pts(is, pts);
1163  }
1164 }
1165 
1166 static void update_external_clock_speed(VideoState *is, double speed) {
1168  is->external_clock_speed = speed;
1169 }
1170 
1171 static void check_external_clock_speed(VideoState *is) {
1172  if (is->video_stream >= 0 && is->videoq.nb_packets <= MIN_FRAMES / 2 ||
1173  is->audio_stream >= 0 && is->audioq.nb_packets <= MIN_FRAMES / 2) {
1174  update_external_clock_speed(is, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->external_clock_speed - EXTERNAL_CLOCK_SPEED_STEP));
1175  } else if ((is->video_stream < 0 || is->videoq.nb_packets > MIN_FRAMES * 2) &&
1176  (is->audio_stream < 0 || is->audioq.nb_packets > MIN_FRAMES * 2)) {
1177  update_external_clock_speed(is, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->external_clock_speed + EXTERNAL_CLOCK_SPEED_STEP));
1178  } else {
1179  double speed = is->external_clock_speed;
1180  if (speed != 1.0)
1181  update_external_clock_speed(is, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1182  }
1183 }
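A worked illustration of the adjustment above: when a realtime stream's packet queues run low, the external clock is slowed in EXTERNAL_CLOCK_SPEED_STEP = 0.001 increments toward EXTERNAL_CLOCK_SPEED_MIN = 0.900 (up to 10% slower) so the source can refill the buffers; when the queues are comfortably full it is nudged toward EXTERNAL_CLOCK_SPEED_MAX = 1.010, and otherwise the speed decays back toward 1.0 one step at a time.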
1184 
1185 /* seek in the stream */
1186 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1187 {
1188  if (!is->seek_req) {
1189  is->seek_pos = pos;
1190  is->seek_rel = rel;
1191  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1192  if (seek_by_bytes)
1193  is->seek_flags |= AVSEEK_FLAG_BYTE;
1194  is->seek_req = 1;
1195  SDL_CondSignal(is->continue_read_thread);
1196  }
1197 }
1198 
1199 /* pause or resume the video */
1200 static void stream_toggle_pause(VideoState *is)
1201 {
1202  if (is->paused) {
1203  is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1204  if (is->read_pause_return != AVERROR(ENOSYS)) {
1205  is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1206  }
1207  is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1208  }
1210  is->paused = !is->paused;
1211 }
1212 
1213 static void toggle_pause(VideoState *is)
1214 {
1215  stream_toggle_pause(is);
1216  is->step = 0;
1217 }
1218 
1219 static void step_to_next_frame(VideoState *is)
1220 {
1221  /* if the stream is paused unpause it, then step */
1222  if (is->paused)
1223  stream_toggle_pause(is);
1224  is->step = 1;
1225 }
1226 
1227 static double compute_target_delay(double delay, VideoState *is)
1228 {
1229  double sync_threshold, diff;
1230 
1231  /* update delay to follow master synchronisation source */
1232  if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1233  /* if video is slave, we try to correct big delays by
1234  duplicating or deleting a frame */
1235  diff = get_video_clock(is) - get_master_clock(is);
1236 
1237  /* skip or repeat frame. We take into account the
1238  delay to compute the threshold. I still don't know
1239  if it is the best guess */
1240  sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1241  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
1242  if (diff <= -sync_threshold)
1243  delay = 0;
1244  else if (diff >= sync_threshold)
1245  delay = 2 * delay;
1246  }
1247  }
1248 
1249  av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1250  delay, -diff);
1251 
1252  return delay;
1253 }
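A worked example of the rule above (illustrative numbers): with a nominal frame delay of 40 ms the threshold is FFMAX(AV_SYNC_THRESHOLD, 0.040) = 0.040 s; if the video clock trails the master by 60 ms (diff = -0.060 <= -0.040) the delay collapses to 0 so the late frame is shown immediately, and if video leads by 60 ms the delay doubles to 80 ms so the frame is held on screen longer.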
1254 
1255 static void pictq_next_picture(VideoState *is) {
1256  /* update queue size and signal for next picture */
1257  if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1258  is->pictq_rindex = 0;
1259 
1260  SDL_LockMutex(is->pictq_mutex);
1261  is->pictq_size--;
1262  SDL_CondSignal(is->pictq_cond);
1263  SDL_UnlockMutex(is->pictq_mutex);
1264 }
1265 
1266 static int pictq_prev_picture(VideoState *is) {
1267  VideoPicture *prevvp;
1268  int ret = 0;
1269  /* update queue size and signal for the previous picture */
1270  prevvp = &is->pictq[(is->pictq_rindex + VIDEO_PICTURE_QUEUE_SIZE - 1) % VIDEO_PICTURE_QUEUE_SIZE];
1271  if (prevvp->allocated && prevvp->serial == is->videoq.serial) {
1272  SDL_LockMutex(is->pictq_mutex);
1273  if (is->pictq_size < VIDEO_PICTURE_QUEUE_SIZE - 1) {
1274  if (--is->pictq_rindex == -1)
1275  is->pictq_rindex = VIDEO_PICTURE_QUEUE_SIZE - 1;
1276  is->pictq_size++;
1277  ret = 1;
1278  }
1279  SDL_CondSignal(is->pictq_cond);
1280  SDL_UnlockMutex(is->pictq_mutex);
1281  }
1282  return ret;
1283 }
1284 
1285 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1286  double time = av_gettime() / 1000000.0;
1287  /* update current video pts */
1288  is->video_current_pts = pts;
1289  is->video_current_pts_drift = is->video_current_pts - time;
1290  is->video_current_pos = pos;
1291  is->frame_last_pts = pts;
1292  is->video_clock_serial = serial;
1293  if (is->videoq.serial == serial)
1295 }
1296 
1297 /* called to display each frame */
1298 static void video_refresh(void *opaque, double *remaining_time)
1299 {
1300  VideoState *is = opaque;
1301  VideoPicture *vp;
1302  double time;
1303 
1304  SubPicture *sp, *sp2;
1305 
1306  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1307  check_external_clock_speed(is);
1308 
1309  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1310  time = av_gettime() / 1000000.0;
1311  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1312  video_display(is);
1313  is->last_vis_time = time;
1314  }
1315  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1316  }
1317 
1318  if (is->video_st) {
1319  int redisplay = 0;
1320  if (is->force_refresh)
1321  redisplay = pictq_prev_picture(is);
1322 retry:
1323  if (is->pictq_size == 0) {
1324  SDL_LockMutex(is->pictq_mutex);
1328  }
1329  SDL_UnlockMutex(is->pictq_mutex);
1330  // nothing to do, no picture to display in the queue
1331  } else {
1332  double last_duration, duration, delay;
1333  /* dequeue the picture */
1334  vp = &is->pictq[is->pictq_rindex];
1335 
1336  if (vp->serial != is->videoq.serial) {
1337  pictq_next_picture(is);
1338  redisplay = 0;
1339  goto retry;
1340  }
1341 
1342  if (is->paused)
1343  goto display;
1344 
1345  /* compute nominal last_duration */
1346  last_duration = vp->pts - is->frame_last_pts;
1347  if (last_duration > 0 && last_duration < is->max_frame_duration) {
1348  /* if duration of the last frame was sane, update last_duration in video state */
1349  is->frame_last_duration = last_duration;
1350  }
1351  delay = compute_target_delay(is->frame_last_duration, is);
1352 
1353  time= av_gettime()/1000000.0;
1354  if (time < is->frame_timer + delay) {
1355  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1356  return;
1357  }
1358 
1359  if (delay > 0)
1360  is->frame_timer += delay * FFMAX(1, floor((time-is->frame_timer) / delay));
1361 
1362  SDL_LockMutex(is->pictq_mutex);
1363  update_video_pts(is, vp->pts, vp->pos, vp->serial);
1364  SDL_UnlockMutex(is->pictq_mutex);
1365 
1366  if (is->pictq_size > 1) {
1367  VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1368  duration = nextvp->pts - vp->pts;
1369  if(!is->step && (redisplay || framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1370  if (!redisplay)
1371  is->frame_drops_late++;
1372  pictq_next_picture(is);
1373  redisplay = 0;
1374  goto retry;
1375  }
1376  }
1377 
1378  if (is->subtitle_st) {
1379  if (is->subtitle_stream_changed) {
1380  SDL_LockMutex(is->subpq_mutex);
1381 
1382  while (is->subpq_size) {
1383  free_subpicture(&is->subpq[is->subpq_rindex]);
1384 
1385  /* update queue size and signal for next picture */
1386  if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1387  is->subpq_rindex = 0;
1388 
1389  is->subpq_size--;
1390  }
1391  is->subtitle_stream_changed = 0;
1392 
1393  SDL_CondSignal(is->subpq_cond);
1394  SDL_UnlockMutex(is->subpq_mutex);
1395  } else {
1396  if (is->subpq_size > 0) {
1397  sp = &is->subpq[is->subpq_rindex];
1398 
1399  if (is->subpq_size > 1)
1400  sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1401  else
1402  sp2 = NULL;
1403 
1404  if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1405  || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1406  {
1407  free_subpicture(sp);
1408 
1409  /* update queue size and signal for next picture */
1410  if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1411  is->subpq_rindex = 0;
1412 
1413  SDL_LockMutex(is->subpq_mutex);
1414  is->subpq_size--;
1415  SDL_CondSignal(is->subpq_cond);
1416  SDL_UnlockMutex(is->subpq_mutex);
1417  }
1418  }
1419  }
1420  }
1421 
1422 display:
1423  /* display picture */
1424  if (!display_disable && is->show_mode == SHOW_MODE_VIDEO)
1425  video_display(is);
1426 
1427  pictq_next_picture(is);
1428 
1429  if (is->step && !is->paused)
1430  stream_toggle_pause(is);
1431  }
1432  }
1433  is->force_refresh = 0;
1434  if (show_status) {
1435  static int64_t last_time;
1436  int64_t cur_time;
1437  int aqsize, vqsize, sqsize;
1438  double av_diff;
1439 
1440  cur_time = av_gettime();
1441  if (!last_time || (cur_time - last_time) >= 30000) {
1442  aqsize = 0;
1443  vqsize = 0;
1444  sqsize = 0;
1445  if (is->audio_st)
1446  aqsize = is->audioq.size;
1447  if (is->video_st)
1448  vqsize = is->videoq.size;
1449  if (is->subtitle_st)
1450  sqsize = is->subtitleq.size;
1451  av_diff = 0;
1452  if (is->audio_st && is->video_st)
1453  av_diff = get_audio_clock(is) - get_video_clock(is);
1454  printf("%7.2f A-V:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1455  get_master_clock(is),
1456  av_diff,
1457  is->frame_drops_early + is->frame_drops_late,
1458  aqsize / 1024,
1459  vqsize / 1024,
1460  sqsize,
1461  is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1462  is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1463  fflush(stdout);
1464  last_time = cur_time;
1465  }
1466  }
1467 }
1468 
1469 /* allocate a picture (needs to be done in the main thread to avoid
1470  potential locking problems) */
1471 static void alloc_picture(VideoState *is)
1472 {
1473  VideoPicture *vp;
1474 
1475  vp = &is->pictq[is->pictq_windex];
1476 
1477  if (vp->bmp)
1478  SDL_FreeYUVOverlay(vp->bmp);
1479 
1480 #if CONFIG_AVFILTER
1481  avfilter_unref_bufferp(&vp->picref);
1482 #endif
1483 
1484  video_open(is, 0, vp);
1485 
1486  vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1487  SDL_YV12_OVERLAY,
1488  screen);
1489  if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1490  /* SDL allocates a buffer smaller than requested if the video
1491  * overlay hardware is unable to support the requested size. */
1492  fprintf(stderr, "Error: the video system does not support an image\n"
1493  "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1494  "to reduce the image size.\n", vp->width, vp->height );
1495  do_exit(is);
1496  }
1497 
1498  SDL_LockMutex(is->pictq_mutex);
1499  vp->allocated = 1;
1500  SDL_CondSignal(is->pictq_cond);
1501  SDL_UnlockMutex(is->pictq_mutex);
1502 }
1503 
1504 static void duplicate_right_border_pixels(SDL_Overlay *bmp) {
1505  int i, width, height;
1506  Uint8 *p, *maxp;
1507  for (i = 0; i < 3; i++) {
1508  width = bmp->w;
1509  height = bmp->h;
1510  if (i > 0) {
1511  width >>= 1;
1512  height >>= 1;
1513  }
1514  if (bmp->pitches[i] > width) {
1515  maxp = bmp->pixels[i] + bmp->pitches[i] * height - 1;
1516  for (p = bmp->pixels[i] + width - 1; p < maxp; p += bmp->pitches[i])
1517  *(p+1) = *p;
1518  }
1519  }
1520 }
1521 
1522 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos, int serial)
1523 {
1524  VideoPicture *vp;
1525 
1526 #if defined(DEBUG_SYNC) && 0
1527  printf("frame_type=%c pts=%0.3f\n",
1528  av_get_picture_type_char(src_frame->pict_type), pts);
1529 #endif
1530 
1531  /* wait until we have space to put a new picture */
1532  SDL_LockMutex(is->pictq_mutex);
1533 
1534  /* keep the last already displayed picture in the queue */
1535  while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE - 2 &&
1536  !is->videoq.abort_request) {
1537  SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1538  }
1539  SDL_UnlockMutex(is->pictq_mutex);
1540 
1541  if (is->videoq.abort_request)
1542  return -1;
1543 
1544  vp = &is->pictq[is->pictq_windex];
1545 
1546 #if CONFIG_AVFILTER
1547  vp->sample_aspect_ratio = ((AVFilterBufferRef *)src_frame->opaque)->video->sample_aspect_ratio;
1548 #else
1549  vp->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, src_frame);
1550 #endif
1551 
1552  /* alloc or resize hardware picture buffer */
1553  if (!vp->bmp || vp->reallocate || !vp->allocated ||
1554  vp->width != src_frame->width ||
1555  vp->height != src_frame->height) {
1556  SDL_Event event;
1557 
1558  vp->allocated = 0;
1559  vp->reallocate = 0;
1560  vp->width = src_frame->width;
1561  vp->height = src_frame->height;
1562 
1563  /* the allocation must be done in the main thread to avoid
1564  locking problems. */
1565  event.type = FF_ALLOC_EVENT;
1566  event.user.data1 = is;
1567  SDL_PushEvent(&event);
1568 
1569  /* wait until the picture is allocated */
1570  SDL_LockMutex(is->pictq_mutex);
1571  while (!vp->allocated && !is->videoq.abort_request) {
1572  SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1573  }
1574  /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1575  if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1576  while (!vp->allocated) {
1577  SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1578  }
1579  }
1580  SDL_UnlockMutex(is->pictq_mutex);
1581 
1582  if (is->videoq.abort_request)
1583  return -1;
1584  }
1585 
1586  /* if the frame is not skipped, then display it */
1587  if (vp->bmp) {
1588  AVPicture pict = { { 0 } };
1589 #if CONFIG_AVFILTER
1590  avfilter_unref_bufferp(&vp->picref);
1591  vp->picref = src_frame->opaque;
1592 #endif
1593 
1594  /* get a pointer on the bitmap */
1595  SDL_LockYUVOverlay (vp->bmp);
1596 
1597  pict.data[0] = vp->bmp->pixels[0];
1598  pict.data[1] = vp->bmp->pixels[2];
1599  pict.data[2] = vp->bmp->pixels[1];
1600 
1601  pict.linesize[0] = vp->bmp->pitches[0];
1602  pict.linesize[1] = vp->bmp->pitches[2];
1603  pict.linesize[2] = vp->bmp->pitches[1];
1604 
1605 #if CONFIG_AVFILTER
1606  // FIXME use direct rendering
1607  av_picture_copy(&pict, (AVPicture *)src_frame,
1608  src_frame->format, vp->width, vp->height);
1609 #else
1610  av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1611  is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1612  vp->width, vp->height, src_frame->format, vp->width, vp->height,
1613  AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1614  if (is->img_convert_ctx == NULL) {
1615  fprintf(stderr, "Cannot initialize the conversion context\n");
1616  exit(1);
1617  }
1618  sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1619  0, vp->height, pict.data, pict.linesize);
1620 #endif
1621  /* workaround SDL PITCH_WORKAROUND */
1622  duplicate_right_border_pixels(vp->bmp);
1623  /* update the bitmap content */
1624  SDL_UnlockYUVOverlay(vp->bmp);
1625 
1626  vp->pts = pts;
1627  vp->pos = pos;
1628  vp->serial = serial;
1629 
1630  /* now we can update the picture count */
1631  if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1632  is->pictq_windex = 0;
1633  SDL_LockMutex(is->pictq_mutex);
1634  is->pictq_size++;
1635  SDL_UnlockMutex(is->pictq_mutex);
1636  }
1637  return 0;
1638 }
1639 
1640 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt, int *serial)
1641 {
1642  int got_picture;
1643 
1644  if (packet_queue_get(&is->videoq, pkt, 1, serial) < 0)
1645  return -1;
1646 
1647  if (pkt->data == flush_pkt.data) {
1648  avcodec_flush_buffers(is->video_st->codec);
1649 
1650  SDL_LockMutex(is->pictq_mutex);
1651  // Make sure there are no long delay timers (ideally we should just flush the queue but that's harder)
1652  while (is->pictq_size && !is->videoq.abort_request) {
1653  SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1654  }
1655  is->video_current_pos = -1;
1657  is->frame_last_duration = 0;
1658  is->frame_timer = (double)av_gettime() / 1000000.0;
1660  SDL_UnlockMutex(is->pictq_mutex);
1661 
1662  return 0;
1663  }
1664 
1665  if(avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt) < 0)
1666  return 0;
1667 
1668  if (got_picture) {
1669  int ret = 1;
1670 
1671  if (decoder_reorder_pts == -1) {
1672  *pts = av_frame_get_best_effort_timestamp(frame);
1673  } else if (decoder_reorder_pts) {
1674  *pts = frame->pkt_pts;
1675  } else {
1676  *pts = frame->pkt_dts;
1677  }
1678 
1679  if (*pts == AV_NOPTS_VALUE) {
1680  *pts = 0;
1681  }
1682 
1684  SDL_LockMutex(is->pictq_mutex);
1685  if (is->frame_last_pts != AV_NOPTS_VALUE && *pts) {
1686  double clockdiff = get_video_clock(is) - get_master_clock(is);
1687  double dpts = av_q2d(is->video_st->time_base) * *pts;
1688  double ptsdiff = dpts - is->frame_last_pts;
1689  if (!isnan(clockdiff) && fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
1690  ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
1691  clockdiff + ptsdiff - is->frame_last_filter_delay < 0) {
1692  is->frame_last_dropped_pos = pkt->pos;
1693  is->frame_last_dropped_pts = dpts;
1694  is->frame_drops_early++;
1695  ret = 0;
1696  }
1697  }
1698  SDL_UnlockMutex(is->pictq_mutex);
1699  }
1700 
1701  return ret;
1702  }
1703  return 0;
1704 }
1705 
1706 #if CONFIG_AVFILTER
1707 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1708  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1709 {
1710  int ret;
1711  AVFilterInOut *outputs = NULL, *inputs = NULL;
1712 
1713  if (filtergraph) {
1714  outputs = avfilter_inout_alloc();
1715  inputs = avfilter_inout_alloc();
1716  if (!outputs || !inputs) {
1717  ret = AVERROR(ENOMEM);
1718  goto fail;
1719  }
1720 
1721  outputs->name = av_strdup("in");
1722  outputs->filter_ctx = source_ctx;
1723  outputs->pad_idx = 0;
1724  outputs->next = NULL;
1725 
1726  inputs->name = av_strdup("out");
1727  inputs->filter_ctx = sink_ctx;
1728  inputs->pad_idx = 0;
1729  inputs->next = NULL;
1730 
1731  if ((ret = avfilter_graph_parse(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1732  goto fail;
1733  } else {
1734  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1735  goto fail;
1736  }
1737 
1738  ret = avfilter_graph_config(graph, NULL);
1739 fail:
1740  avfilter_inout_free(&outputs);
1741  avfilter_inout_free(&inputs);
1742  return ret;
1743 }
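A usage sketch of the helper above (the filter string is chosen for illustration only): configure_filtergraph(graph, "scale=640:-1", filt_src, filt_sink) parses the chain, attaches filt_src to its "in" label and filt_sink to its "out" label, and then validates the graph; passing a NULL filtergraph string simply links source to sink directly, which is the path taken when no video filter string is given.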
1744 
1745 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1746 {
1747  static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
1748  char sws_flags_str[128];
1749  char buffersrc_args[256];
1750  int ret;
1751  AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1752  AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_crop;
1753  AVCodecContext *codec = is->video_st->codec;
1754 
1755  if (!buffersink_params)
1756  return AVERROR(ENOMEM);
1757 
1758  av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1759  snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1760  graph->scale_sws_opts = av_strdup(sws_flags_str);
1761 
1762  snprintf(buffersrc_args, sizeof(buffersrc_args),
1763  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1764  frame->width, frame->height, frame->format,
1765  is->video_st->time_base.num, is->video_st->time_base.den,
1766  codec->sample_aspect_ratio.num, FFMAX(codec->sample_aspect_ratio.den, 1));
1767 
1768  if ((ret = avfilter_graph_create_filter(&filt_src,
1769  avfilter_get_by_name("buffer"),
1770  "ffplay_buffer", buffersrc_args, NULL,
1771  graph)) < 0)
1772  goto fail;
1773 
1774  buffersink_params->pixel_fmts = pix_fmts;
1775  ret = avfilter_graph_create_filter(&filt_out,
1776  avfilter_get_by_name("ffbuffersink"),
1777  "ffplay_buffersink", NULL, buffersink_params, graph);
1778  if (ret < 0)
1779  goto fail;
1780 
1781  /* The SDL YUV code does not handle odd width/height for some driver
1782  * combinations, therefore we crop the picture to an even width/height. */
1783  if ((ret = avfilter_graph_create_filter(&filt_crop,
1784  avfilter_get_by_name("crop"),
1785  "ffplay_crop", "floor(in_w/2)*2:floor(in_h/2)*2", NULL, graph)) < 0)
1786  goto fail;
1787  if ((ret = avfilter_link(filt_crop, 0, filt_out, 0)) < 0)
1788  goto fail;
1789 
1790  if ((ret = configure_filtergraph(graph, vfilters, filt_src, filt_crop)) < 0)
1791  goto fail;
1792 
1793  is->in_video_filter = filt_src;
1794  is->out_video_filter = filt_out;
1795 
1796 fail:
1797  av_freep(&buffersink_params);
1798  return ret;
1799 }
1800 
1801 #endif /* CONFIG_AVFILTER */
1802 
1803 static int video_thread(void *arg)
1804 {
1805  AVPacket pkt = { 0 };
1806  VideoState *is = arg;
1807  AVFrame *frame = avcodec_alloc_frame();
1808  int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1809  double pts;
1810  int ret;
1811  int serial = 0;
1812 
1813 #if CONFIG_AVFILTER
1814  AVCodecContext *codec = is->video_st->codec;
1815  AVFilterGraph *graph = avfilter_graph_alloc();
1816  AVFilterContext *filt_out = NULL, *filt_in = NULL;
1817  int last_w = 0;
1818  int last_h = 0;
1819  enum AVPixelFormat last_format = -2;
1820  int last_serial = -1;
1821 
1822  if (codec->codec->capabilities & CODEC_CAP_DR1) {
1823  is->use_dr1 = 1;
1824  codec->get_buffer = codec_get_buffer;
1826  codec->opaque = &is->buffer_pool;
1827  }
1828 #endif
1829 
1830  for (;;) {
1831 #if CONFIG_AVFILTER
1832  AVFilterBufferRef *picref;
1833  AVRational tb;
1834 #endif
1835  while (is->paused && !is->videoq.abort_request)
1836  SDL_Delay(10);
1837 
1839  av_free_packet(&pkt);
1840 
1841  ret = get_video_frame(is, frame, &pts_int, &pkt, &serial);
1842  if (ret < 0)
1843  goto the_end;
1844 
1845  if (!ret)
1846  continue;
1847 
1848 #if CONFIG_AVFILTER
1849  if ( last_w != frame->width
1850  || last_h != frame->height
1851  || last_format != frame->format
1852  || last_serial != serial) {
1853  av_log(NULL, AV_LOG_DEBUG,
1854  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
1855  last_w, last_h,
1856  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
1857  frame->width, frame->height,
1858  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), serial);
1859  avfilter_graph_free(&graph);
1860  graph = avfilter_graph_alloc();
1861  if ((ret = configure_video_filters(graph, is, vfilters, frame)) < 0) {
1862  SDL_Event event;
1863  event.type = FF_QUIT_EVENT;
1864  event.user.data1 = is;
1865  SDL_PushEvent(&event);
1866  av_free_packet(&pkt);
1867  goto the_end;
1868  }
1869  filt_in = is->in_video_filter;
1870  filt_out = is->out_video_filter;
1871  last_w = frame->width;
1872  last_h = frame->height;
1873  last_format = frame->format;
1874  last_serial = serial;
1875  }
1876 
1877  frame->pts = pts_int;
1878  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1879  if (is->use_dr1 && frame->opaque) {
1880  FrameBuffer *buf = frame->opaque;
1881  AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
1882  frame->data, frame->linesize,
1883  AV_PERM_READ | AV_PERM_PRESERVE,
1884  frame->width, frame->height,
1885  frame->format);
1886 
1887  avfilter_copy_frame_props(fb, frame);
1888  fb->buf->priv = buf;
1890 
1891  buf->refcount++;
1893 
1894  } else
1895  av_buffersrc_write_frame(filt_in, frame);
1896 
1897  av_free_packet(&pkt);
1898 
1899  while (ret >= 0) {
1900  is->frame_last_returned_time = av_gettime() / 1000000.0;
1901 
1902  ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
1903  if (ret < 0) {
1904  ret = 0;
1905  break;
1906  }
1907 
1908  is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
1909  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
1910  is->frame_last_filter_delay = 0;
1911 
1912  avfilter_copy_buf_props(frame, picref);
1913 
1914  pts_int = picref->pts;
1915  tb = filt_out->inputs[0]->time_base;
1916  pos = picref->pos;
1917  frame->opaque = picref;
1918 
1919  if (av_cmp_q(tb, is->video_st->time_base)) {
1920  av_unused int64_t pts1 = pts_int;
1921  pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1922  av_dlog(NULL, "video_thread(): "
1923  "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1924  tb.num, tb.den, pts1,
1925  is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1926  }
1927  pts = pts_int * av_q2d(is->video_st->time_base);
1928  ret = queue_picture(is, frame, pts, pos, serial);
1929  }
1930 #else
1931  pts = pts_int * av_q2d(is->video_st->time_base);
1932  ret = queue_picture(is, frame, pts, pkt.pos, serial);
1933 #endif
1934 
1935  if (ret < 0)
1936  goto the_end;
1937  }
1938  the_end:
1940 #if CONFIG_AVFILTER
1941  avfilter_graph_free(&graph);
1942 #endif
1943  av_free_packet(&pkt);
1944  avcodec_free_frame(&frame);
1945  return 0;
1946 }
1947 
1948 static int subtitle_thread(void *arg)
1949 {
1950  VideoState *is = arg;
1951  SubPicture *sp;
1952  AVPacket pkt1, *pkt = &pkt1;
1953  int got_subtitle;
1954  double pts;
1955  int i, j;
1956  int r, g, b, y, u, v, a;
1957 
1958  for (;;) {
1959  while (is->paused && !is->subtitleq.abort_request) {
1960  SDL_Delay(10);
1961  }
1962  if (packet_queue_get(&is->subtitleq, pkt, 1, NULL) < 0)
1963  break;
1964 
1965  if (pkt->data == flush_pkt.data) {
1967  continue;
1968  }
1969  SDL_LockMutex(is->subpq_mutex);
1970  while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1971  !is->subtitleq.abort_request) {
1972  SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1973  }
1974  SDL_UnlockMutex(is->subpq_mutex);
1975 
1976  if (is->subtitleq.abort_request)
1977  return 0;
1978 
1979  sp = &is->subpq[is->subpq_windex];
1980 
1981  /* NOTE: ipts is the PTS of the _first_ picture beginning in
1982  this packet, if any */
1983  pts = 0;
1984  if (pkt->pts != AV_NOPTS_VALUE)
1985  pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1986 
1987  avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1988  &got_subtitle, pkt);
1989  if (got_subtitle && sp->sub.format == 0) {
1990  if (sp->sub.pts != AV_NOPTS_VALUE)
1991  pts = sp->sub.pts / (double)AV_TIME_BASE;
1992  sp->pts = pts;
1993 
1994  for (i = 0; i < sp->sub.num_rects; i++)
1995  {
1996  for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1997  {
1998  RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1999  y = RGB_TO_Y_CCIR(r, g, b);
2000  u = RGB_TO_U_CCIR(r, g, b, 0);
2001  v = RGB_TO_V_CCIR(r, g, b, 0);
2002  YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
2003  }
2004  }
2005 
2006  /* now we can update the picture count */
2007  if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
2008  is->subpq_windex = 0;
2009  SDL_LockMutex(is->subpq_mutex);
2010  is->subpq_size++;
2011  SDL_UnlockMutex(is->subpq_mutex);
2012  }
2013  av_free_packet(pkt);
2014  }
2015  return 0;
2016 }
2017 
2018 /* copy samples for viewing in editor window */
2019 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2020 {
2021  int size, len;
2022 
2023  size = samples_size / sizeof(short);
2024  while (size > 0) {
2025  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2026  if (len > size)
2027  len = size;
2028  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2029  samples += len;
2030  is->sample_array_index += len;
2031  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2032  is->sample_array_index = 0;
2033  size -= len;
2034  }
2035 }
2036 
2037 /* return the wanted number of samples to get better sync if sync_type is video
2038  * or external master clock */
2039 static int synchronize_audio(VideoState *is, int nb_samples)
2040 {
2041  int wanted_nb_samples = nb_samples;
2042 
2043  /* if not master, then we try to remove or add samples to correct the clock */
2044  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2045  double diff, avg_diff;
2046  int min_nb_samples, max_nb_samples;
2047 
2048  diff = get_audio_clock(is) - get_master_clock(is);
2049 
2050  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2051  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2052  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2053  /* not enough measures to have a correct estimate */
2054  is->audio_diff_avg_count++;
2055  } else {
2056  /* estimate the A-V difference */
2057  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2058 
2059  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2060  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2061  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2062  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2063  wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
2064  }
2065  av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2066  diff, avg_diff, wanted_nb_samples - nb_samples,
2067  is->audio_clock, is->audio_diff_threshold);
2068  }
2069  } else {
2070  /* too big difference : may be initial PTS errors, so
2071  reset A-V filter */
2072  is->audio_diff_avg_count = 0;
2073  is->audio_diff_cum = 0;
2074  }
2075  }
2076 
2077  return wanted_nb_samples;
2078 }
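A worked example of the clamping above (illustrative numbers): for nb_samples = 1024 and SAMPLE_CORRECTION_PERCENT_MAX = 10, the corrected request is limited to the range 1024 * 90 / 100 = 921 to 1024 * 110 / 100 = 1126 samples, so even a large measured drift can only stretch or shrink a single audio buffer by about 10%.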
2079 
2080 /**
2081  * Decode one audio frame and return its uncompressed size.
2082  *
2083  * The processed audio frame is decoded, converted if required, and
2084  * stored in is->audio_buf, with size in bytes given by the return
2085  * value.
2086  */
2087 static int audio_decode_frame(VideoState *is)
2088 {
2089  AVPacket *pkt_temp = &is->audio_pkt_temp;
2090  AVPacket *pkt = &is->audio_pkt;
2091  AVCodecContext *dec = is->audio_st->codec;
2092  int len1, len2, data_size, resampled_data_size;
2093  int64_t dec_channel_layout;
2094  int got_frame;
2095  av_unused double audio_clock0;
2096  int new_packet = 0;
2097  int flush_complete = 0;
2098  int wanted_nb_samples;
2099 
2100  for (;;) {
2101  /* NOTE: the audio packet can contain several frames */
2102  while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
2103  if (!is->frame) {
2104  if (!(is->frame = avcodec_alloc_frame()))
2105  return AVERROR(ENOMEM);
2106  } else
2107  avcodec_get_frame_defaults(is->frame);
2108
2109  if (is->audioq.serial != is->audio_pkt_temp_serial)
2110  break;
2111 
2112  if (is->paused)
2113  return -1;
2114 
2115  if (flush_complete)
2116  break;
2117  new_packet = 0;
2118  len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
2119  if (len1 < 0) {
2120  /* if error, we skip the frame */
2121  pkt_temp->size = 0;
2122  break;
2123  }
2124 
2125  pkt_temp->data += len1;
2126  pkt_temp->size -= len1;
2127 
2128  if (!got_frame) {
2129  /* stop sending empty packets if the decoder is finished */
2130  if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
2131  flush_complete = 1;
2132  continue;
2133  }
2134  data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(is->frame),
2135  is->frame->nb_samples,
2136  is->frame->format, 1);
2137 
2138  dec_channel_layout =
2139  (is->frame->channel_layout && av_frame_get_channels(is->frame) == av_get_channel_layout_nb_channels(is->frame->channel_layout)) ?
2140  is->frame->channel_layout : av_get_default_channel_layout(av_frame_get_channels(is->frame));
2141  wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
2142 
2143  if (is->frame->format != is->audio_src.fmt ||
2144  dec_channel_layout != is->audio_src.channel_layout ||
2145  is->frame->sample_rate != is->audio_src.freq ||
2146  (wanted_nb_samples != is->frame->nb_samples && !is->swr_ctx)) {
2147  swr_free(&is->swr_ctx);
2148  is->swr_ctx = swr_alloc_set_opts(NULL,
2149  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2150  dec_channel_layout, is->frame->format, is->frame->sample_rate,
2151  0, NULL);
2152  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2153  fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2154  is->frame->sample_rate, av_get_sample_fmt_name(is->frame->format), av_frame_get_channels(is->frame),
2155  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2156  break;
2157  }
2158  is->audio_src.channel_layout = dec_channel_layout;
2159  is->audio_src.channels = av_frame_get_channels(is->frame);
2160  is->audio_src.freq = is->frame->sample_rate;
2161  is->audio_src.fmt = is->frame->format;
2162  }
2163 
2164  if (is->swr_ctx) {
2165  const uint8_t **in = (const uint8_t **)is->frame->extended_data;
2166  uint8_t **out = &is->audio_buf1;
2167  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate + 256;
2168  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2169  if (wanted_nb_samples != is->frame->nb_samples) {
2170  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / is->frame->sample_rate,
2171  wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate) < 0) {
2172  fprintf(stderr, "swr_set_compensation() failed\n");
2173  break;
2174  }
2175  }
2176  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2177  if (!is->audio_buf1)
2178  return AVERROR(ENOMEM);
2179  len2 = swr_convert(is->swr_ctx, out, out_count, in, is->frame->nb_samples);
2180  if (len2 < 0) {
2181  fprintf(stderr, "swr_convert() failed\n");
2182  break;
2183  }
2184  if (len2 == out_count) {
2185  fprintf(stderr, "warning: audio buffer is probably too small\n");
2186  swr_init(is->swr_ctx);
2187  }
2188  is->audio_buf = is->audio_buf1;
2189  resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2190  } else {
2191  is->audio_buf = is->frame->data[0];
2192  resampled_data_size = data_size;
2193  }
2194 
2195  audio_clock0 = is->audio_clock;
2196  is->audio_clock += (double)data_size /
2197  (av_frame_get_channels(is->frame) * is->frame->sample_rate * av_get_bytes_per_sample(is->frame->format));
2198 #ifdef DEBUG
2199  {
2200  static double last_clock;
2201  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2202  is->audio_clock - last_clock,
2203  is->audio_clock, audio_clock0);
2204  last_clock = is->audio_clock;
2205  }
2206 #endif
2207  return resampled_data_size;
2208  }
2209 
2210  /* free the current packet */
2211  if (pkt->data)
2212  av_free_packet(pkt);
2213  memset(pkt_temp, 0, sizeof(*pkt_temp));
2214 
2215  if (is->audioq.abort_request) {
2216  return -1;
2217  }
2218 
2219  if (is->audioq.nb_packets == 0)
2220  SDL_CondSignal(is->continue_read_thread);
2221 
2222  /* read next packet */
2223  if ((new_packet = packet_queue_get(&is->audioq, pkt, 1, &is->audio_pkt_temp_serial)) < 0)
2224  return -1;
2225 
2226  if (pkt->data == flush_pkt.data) {
2227  avcodec_flush_buffers(dec);
2228  flush_complete = 0;
2229  }
2230 
2231  *pkt_temp = *pkt;
2232 
2233  /* update the audio clock with the pts */
2234  if (pkt->pts != AV_NOPTS_VALUE) {
2235  is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2236  is->audio_clock_serial = is->audio_pkt_temp_serial;
2237  }
2238  }
2239 }
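/*
 * A minimal sketch (not part of ffplay.c) of the clock bookkeeping above:
 * audio_clock advances by the duration of the decoded data, i.e. bytes
 * divided by the byte rate of the decoded format.  The values below
 * (2 channels, 48 kHz, 16-bit samples) and the function name are
 * illustrative only.
 */
#include <stdio.h>

static double advance_audio_clock(double clock, int data_size,
                                  int channels, int sample_rate,
                                  int bytes_per_sample)
{
    int bytes_per_sec = channels * sample_rate * bytes_per_sample;
    return clock + (double)data_size / bytes_per_sec;
}

int main(void)
{
    /* 4096 bytes of s16 stereo at 48 kHz last 4096 / 192000 ~= 21.3 ms */
    printf("%f\n", advance_audio_clock(0.0, 4096, 2, 48000, 2));
    return 0;
}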
2240 
2241 /* prepare a new audio buffer */
2242 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2243 {
2244  VideoState *is = opaque;
2245  int audio_size, len1;
2246  int bytes_per_sec;
2247  int frame_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, 1, is->audio_tgt.fmt, 1);
2248
2249  audio_callback_time = av_gettime();
2250
2251  while (len > 0) {
2252  if (is->audio_buf_index >= is->audio_buf_size) {
2253  audio_size = audio_decode_frame(is);
2254  if (audio_size < 0) {
2255  /* if error, just output silence */
2256  is->audio_buf = is->silence_buf;
2257  is->audio_buf_size = sizeof(is->silence_buf) / frame_size * frame_size;
2258  } else {
2259  if (is->show_mode != SHOW_MODE_VIDEO)
2260  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2261  is->audio_buf_size = audio_size;
2262  }
2263  is->audio_buf_index = 0;
2264  }
2265  len1 = is->audio_buf_size - is->audio_buf_index;
2266  if (len1 > len)
2267  len1 = len;
2268  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2269  len -= len1;
2270  stream += len1;
2271  is->audio_buf_index += len1;
2272  }
2273  bytes_per_sec = is->audio_tgt.freq * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2274  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2275  /* Let's assume the audio driver that is used by SDL has two periods. */
2276  is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2277  is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2278  if (is->audioq.serial == is->audio_clock_serial)
2279  check_external_clock_sync(is, is->audio_current_pts);
2280 }
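/*
 * A minimal sketch (not part of ffplay.c) of the PTS computed by the callback
 * above: the decode-side clock is pushed back by everything produced but not
 * yet played, i.e. the bytes still sitting in audio_buf plus (by the stated
 * assumption) two hardware periods of the SDL device.  The numbers and the
 * function name playing_pts are illustrative only.
 */
#include <stdio.h>

static double playing_pts(double audio_clock, int hw_buf_size,
                          int write_buf_size, int bytes_per_sec)
{
    return audio_clock - (double)(2 * hw_buf_size + write_buf_size) / bytes_per_sec;
}

int main(void)
{
    /* 8192-byte SDL buffer, 1000 unplayed bytes, s16 stereo at 44.1 kHz */
    printf("%f\n", playing_pts(10.0, 8192, 1000, 44100 * 2 * 2));
    return 0;
}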
2281 
2282 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2283 {
2284  SDL_AudioSpec wanted_spec, spec;
2285  const char *env;
2286  const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2287 
2288  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2289  if (env) {
2290  wanted_nb_channels = atoi(env);
2291  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2292  }
2293  if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2294  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2295  wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2296  }
2297  wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2298  wanted_spec.freq = wanted_sample_rate;
2299  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2300  fprintf(stderr, "Invalid sample rate or channel count!\n");
2301  return -1;
2302  }
2303  wanted_spec.format = AUDIO_S16SYS;
2304  wanted_spec.silence = 0;
2305  wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2306  wanted_spec.callback = sdl_audio_callback;
2307  wanted_spec.userdata = opaque;
2308  while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2309  fprintf(stderr, "SDL_OpenAudio (%d channels): %s\n", wanted_spec.channels, SDL_GetError());
2310  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2311  if (!wanted_spec.channels) {
2312  fprintf(stderr, "No more channel combinations to try, audio open failed\n");
2313  return -1;
2314  }
2315  wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2316  }
2317  if (spec.format != AUDIO_S16SYS) {
2318  fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
2319  return -1;
2320  }
2321  if (spec.channels != wanted_spec.channels) {
2322  wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2323  if (!wanted_channel_layout) {
2324  fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
2325  return -1;
2326  }
2327  }
2328 
2329  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2330  audio_hw_params->freq = spec.freq;
2331  audio_hw_params->channel_layout = wanted_channel_layout;
2332  audio_hw_params->channels = spec.channels;
2333  return spec.size;
2334 }
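/*
 * A minimal sketch (not part of ffplay.c) of the retry loop in audio_open()
 * above: when SDL_OpenAudio() fails, the next_nb_channels[] table maps the
 * failing channel count to the next one to try, so a failed 8-channel open is
 * retried with 6, then 4, then 2, then 1, then gives up.  FFMIN mirrors the
 * libavutil macro.
 */
#include <stdio.h>

#define FFMIN(a, b) ((a) > (b) ? (b) : (a))

int main(void)
{
    static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
    int channels = 8;

    while (channels) {
        printf("trying %d channels\n", channels);
        channels = next_nb_channels[FFMIN(7, channels)];
    }
    return 0;
}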
2335 
2336 /* open a given stream. Return 0 if OK */
2337 static int stream_component_open(VideoState *is, int stream_index)
2338 {
2339  AVFormatContext *ic = is->ic;
2340  AVCodecContext *avctx;
2341  AVCodec *codec;
2342  const char *forced_codec_name = NULL;
2343  AVDictionary *opts;
2344  AVDictionaryEntry *t = NULL;
2345
2346  if (stream_index < 0 || stream_index >= ic->nb_streams)
2347  return -1;
2348  avctx = ic->streams[stream_index]->codec;
2349 
2350  codec = avcodec_find_decoder(avctx->codec_id);
2351 
2352  switch(avctx->codec_type){
2353  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2354  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2355  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2356  }
2357  if (forced_codec_name)
2358  codec = avcodec_find_decoder_by_name(forced_codec_name);
2359  if (!codec) {
2360  if (forced_codec_name) fprintf(stderr, "No codec could be found with name '%s'\n", forced_codec_name);
2361  else fprintf(stderr, "No codec could be found with id %d\n", avctx->codec_id);
2362  return -1;
2363  }
2364 
2365  avctx->codec_id = codec->id;
2366  avctx->workaround_bugs = workaround_bugs;
2367  avctx->lowres = lowres;
2368  if(avctx->lowres > codec->max_lowres){
2369  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2370  codec->max_lowres);
2371  avctx->lowres= codec->max_lowres;
2372  }
2373  avctx->idct_algo = idct;
2374  avctx->skip_frame = skip_frame;
2375  avctx->skip_idct = skip_idct;
2376  avctx->skip_loop_filter = skip_loop_filter;
2377  avctx->error_concealment = error_concealment;
2378
2379  if(avctx->lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2380  if (fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2381  if(codec->capabilities & CODEC_CAP_DR1)
2382  avctx->flags |= CODEC_FLAG_EMU_EDGE;
2383 
2384  opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2385  if (!av_dict_get(opts, "threads", NULL, 0))
2386  av_dict_set(&opts, "threads", "auto", 0);
2387  if (avcodec_open2(avctx, codec, &opts) < 0)
2388  return -1;
2389  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2390  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2391  return AVERROR_OPTION_NOT_FOUND;
2392  }
2393 
2394  /* prepare audio output */
2395  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2396  int audio_hw_buf_size = audio_open(is, avctx->channel_layout, avctx->channels, avctx->sample_rate, &is->audio_src);
2397  if (audio_hw_buf_size < 0)
2398  return -1;
2399  is->audio_hw_buf_size = audio_hw_buf_size;
2400  is->audio_tgt = is->audio_src;
2401  }
2402 
2403  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2404  switch (avctx->codec_type) {
2405  case AVMEDIA_TYPE_AUDIO:
2406  is->audio_stream = stream_index;
2407  is->audio_st = ic->streams[stream_index];
2408  is->audio_buf_size = 0;
2409  is->audio_buf_index = 0;
2410 
2411  /* init averaging filter */
2412  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2413  is->audio_diff_avg_count = 0;
2414  /* since we do not have a precise enough audio FIFO fullness,
2415  we correct audio sync only if larger than this threshold */
2416  is->audio_diff_threshold = 2.0 * is->audio_hw_buf_size / av_samples_get_buffer_size(NULL, is->audio_tgt.channels, is->audio_tgt.freq, is->audio_tgt.fmt, 1);
2417
2418  memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2419  memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
2420  packet_queue_start(&is->audioq);
2421  SDL_PauseAudio(0);
2422  break;
2423  case AVMEDIA_TYPE_VIDEO:
2424  is->video_stream = stream_index;
2425  is->video_st = ic->streams[stream_index];
2426 
2427  packet_queue_start(&is->videoq);
2428  is->video_tid = SDL_CreateThread(video_thread, is);
2429  break;
2430  case AVMEDIA_TYPE_SUBTITLE:
2431  is->subtitle_stream = stream_index;
2432  is->subtitle_st = ic->streams[stream_index];
2433  packet_queue_start(&is->subtitleq);
2434
2435  is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2436  break;
2437  default:
2438  break;
2439  }
2440  return 0;
2441 }
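/*
 * A minimal sketch (not part of ffplay.c): with the coefficient initialized
 * above, exp(log(0.01) / AUDIO_DIFF_AVG_NB), the A-V difference accumulator in
 * synchronize_audio() is an exponentially weighted average in which the
 * 20th-oldest measurement still carries about 1% weight, and multiplying by
 * (1 - coef) normalizes the geometric series.  The constant 5 ms error fed in
 * below is purely illustrative.
 */
#include <math.h>
#include <stdio.h>

#define AUDIO_DIFF_AVG_NB 20

int main(void)
{
    double coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
    double cum  = 0.0;
    for (int i = 0; i < 100; i++)            /* feed a constant 5 ms error */
        cum = 0.005 + coef * cum;
    printf("coef=%f avg=%f\n", coef, cum * (1.0 - coef)); /* avg converges to 0.005 */
    return 0;
}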
2442 
2443 static void stream_component_close(VideoState *is, int stream_index)
2444 {
2445  AVFormatContext *ic = is->ic;
2446  AVCodecContext *avctx;
2447 
2448  if (stream_index < 0 || stream_index >= ic->nb_streams)
2449  return;
2450  avctx = ic->streams[stream_index]->codec;
2451 
2452  switch (avctx->codec_type) {
2453  case AVMEDIA_TYPE_AUDIO:
2454  packet_queue_abort(&is->audioq);
2455 
2456  SDL_CloseAudio();
2457 
2458  packet_queue_flush(&is->audioq);
2459  av_free_packet(&is->audio_pkt);
2460  swr_free(&is->swr_ctx);
2461  av_freep(&is->audio_buf1);
2462  is->audio_buf1_size = 0;
2463  is->audio_buf = NULL;
2464  avcodec_free_frame(&is->frame);
2465 
2466  if (is->rdft) {
2467  av_rdft_end(is->rdft);
2468  av_freep(&is->rdft_data);
2469  is->rdft = NULL;
2470  is->rdft_bits = 0;
2471  }
2472  break;
2473  case AVMEDIA_TYPE_VIDEO:
2474  packet_queue_abort(&is->videoq);
2475 
2476  /* note: we also signal this mutex to make sure we deblock the
2477  video thread in all cases */
2478  SDL_LockMutex(is->pictq_mutex);
2479  SDL_CondSignal(is->pictq_cond);
2480  SDL_UnlockMutex(is->pictq_mutex);
2481 
2482  SDL_WaitThread(is->video_tid, NULL);
2483 
2484  packet_queue_flush(&is->videoq);
2485  break;
2486  case AVMEDIA_TYPE_SUBTITLE:
2487  packet_queue_abort(&is->subtitleq);
2488
2489  /* note: we also signal this mutex to make sure we deblock the
2490  subtitle thread in all cases */
2491  SDL_LockMutex(is->subpq_mutex);
2492  is->subtitle_stream_changed = 1;
2493 
2494  SDL_CondSignal(is->subpq_cond);
2495  SDL_UnlockMutex(is->subpq_mutex);
2496 
2497  SDL_WaitThread(is->subtitle_tid, NULL);
2498 
2499  packet_queue_flush(&is->subtitleq);
2500  break;
2501  default:
2502  break;
2503  }
2504 
2505  ic->streams[stream_index]->discard = AVDISCARD_ALL;
2506  avcodec_close(avctx);
2507 #if CONFIG_AVFILTER
2508  free_buffer_pool(&is->buffer_pool);
2509 #endif
2510  switch (avctx->codec_type) {
2511  case AVMEDIA_TYPE_AUDIO:
2512  is->audio_st = NULL;
2513  is->audio_stream = -1;
2514  break;
2515  case AVMEDIA_TYPE_VIDEO:
2516  is->video_st = NULL;
2517  is->video_stream = -1;
2518  break;
2519  case AVMEDIA_TYPE_SUBTITLE:
2520  is->subtitle_st = NULL;
2521  is->subtitle_stream = -1;
2522  break;
2523  default:
2524  break;
2525  }
2526 }
2527 
2528 static int decode_interrupt_cb(void *ctx)
2529 {
2530  VideoState *is = ctx;
2531  return is->abort_request;
2532 }
2533 
2534 static int is_realtime(AVFormatContext *s)
2535 {
2536  if( !strcmp(s->iformat->name, "rtp")
2537  || !strcmp(s->iformat->name, "rtsp")
2538  || !strcmp(s->iformat->name, "sdp")
2539  )
2540  return 1;
2541 
2542  if(s->pb && ( !strncmp(s->filename, "rtp:", 4)
2543  || !strncmp(s->filename, "udp:", 4)
2544  )
2545  )
2546  return 1;
2547  return 0;
2548 }
2549 
2550 /* this thread gets the stream from the disk or the network */
2551 static int read_thread(void *arg)
2552 {
2553  VideoState *is = arg;
2554  AVFormatContext *ic = NULL;
2555  int err, i, ret;
2556  int st_index[AVMEDIA_TYPE_NB];
2557  AVPacket pkt1, *pkt = &pkt1;
2558  int eof = 0;
2559  int pkt_in_play_range = 0;
2560  AVDictionaryEntry *t;
2561  AVDictionary **opts;
2562  int orig_nb_streams;
2563  SDL_mutex *wait_mutex = SDL_CreateMutex();
2564 
2565  memset(st_index, -1, sizeof(st_index));
2566  is->last_video_stream = is->video_stream = -1;
2567  is->last_audio_stream = is->audio_stream = -1;
2568  is->last_subtitle_stream = is->subtitle_stream = -1;
2569 
2570  ic = avformat_alloc_context();
2571  ic->interrupt_callback.callback = decode_interrupt_cb;
2572  ic->interrupt_callback.opaque = is;
2573  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2574  if (err < 0) {
2575  print_error(is->filename, err);
2576  ret = -1;
2577  goto fail;
2578  }
2579  if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2580  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2581  ret = AVERROR_OPTION_NOT_FOUND;
2582  goto fail;
2583  }
2584  is->ic = ic;
2585 
2586  if (genpts)
2587  ic->flags |= AVFMT_FLAG_GENPTS;
2588 
2589  opts = setup_find_stream_info_opts(ic, codec_opts);
2590  orig_nb_streams = ic->nb_streams;
2591 
2592  err = avformat_find_stream_info(ic, opts);
2593  if (err < 0) {
2594  fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2595  ret = -1;
2596  goto fail;
2597  }
2598  for (i = 0; i < orig_nb_streams; i++)
2599  av_dict_free(&opts[i]);
2600  av_freep(&opts);
2601 
2602  if (ic->pb)
2603  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use url_feof() to test for the end
2604 
2605  if (seek_by_bytes < 0)
2606  seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
2607
2608  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2609 
2610  /* if seeking requested, we execute it */
2611  if (start_time != AV_NOPTS_VALUE) {
2612  int64_t timestamp;
2613 
2614  timestamp = start_time;
2615  /* add the stream start time */
2616  if (ic->start_time != AV_NOPTS_VALUE)
2617  timestamp += ic->start_time;
2618  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2619  if (ret < 0) {
2620  fprintf(stderr, "%s: could not seek to position %0.3f\n",
2621  is->filename, (double)timestamp / AV_TIME_BASE);
2622  }
2623  }
2624 
2625  is->realtime = is_realtime(ic);
2626 
2627  for (i = 0; i < ic->nb_streams; i++)
2628  ic->streams[i]->discard = AVDISCARD_ALL;
2629  if (!video_disable)
2630  st_index[AVMEDIA_TYPE_VIDEO] =
2631  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2632  wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2633  if (!audio_disable)
2634  st_index[AVMEDIA_TYPE_AUDIO] =
2635  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2636  wanted_stream[AVMEDIA_TYPE_AUDIO],
2637  st_index[AVMEDIA_TYPE_VIDEO],
2638  NULL, 0);
2639  if (!video_disable && !subtitle_disable)
2640  st_index[AVMEDIA_TYPE_SUBTITLE] =
2641  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2642  wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2643  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2644  st_index[AVMEDIA_TYPE_AUDIO] :
2645  st_index[AVMEDIA_TYPE_VIDEO]),
2646  NULL, 0);
2647  if (show_status) {
2648  av_dump_format(ic, 0, is->filename, 0);
2649  }
2650 
2651  is->show_mode = show_mode;
2652 
2653  /* open the streams */
2654  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2655  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2656  }
2657 
2658  ret = -1;
2659  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2660  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2661  }
2662  if (is->show_mode == SHOW_MODE_NONE)
2663  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2664 
2665  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2666  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2667  }
2668 
2669  if (is->video_stream < 0 && is->audio_stream < 0) {
2670  fprintf(stderr, "%s: could not open codecs\n", is->filename);
2671  ret = -1;
2672  goto fail;
2673  }
2674 
2675  if (infinite_buffer < 0 && is->realtime)
2676  infinite_buffer = 1;
2677 
2678  for (;;) {
2679  if (is->abort_request)
2680  break;
2681  if (is->paused != is->last_paused) {
2682  is->last_paused = is->paused;
2683  if (is->paused)
2684  is->read_pause_return = av_read_pause(ic);
2685  else
2686  av_read_play(ic);
2687  }
2688 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2689  if (is->paused &&
2690  (!strcmp(ic->iformat->name, "rtsp") ||
2691  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2692  /* wait 10 ms to avoid trying to get another packet */
2693  /* XXX: horrible */
2694  SDL_Delay(10);
2695  continue;
2696  }
2697 #endif
2698  if (is->seek_req) {
2699  int64_t seek_target = is->seek_pos;
2700  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2701  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2702 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2703 // of the seek_pos/seek_rel variables
2704 
2705  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2706  if (ret < 0) {
2707  fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2708  } else {
2709  if (is->audio_stream >= 0) {
2710  packet_queue_flush(&is->audioq);
2711  packet_queue_put(&is->audioq, &flush_pkt);
2712  }
2713  if (is->subtitle_stream >= 0) {
2714  packet_queue_flush(&is->subtitleq);
2715  packet_queue_put(&is->subtitleq, &flush_pkt);
2716  }
2717  if (is->video_stream >= 0) {
2718  packet_queue_flush(&is->videoq);
2719  packet_queue_put(&is->videoq, &flush_pkt);
2720  }
2721  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
2722  update_external_clock_pts(is, NAN);
2723  } else {
2724  update_external_clock_pts(is, seek_target / (double)AV_TIME_BASE);
2725  }
2726  }
2727  is->seek_req = 0;
2728  eof = 0;
2729  if (is->paused)
2730  step_to_next_frame(is);
2731  }
2732  if (is->queue_attachments_req) {
2733  avformat_queue_attached_pictures(ic);
2734  is->queue_attachments_req = 0;
2735  }
2736 
2737  /* if the queues are full, no need to read more */
2738  if (infinite_buffer<1 &&
2739  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2740  || ( (is->audioq .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
2741  && (is->videoq .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request)
2742  && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
2743  /* wait 10 ms */
2744  SDL_LockMutex(wait_mutex);
2745  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2746  SDL_UnlockMutex(wait_mutex);
2747  continue;
2748  }
2749  if (eof) {
2750  if (is->video_stream >= 0) {
2751  av_init_packet(pkt);
2752  pkt->data = NULL;
2753  pkt->size = 0;
2754  pkt->stream_index = is->video_stream;
2755  packet_queue_put(&is->videoq, pkt);
2756  }
2757  if (is->audio_stream >= 0 &&
2758  is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2759  av_init_packet(pkt);
2760  pkt->data = NULL;
2761  pkt->size = 0;
2762  pkt->stream_index = is->audio_stream;
2763  packet_queue_put(&is->audioq, pkt);
2764  }
2765  SDL_Delay(10);
2766  if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2767  if (loop != 1 && (!loop || --loop)) {
2768  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2769  } else if (autoexit) {
2770  ret = AVERROR_EOF;
2771  goto fail;
2772  }
2773  }
2774  eof=0;
2775  continue;
2776  }
2777  ret = av_read_frame(ic, pkt);
2778  if (ret < 0) {
2779  if (ret == AVERROR_EOF || url_feof(ic->pb))
2780  eof = 1;
2781  if (ic->pb && ic->pb->error)
2782  break;
2783  SDL_LockMutex(wait_mutex);
2784  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2785  SDL_UnlockMutex(wait_mutex);
2786  continue;
2787  }
2788  /* check if packet is in play range specified by user, then queue, otherwise discard */
2789  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2790  (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2791  av_q2d(ic->streams[pkt->stream_index]->time_base) -
2792  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2793  <= ((double)duration / 1000000);
2794  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2795  packet_queue_put(&is->audioq, pkt);
2796  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2797  packet_queue_put(&is->videoq, pkt);
2798  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2799  packet_queue_put(&is->subtitleq, pkt);
2800  } else {
2801  av_free_packet(pkt);
2802  }
2803  }
2804  /* wait until the end */
2805  while (!is->abort_request) {
2806  SDL_Delay(100);
2807  }
2808 
2809  ret = 0;
2810  fail:
2811  /* close each stream */
2812  if (is->audio_stream >= 0)
2813  stream_component_close(is, is->audio_stream);
2814  if (is->video_stream >= 0)
2815  stream_component_close(is, is->video_stream);
2816  if (is->subtitle_stream >= 0)
2817  stream_component_close(is, is->subtitle_stream);
2818  if (is->ic) {
2819  avformat_close_input(&is->ic);
2820  }
2821 
2822  if (ret != 0) {
2823  SDL_Event event;
2824 
2825  event.type = FF_QUIT_EVENT;
2826  event.user.data1 = is;
2827  SDL_PushEvent(&event);
2828  }
2829  SDL_DestroyMutex(wait_mutex);
2830  return 0;
2831 }
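/*
 * A minimal sketch (not part of ffplay.c) of the "queues are full" test used
 * in the read loop above: reading pauses either when the three packet queues
 * together hold more than MAX_QUEUE_SIZE bytes, or when every open stream
 * already has more than MIN_FRAMES packets buffered (closed or aborted
 * streams count as satisfied).  struct queue_state and the function names are
 * illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
#define MIN_FRAMES 5

struct queue_state { int size; int nb_packets; bool open; bool abort; };

static bool stream_has_enough(const struct queue_state *q)
{
    return !q->open || q->abort || q->nb_packets > MIN_FRAMES;
}

static bool queues_full(const struct queue_state *a,
                        const struct queue_state *v,
                        const struct queue_state *s)
{
    if (a->size + v->size + s->size > MAX_QUEUE_SIZE)
        return true;
    return stream_has_enough(a) && stream_has_enough(v) && stream_has_enough(s);
}

int main(void)
{
    struct queue_state a = { 4096,    12, true,  false };
    struct queue_state v = { 1 << 20,  3, true,  false };
    struct queue_state s = { 0,        0, false, false };
    printf("full: %d\n", queues_full(&a, &v, &s)); /* 0: video still wants packets */
    return 0;
}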
2832 
2833 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2834 {
2835  VideoState *is;
2836 
2837  is = av_mallocz(sizeof(VideoState));
2838  if (!is)
2839  return NULL;
2840  av_strlcpy(is->filename, filename, sizeof(is->filename));
2841  is->iformat = iformat;
2842  is->ytop = 0;
2843  is->xleft = 0;
2844 
2845  /* start video display */
2846  is->pictq_mutex = SDL_CreateMutex();
2847  is->pictq_cond = SDL_CreateCond();
2848 
2849  is->subpq_mutex = SDL_CreateMutex();
2850  is->subpq_cond = SDL_CreateCond();
2851 
2852  packet_queue_init(&is->videoq);
2853  packet_queue_init(&is->audioq);
2854  packet_queue_init(&is->subtitleq);
2855
2856  is->continue_read_thread = SDL_CreateCond();
2857 
2858  update_external_clock_pts(is, NAN);
2859  update_external_clock_speed(is, 1.0);
2860  is->audio_current_pts_drift = -av_gettime() / 1000000.0;
2861  is->video_current_pts_drift = is->audio_current_pts_drift;
2862  is->audio_clock_serial = -1;
2863  is->video_clock_serial = -1;
2864  is->av_sync_type = av_sync_type;
2865  is->read_tid = SDL_CreateThread(read_thread, is);
2866  if (!is->read_tid) {
2867  av_free(is);
2868  return NULL;
2869  }
2870  return is;
2871 }
2872 
2873 static void stream_cycle_channel(VideoState *is, int codec_type)
2874 {
2875  AVFormatContext *ic = is->ic;
2876  int start_index, stream_index;
2877  int old_index;
2878  AVStream *st;
2879 
2880  if (codec_type == AVMEDIA_TYPE_VIDEO) {
2881  start_index = is->last_video_stream;
2882  old_index = is->video_stream;
2883  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
2884  start_index = is->last_audio_stream;
2885  old_index = is->audio_stream;
2886  } else {
2887  start_index = is->last_subtitle_stream;
2888  old_index = is->subtitle_stream;
2889  }
2890  stream_index = start_index;
2891  for (;;) {
2892  if (++stream_index >= is->ic->nb_streams)
2893  {
2894  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2895  {
2896  stream_index = -1;
2897  is->last_subtitle_stream = -1;
2898  goto the_end;
2899  }
2900  if (start_index == -1)
2901  return;
2902  stream_index = 0;
2903  }
2904  if (stream_index == start_index)
2905  return;
2906  st = ic->streams[stream_index];
2907  if (st->codec->codec_type == codec_type) {
2908  /* check that parameters are OK */
2909  switch (codec_type) {
2910  case AVMEDIA_TYPE_AUDIO:
2911  if (st->codec->sample_rate != 0 &&
2912  st->codec->channels != 0)
2913  goto the_end;
2914  break;
2915  case AVMEDIA_TYPE_VIDEO:
2916  case AVMEDIA_TYPE_SUBTITLE:
2917  goto the_end;
2918  default:
2919  break;
2920  }
2921  }
2922  }
2923  the_end:
2924  stream_component_close(is, old_index);
2925  stream_component_open(is, stream_index);
2926  if (codec_type == AVMEDIA_TYPE_VIDEO)
2927  is->queue_attachments_req = 1;
2928 }
2929 
2930 
2931 static void toggle_full_screen(VideoState *is)
2932 {
2933 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2934  /* OS X needs to reallocate the SDL overlays */
2935  int i;
2936  for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
2937  is->pictq[i].reallocate = 1;
2938 #endif
2939  is_full_screen = !is_full_screen;
2940  video_open(is, 1, NULL);
2941 }
2942 
2943 static void toggle_audio_display(VideoState *is)
2944 {
2945  int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2946  int next = is->show_mode;
2947  do {
2948  next = (next + 1) % SHOW_MODE_NB;
2949  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
2950  if (is->show_mode != next) {
2951  fill_rectangle(screen,
2952  is->xleft, is->ytop, is->width, is->height,
2953  bgcolor, 1);
2954  is->force_refresh = 1;
2955  is->show_mode = next;
2956  }
2957 }
2958 
2959 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
2960  double remaining_time = 0.0;
2961  SDL_PumpEvents();
2962  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_ALLEVENTS)) {
2963  if (!cursor_hidden && av_gettime() - cursor_last_shown > CURSOR_HIDE_DELAY) {
2964  SDL_ShowCursor(0);
2965  cursor_hidden = 1;
2966  }
2967  if (remaining_time > 0.0)
2968  av_usleep((int64_t)(remaining_time * 1000000.0));
2969  remaining_time = REFRESH_RATE;
2970  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
2971  video_refresh(is, &remaining_time);
2972  SDL_PumpEvents();
2973  }
2974 }
2975 
2976 /* handle an event sent by the GUI */
2977 static void event_loop(VideoState *cur_stream)
2978 {
2979  SDL_Event event;
2980  double incr, pos, frac;
2981 
2982  for (;;) {
2983  double x;
2984  refresh_loop_wait_event(cur_stream, &event);
2985  switch (event.type) {
2986  case SDL_KEYDOWN:
2987  if (exit_on_keydown) {
2988  do_exit(cur_stream);
2989  break;
2990  }
2991  switch (event.key.keysym.sym) {
2992  case SDLK_ESCAPE:
2993  case SDLK_q:
2994  do_exit(cur_stream);
2995  break;
2996  case SDLK_f:
2997  toggle_full_screen(cur_stream);
2998  cur_stream->force_refresh = 1;
2999  break;
3000  case SDLK_p:
3001  case SDLK_SPACE:
3002  toggle_pause(cur_stream);
3003  break;
3004  case SDLK_s: // S: Step to next frame
3005  step_to_next_frame(cur_stream);
3006  break;
3007  case SDLK_a:
3008  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3009  break;
3010  case SDLK_v:
3011  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3012  break;
3013  case SDLK_t:
3014  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3015  break;
3016  case SDLK_w:
3017  toggle_audio_display(cur_stream);
3018  break;
3019  case SDLK_PAGEUP:
3020  incr = 600.0;
3021  goto do_seek;
3022  case SDLK_PAGEDOWN:
3023  incr = -600.0;
3024  goto do_seek;
3025  case SDLK_LEFT:
3026  incr = -10.0;
3027  goto do_seek;
3028  case SDLK_RIGHT:
3029  incr = 10.0;
3030  goto do_seek;
3031  case SDLK_UP:
3032  incr = 60.0;
3033  goto do_seek;
3034  case SDLK_DOWN:
3035  incr = -60.0;
3036  do_seek:
3037  if (seek_by_bytes) {
3038  if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
3039  pos = cur_stream->video_current_pos;
3040  } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
3041  pos = cur_stream->audio_pkt.pos;
3042  } else
3043  pos = avio_tell(cur_stream->ic->pb);
3044  if (cur_stream->ic->bit_rate)
3045  incr *= cur_stream->ic->bit_rate / 8.0;
3046  else
3047  incr *= 180000.0;
3048  pos += incr;
3049  stream_seek(cur_stream, pos, incr, 1);
3050  } else {
3051  pos = get_master_clock(cur_stream);
3052  if (isnan(pos))
3053  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3054  pos += incr;
3055  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3056  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3057  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3058  }
3059  break;
3060  default:
3061  break;
3062  }
3063  break;
3064  case SDL_VIDEOEXPOSE:
3065  cur_stream->force_refresh = 1;
3066  break;
3067  case SDL_MOUSEBUTTONDOWN:
3068  if (exit_on_mousedown) {
3069  do_exit(cur_stream);
3070  break;
3071  }
3072  case SDL_MOUSEMOTION:
3073  if (cursor_hidden) {
3074  SDL_ShowCursor(1);
3075  cursor_hidden = 0;
3076  }
3077  cursor_last_shown = av_gettime();
3078  if (event.type == SDL_MOUSEBUTTONDOWN) {
3079  x = event.button.x;
3080  } else {
3081  if (event.motion.state != SDL_PRESSED)
3082  break;
3083  x = event.motion.x;
3084  }
3085  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3086  uint64_t size = avio_size(cur_stream->ic->pb);
3087  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3088  } else {
3089  int64_t ts;
3090  int ns, hh, mm, ss;
3091  int tns, thh, tmm, tss;
3092  tns = cur_stream->ic->duration / 1000000LL;
3093  thh = tns / 3600;
3094  tmm = (tns % 3600) / 60;
3095  tss = (tns % 60);
3096  frac = x / cur_stream->width;
3097  ns = frac * tns;
3098  hh = ns / 3600;
3099  mm = (ns % 3600) / 60;
3100  ss = (ns % 60);
3101  fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3102  hh, mm, ss, thh, tmm, tss);
3103  ts = frac * cur_stream->ic->duration;
3104  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3105  ts += cur_stream->ic->start_time;
3106  stream_seek(cur_stream, ts, 0, 0);
3107  }
3108  break;
3109  case SDL_VIDEORESIZE:
3110  screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
3111  SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
3112  screen_width = cur_stream->width = event.resize.w;
3113  screen_height = cur_stream->height = event.resize.h;
3114  cur_stream->force_refresh = 1;
3115  break;
3116  case SDL_QUIT:
3117  case FF_QUIT_EVENT:
3118  do_exit(cur_stream);
3119  break;
3120  case FF_ALLOC_EVENT:
3121  alloc_picture(event.user.data1);
3122  break;
3123  default:
3124  break;
3125  }
3126  }
3127 }
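/*
 * A minimal sketch (not part of ffplay.c) of the mouse seek handled above:
 * the click's x position becomes a fraction of the window width, the container
 * duration is scaled by it, and the stream start time is re-added before
 * seeking.  click_to_timestamp and the numbers below are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

static int64_t click_to_timestamp(double x, double window_width,
                                  int64_t duration, int64_t start_time)
{
    double frac = x / window_width;                 /* fraction of the seek bar */
    return (int64_t)(frac * duration) + start_time; /* in AV_TIME_BASE (microsecond) units */
}

int main(void)
{
    /* clicking at 25% of an 800-pixel window in a 2-hour file seeks to ~30 min */
    printf("%lld\n", (long long)click_to_timestamp(200, 800, 7200LL * 1000000, 0));
    return 0;
}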
3128 
3129 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3130 {
3131  av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3132  return opt_default(NULL, "video_size", arg);
3133 }
3134 
3135 static int opt_width(void *optctx, const char *opt, const char *arg)
3136 {
3137  screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3138  return 0;
3139 }
3140 
3141 static int opt_height(void *optctx, const char *opt, const char *arg)
3142 {
3143  screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3144  return 0;
3145 }
3146 
3147 static int opt_format(void *optctx, const char *opt, const char *arg)
3148 {
3149  file_iformat = av_find_input_format(arg);
3150  if (!file_iformat) {
3151  fprintf(stderr, "Unknown input format: %s\n", arg);
3152  return AVERROR(EINVAL);
3153  }
3154  return 0;
3155 }
3156 
3157 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3158 {
3159  av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3160  return opt_default(NULL, "pixel_format", arg);
3161 }
3162 
3163 static int opt_sync(void *optctx, const char *opt, const char *arg)
3164 {
3165  if (!strcmp(arg, "audio"))
3167  else if (!strcmp(arg, "video"))
3169  else if (!strcmp(arg, "ext"))
3171  else {
3172  fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
3173  exit(1);
3174  }
3175  return 0;
3176 }
3177 
3178 static int opt_seek(void *optctx, const char *opt, const char *arg)
3179 {
3180  start_time = parse_time_or_die(opt, arg, 1);
3181  return 0;
3182 }
3183 
3184 static int opt_duration(void *optctx, const char *opt, const char *arg)
3185 {
3186  duration = parse_time_or_die(opt, arg, 1);
3187  return 0;
3188 }
3189 
3190 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3191 {
3192  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3193  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3194  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3195  parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3196  return 0;
3197 }
3198 
3199 static void opt_input_file(void *optctx, const char *filename)
3200 {
3201  if (input_filename) {
3202  fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3203  filename, input_filename);
3204  exit(1);
3205  }
3206  if (!strcmp(filename, "-"))
3207  filename = "pipe:";
3208  input_filename = filename;
3209 }
3210 
3211 static int opt_codec(void *optctx, const char *opt, const char *arg)
3212 {
3213  const char *spec = strchr(opt, ':');
3214  if (!spec) {
3215  fprintf(stderr, "No media specifier was specified in '%s' in option '%s'\n",
3216  arg, opt);
3217  return AVERROR(EINVAL);
3218  }
3219  spec++;
3220  switch (spec[0]) {
3221  case 'a' : audio_codec_name = arg; break;
3222  case 's' : subtitle_codec_name = arg; break;
3223  case 'v' : video_codec_name = arg; break;
3224  default:
3225  fprintf(stderr, "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3226  return AVERROR(EINVAL);
3227  }
3228  return 0;
3229 }
3230 
3231 static int dummy;
3232 
3233 static const OptionDef options[] = {
3234 #include "cmdutils_common_opts.h"
3235  { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3236  { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3237  { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3238  { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3239  { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3240  { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3241  { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3242  { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
3243  { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
3244  { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
3245  { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3246  { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3247  { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3248  { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3249  { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3250  { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3251  { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3252  { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
3253  { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3254  { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3255  { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3256  { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3257  { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_loop_filter }, "", "" },
3258  { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_frame }, "", "" },
3259  { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_idct }, "", "" },
3260  { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo", "algo" },
3261  { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options", "bit_mask" },
3262  { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3263  { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3264  { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3265  { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3266  { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3267  { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3268  { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3269  { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3270 #if CONFIG_AVFILTER
3271  { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "set video filters", "filter_graph" },
3272 #endif
3273  { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3274  { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3275  { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3276  { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3277  { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3278  { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3279  { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3280  { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3281  { NULL, },
3282 };
3283 
3284 static void show_usage(void)
3285 {
3286  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3287  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3288  av_log(NULL, AV_LOG_INFO, "\n");
3289 }
3290 
3291 void show_help_default(const char *opt, const char *arg)
3292 {
3293  av_log_set_callback(log_callback_help);
3294  show_usage();
3295  show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3296  show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3297  printf("\n");
3298  show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3299  show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3300 #if !CONFIG_AVFILTER
3301  show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3302 #else
3303  show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3304 #endif
3305  printf("\nWhile playing:\n"
3306  "q, ESC quit\n"
3307  "f toggle full screen\n"
3308  "p, SPC pause\n"
3309  "a cycle audio channel\n"
3310  "v cycle video channel\n"
3311  "t cycle subtitle channel\n"
3312  "w show audio waves\n"
3313  "s activate frame-step mode\n"
3314  "left/right seek backward/forward 10 seconds\n"
3315  "down/up seek backward/forward 1 minute\n"
3316  "page down/page up seek backward/forward 10 minutes\n"
3317  "mouse click seek to percentage in file corresponding to fraction of width\n"
3318  );
3319 }
3320 
3321 static int lockmgr(void **mtx, enum AVLockOp op)
3322 {
3323  switch(op) {
3324  case AV_LOCK_CREATE:
3325  *mtx = SDL_CreateMutex();
3326  if(!*mtx)
3327  return 1;
3328  return 0;
3329  case AV_LOCK_OBTAIN:
3330  return !!SDL_LockMutex(*mtx);
3331  case AV_LOCK_RELEASE:
3332  return !!SDL_UnlockMutex(*mtx);
3333  case AV_LOCK_DESTROY:
3334  SDL_DestroyMutex(*mtx);
3335  return 0;
3336  }
3337  return 1;
3338 }
3339 
3340 /* Called from the main */
3341 int main(int argc, char **argv)
3342 {
3343  int flags;
3344  VideoState *is;
3345  char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3346 
3347  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3348  parse_loglevel(argc, argv, options);
3349 
3350  /* register all codecs, demux and protocols */
3351  avcodec_register_all();
3352 #if CONFIG_AVDEVICE
3353  avdevice_register_all();
3354 #endif
3355 #if CONFIG_AVFILTER
3356  avfilter_register_all();
3357 #endif
3358  av_register_all();
3359  avformat_network_init();
3360
3361  init_opts();
3362 
3363  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3364  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3365 
3366  show_banner(argc, argv, options);
3367 
3368  parse_options(NULL, argc, argv, options, opt_input_file);
3369 
3370  if (!input_filename) {
3371  show_usage();
3372  fprintf(stderr, "An input file must be specified\n");
3373  fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3374  exit(1);
3375  }
3376 
3377  if (display_disable) {
3378  video_disable = 1;
3379  }
3380  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3381  if (audio_disable)
3382  flags &= ~SDL_INIT_AUDIO;
3383  if (display_disable)
3384  SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3385 #if !defined(__MINGW32__) && !defined(__APPLE__)
3386  flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3387 #endif
3388  if (SDL_Init (flags)) {
3389  fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3390  fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3391  exit(1);
3392  }
3393 
3394  if (!display_disable) {
3395 #if HAVE_SDL_VIDEO_SIZE
3396  const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3397  fs_screen_width = vi->current_w;
3398  fs_screen_height = vi->current_h;
3399 #endif
3400  }
3401 
3402  SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3403  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3404  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3405 
3407  fprintf(stderr, "Could not initialize lock manager!\n");
3408  do_exit(NULL);
3409  }
3410 
3411  av_init_packet(&flush_pkt);
3412  flush_pkt.data = (char *)(intptr_t)"FLUSH";
3413 
3414  is = stream_open(input_filename, file_iformat);
3415  if (!is) {
3416  fprintf(stderr, "Failed to initialize VideoState!\n");
3417  do_exit(NULL);
3418  }
3419 
3420  event_loop(is);
3421 
3422  /* never returns */
3423 
3424  return 0;
3425 }