FFmpeg
ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include "libavutil/avstring.h"
32 #include "libavutil/colorspace.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/dict.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/avassert.h"
40 #include "libavutil/time.h"
41 #include "libavformat/avformat.h"
42 #include "libavdevice/avdevice.h"
43 #include "libswscale/swscale.h"
44 #include "libavutil/opt.h"
45 #include "libavcodec/avfft.h"
47 
48 #if CONFIG_AVFILTER
49 # include "libavfilter/avcodec.h"
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/buffersink.h"
52 # include "libavfilter/buffersrc.h"
53 #endif
54 
55 #include <SDL.h>
56 #include <SDL_thread.h>
57 
58 #include "cmdutils.h"
59 
60 #include <assert.h>
61 
62 const char program_name[] = "ffplay";
63 const int program_birth_year = 2003;
64 
65 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
66 #define MIN_FRAMES 5
67 
68 /* SDL audio buffer size, in samples. Should be small to have precise
69  A/V sync, as SDL does not have hardware buffer fullness info. */
70 #define SDL_AUDIO_BUFFER_SIZE 1024
71 
72 /* no AV sync correction is done if below the minimum AV sync threshold */
73 #define AV_SYNC_THRESHOLD_MIN 0.01
74 /* AV sync correction is done if above the maximum AV sync threshold */
75 #define AV_SYNC_THRESHOLD_MAX 0.1
76 /* If a frame duration is longer than this, it will not be duplicated to compensate AV sync */
77 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
79 /* no AV correction is done if the error is too big */
79 #define AV_NOSYNC_THRESHOLD 10.0
80 
81 /* maximum audio speed change to get correct sync */
82 #define SAMPLE_CORRECTION_PERCENT_MAX 10
83 
84 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
85 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
86 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
87 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
88 
89 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
90 #define AUDIO_DIFF_AVG_NB 20
91 
92 /* poll for a possibly required screen refresh at least this often; should be less than 1/fps */
93 #define REFRESH_RATE 0.01
94 
95 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
96 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
97 #define SAMPLE_ARRAY_SIZE (8 * 65536)
98 
99 #define CURSOR_HIDE_DELAY 1000000
100 
101 static int64_t sws_flags = SWS_BICUBIC;
102 
103 typedef struct MyAVPacketList {
106  int serial;
108 
109 typedef struct PacketQueue {
112  int size;
114  int serial;
115  SDL_mutex *mutex;
116  SDL_cond *cond;
117 } PacketQueue;
118 
119 #define VIDEO_PICTURE_QUEUE_SIZE 3
120 #define SUBPICTURE_QUEUE_SIZE 4
121 
122 typedef struct VideoPicture {
123  double pts; // presentation timestamp for this picture
124  int64_t pos; // byte position in file
125  SDL_Overlay *bmp;
126  int width, height; /* source height & width */
129  int serial;
130 
132 } VideoPicture;
133 
134 typedef struct SubPicture {
135  double pts; /* presentation time stamp for this picture */
137  int serial;
138 } SubPicture;
139 
140 typedef struct AudioParams {
141  int freq;
142  int channels;
143  int64_t channel_layout;
145 } AudioParams;
146 
147 typedef struct Clock {
148  double pts; /* clock base */
149  double pts_drift; /* clock base minus time at which we updated the clock */
150  double last_updated;
151  double speed;
152  int serial; /* clock is based on a packet with this serial */
153  int paused;
154  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
155 } Clock;
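/* Sketch of the Clock mechanism as used by get_clock()/set_clock_at() below:
 * set_clock_at() records pts_drift = pts - time, so with speed == 1.0 a later
 * get_clock() call returns pts_drift + now, i.e. the last stored pts advanced
 * by the wall-clock time elapsed since the update; for other speeds the
 * elapsed time is scaled by the speed factor. A serial that no longer matches
 * the owning packet queue's serial makes get_clock() return NAN, marking the
 * clock as obsolete. */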
156 
157 enum {
158  AV_SYNC_AUDIO_MASTER, /* default choice */
160  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
161 };
162 
163 typedef struct VideoState {
164  SDL_Thread *read_tid;
165  SDL_Thread *video_tid;
170  int paused;
173  int seek_req;
175  int64_t seek_pos;
176  int64_t seek_rel;
179  int realtime;
180 
184 
186 
188 
189  double audio_clock;
191  double audio_diff_cum; /* used for AV difference average computation */
201  unsigned int audio_buf_size; /* in bytes */
202  unsigned int audio_buf1_size;
203  int audio_buf_index; /* in bytes */
211 #if CONFIG_AVFILTER
212  struct AudioParams audio_filter_src;
213 #endif
219 
220  enum ShowMode {
222  } show_mode;
229  int xpos;
231 
232  SDL_Thread *subtitle_tid;
238  SDL_mutex *subpq_mutex;
239  SDL_cond *subpq_cond;
240 
241  double frame_timer;
252  int64_t video_current_pos; // current displayed file pos
253  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
256  SDL_mutex *pictq_mutex;
257  SDL_cond *pictq_cond;
258 #if !CONFIG_AVFILTER
260 #endif
262 
263  char filename[1024];
265  int step;
266 
267 #if CONFIG_AVFILTER
268  AVFilterContext *in_video_filter; // the first filter in the video chain
269  AVFilterContext *out_video_filter; // the last filter in the video chain
270  AVFilterContext *in_audio_filter; // the first filter in the audio chain
271  AVFilterContext *out_audio_filter; // the last filter in the audio chain
272  AVFilterGraph *agraph; // audio filter graph
273 #endif
274 
276 
278 } VideoState;
279 
280 /* options specified by the user */
282 static const char *input_filename;
283 static const char *window_title;
284 static int fs_screen_width;
285 static int fs_screen_height;
286 static int default_width = 640;
287 static int default_height = 480;
288 static int screen_width = 0;
289 static int screen_height = 0;
290 static int audio_disable;
291 static int video_disable;
292 static int subtitle_disable;
294  [AVMEDIA_TYPE_AUDIO] = -1,
295  [AVMEDIA_TYPE_VIDEO] = -1,
296  [AVMEDIA_TYPE_SUBTITLE] = -1,
297 };
298 static int seek_by_bytes = -1;
299 static int display_disable;
300 static int show_status = 1;
302 static int64_t start_time = AV_NOPTS_VALUE;
303 static int64_t duration = AV_NOPTS_VALUE;
304 static int workaround_bugs = 1;
305 static int fast = 0;
306 static int genpts = 0;
307 static int lowres = 0;
308 static int idct = FF_IDCT_AUTO;
309 static int error_concealment = 3;
310 static int decoder_reorder_pts = -1;
311 static int autoexit;
312 static int exit_on_keydown;
313 static int exit_on_mousedown;
314 static int loop = 1;
315 static int framedrop = -1;
316 static int infinite_buffer = -1;
317 static enum ShowMode show_mode = SHOW_MODE_NONE;
318 static const char *audio_codec_name;
319 static const char *subtitle_codec_name;
320 static const char *video_codec_name;
321 double rdftspeed = 0.02;
322 static int64_t cursor_last_shown;
323 static int cursor_hidden = 0;
324 #if CONFIG_AVFILTER
325 static char *vfilters = NULL;
326 static char *afilters = NULL;
327 #endif
328 
329 /* current context */
330 static int is_full_screen;
331 static int64_t audio_callback_time;
332 
334 
335 #define FF_ALLOC_EVENT (SDL_USEREVENT)
336 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
337 
338 static SDL_Surface *screen;
339 
340 static inline
341 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
342  enum AVSampleFormat fmt2, int64_t channel_count2)
343 {
344  /* If channel count == 1, planar and non-planar formats are the same */
345  if (channel_count1 == 1 && channel_count2 == 1)
347  else
348  return channel_count1 != channel_count2 || fmt1 != fmt2;
349 }
350 
351 static inline
352 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
353 {
354  if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
355  return channel_layout;
356  else
357  return 0;
358 }
359 
360 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
361 
363 {
364  MyAVPacketList *pkt1;
365 
366  if (q->abort_request)
367  return -1;
368 
369  pkt1 = av_malloc(sizeof(MyAVPacketList));
370  if (!pkt1)
371  return -1;
372  pkt1->pkt = *pkt;
373  pkt1->next = NULL;
374  if (pkt == &flush_pkt)
375  q->serial++;
376  pkt1->serial = q->serial;
377 
378  if (!q->last_pkt)
379  q->first_pkt = pkt1;
380  else
381  q->last_pkt->next = pkt1;
382  q->last_pkt = pkt1;
383  q->nb_packets++;
384  q->size += pkt1->pkt.size + sizeof(*pkt1);
385  /* XXX: should duplicate packet data in DV case */
386  SDL_CondSignal(q->cond);
387  return 0;
388 }
389 
391 {
392  int ret;
393 
394  /* duplicate the packet */
395  if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
396  return -1;
397 
398  SDL_LockMutex(q->mutex);
399  ret = packet_queue_put_private(q, pkt);
400  SDL_UnlockMutex(q->mutex);
401 
402  if (pkt != &flush_pkt && ret < 0)
403  av_free_packet(pkt);
404 
405  return ret;
406 }
407 
408 /* packet queue handling */
410 {
411  memset(q, 0, sizeof(PacketQueue));
412  q->mutex = SDL_CreateMutex();
413  q->cond = SDL_CreateCond();
414  q->abort_request = 1;
415 }
416 
418 {
419  MyAVPacketList *pkt, *pkt1;
420 
421  SDL_LockMutex(q->mutex);
422  for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
423  pkt1 = pkt->next;
424  av_free_packet(&pkt->pkt);
425  av_freep(&pkt);
426  }
427  q->last_pkt = NULL;
428  q->first_pkt = NULL;
429  q->nb_packets = 0;
430  q->size = 0;
431  SDL_UnlockMutex(q->mutex);
432 }
433 
435 {
437  SDL_DestroyMutex(q->mutex);
438  SDL_DestroyCond(q->cond);
439 }
440 
442 {
443  SDL_LockMutex(q->mutex);
444 
445  q->abort_request = 1;
446 
447  SDL_CondSignal(q->cond);
448 
449  SDL_UnlockMutex(q->mutex);
450 }
451 
453 {
454  SDL_LockMutex(q->mutex);
455  q->abort_request = 0;
456  packet_queue_put_private(q, &flush_pkt);
457  SDL_UnlockMutex(q->mutex);
458 }
459 
460 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
461 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
462 {
463  MyAVPacketList *pkt1;
464  int ret;
465 
466  SDL_LockMutex(q->mutex);
467 
468  for (;;) {
469  if (q->abort_request) {
470  ret = -1;
471  break;
472  }
473 
474  pkt1 = q->first_pkt;
475  if (pkt1) {
476  q->first_pkt = pkt1->next;
477  if (!q->first_pkt)
478  q->last_pkt = NULL;
479  q->nb_packets--;
480  q->size -= pkt1->pkt.size + sizeof(*pkt1);
481  *pkt = pkt1->pkt;
482  if (serial)
483  *serial = pkt1->serial;
484  av_free(pkt1);
485  ret = 1;
486  break;
487  } else if (!block) {
488  ret = 0;
489  break;
490  } else {
491  SDL_CondWait(q->cond, q->mutex);
492  }
493  }
494  SDL_UnlockMutex(q->mutex);
495  return ret;
496 }
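/* Note on the serial fields used above: packet_queue_put_private() increments
 * q->serial whenever flush_pkt is queued (e.g. when the queue is started or
 * flushed around a seek), and every queued packet carries the serial that was
 * current when it was added. Consumers keep the serial returned by
 * packet_queue_get() and compare it against the current queue (or clock)
 * serial, so packets and frames produced from pre-flush data can be detected
 * as stale and discarded without draining the whole decoding pipeline. */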
497 
498 static inline void fill_rectangle(SDL_Surface *screen,
499  int x, int y, int w, int h, int color, int update)
500 {
501  SDL_Rect rect;
502  rect.x = x;
503  rect.y = y;
504  rect.w = w;
505  rect.h = h;
506  SDL_FillRect(screen, &rect, color);
507  if (update && w > 0 && h > 0)
508  SDL_UpdateRect(screen, x, y, w, h);
509 }
510 
511 /* draw only the border of a rectangle */
512 static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
513 {
514  int w1, w2, h1, h2;
515 
516  /* fill the background */
517  w1 = x;
518  if (w1 < 0)
519  w1 = 0;
520  w2 = width - (x + w);
521  if (w2 < 0)
522  w2 = 0;
523  h1 = y;
524  if (h1 < 0)
525  h1 = 0;
526  h2 = height - (y + h);
527  if (h2 < 0)
528  h2 = 0;
530  xleft, ytop,
531  w1, height,
532  color, update);
534  xleft + width - w2, ytop,
535  w2, height,
536  color, update);
538  xleft + w1, ytop,
539  width - w1 - w2, h1,
540  color, update);
542  xleft + w1, ytop + height - h2,
543  width - w1 - w2, h2,
544  color, update);
545 }
546 
547 #define ALPHA_BLEND(a, oldp, newp, s)\
548 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
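/* With s == 0 this reduces to the usual integer alpha blend
 * (oldp * (255 - a) + newp * a) / 255; the shift s compensates for newp being
 * the sum of 2^s chroma samples in the subsampled cases below. */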
549 
550 #define RGBA_IN(r, g, b, a, s)\
551 {\
552  unsigned int v = ((const uint32_t *)(s))[0];\
553  a = (v >> 24) & 0xff;\
554  r = (v >> 16) & 0xff;\
555  g = (v >> 8) & 0xff;\
556  b = v & 0xff;\
557 }
558 
559 #define YUVA_IN(y, u, v, a, s, pal)\
560 {\
561  unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
562  a = (val >> 24) & 0xff;\
563  y = (val >> 16) & 0xff;\
564  u = (val >> 8) & 0xff;\
565  v = val & 0xff;\
566 }
567 
568 #define YUVA_OUT(d, y, u, v, a)\
569 {\
570  ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
571 }
572 
573 
574 #define BPP 1
575 
576 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
577 {
578  int wrap, wrap3, width2, skip2;
579  int y, u, v, a, u1, v1, a1, w, h;
580  uint8_t *lum, *cb, *cr;
581  const uint8_t *p;
582  const uint32_t *pal;
583  int dstx, dsty, dstw, dsth;
584 
585  dstw = av_clip(rect->w, 0, imgw);
586  dsth = av_clip(rect->h, 0, imgh);
587  dstx = av_clip(rect->x, 0, imgw - dstw);
588  dsty = av_clip(rect->y, 0, imgh - dsth);
589  lum = dst->data[0] + dsty * dst->linesize[0];
590  cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
591  cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
592 
593  width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
594  skip2 = dstx >> 1;
595  wrap = dst->linesize[0];
596  wrap3 = rect->pict.linesize[0];
597  p = rect->pict.data[0];
598  pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
599 
600  if (dsty & 1) {
601  lum += dstx;
602  cb += skip2;
603  cr += skip2;
604 
605  if (dstx & 1) {
606  YUVA_IN(y, u, v, a, p, pal);
607  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
608  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
609  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
610  cb++;
611  cr++;
612  lum++;
613  p += BPP;
614  }
615  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
616  YUVA_IN(y, u, v, a, p, pal);
617  u1 = u;
618  v1 = v;
619  a1 = a;
620  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
621 
622  YUVA_IN(y, u, v, a, p + BPP, pal);
623  u1 += u;
624  v1 += v;
625  a1 += a;
626  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
627  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
628  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
629  cb++;
630  cr++;
631  p += 2 * BPP;
632  lum += 2;
633  }
634  if (w) {
635  YUVA_IN(y, u, v, a, p, pal);
636  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
637  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
638  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
639  p++;
640  lum++;
641  }
642  p += wrap3 - dstw * BPP;
643  lum += wrap - dstw - dstx;
644  cb += dst->linesize[1] - width2 - skip2;
645  cr += dst->linesize[2] - width2 - skip2;
646  }
647  for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
648  lum += dstx;
649  cb += skip2;
650  cr += skip2;
651 
652  if (dstx & 1) {
653  YUVA_IN(y, u, v, a, p, pal);
654  u1 = u;
655  v1 = v;
656  a1 = a;
657  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
658  p += wrap3;
659  lum += wrap;
660  YUVA_IN(y, u, v, a, p, pal);
661  u1 += u;
662  v1 += v;
663  a1 += a;
664  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
665  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
666  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
667  cb++;
668  cr++;
669  p += -wrap3 + BPP;
670  lum += -wrap + 1;
671  }
672  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
673  YUVA_IN(y, u, v, a, p, pal);
674  u1 = u;
675  v1 = v;
676  a1 = a;
677  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
678 
679  YUVA_IN(y, u, v, a, p + BPP, pal);
680  u1 += u;
681  v1 += v;
682  a1 += a;
683  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
684  p += wrap3;
685  lum += wrap;
686 
687  YUVA_IN(y, u, v, a, p, pal);
688  u1 += u;
689  v1 += v;
690  a1 += a;
691  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
692 
693  YUVA_IN(y, u, v, a, p + BPP, pal);
694  u1 += u;
695  v1 += v;
696  a1 += a;
697  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
698 
699  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
700  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
701 
702  cb++;
703  cr++;
704  p += -wrap3 + 2 * BPP;
705  lum += -wrap + 2;
706  }
707  if (w) {
708  YUVA_IN(y, u, v, a, p, pal);
709  u1 = u;
710  v1 = v;
711  a1 = a;
712  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
713  p += wrap3;
714  lum += wrap;
715  YUVA_IN(y, u, v, a, p, pal);
716  u1 += u;
717  v1 += v;
718  a1 += a;
719  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
720  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
721  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
722  cb++;
723  cr++;
724  p += -wrap3 + BPP;
725  lum += -wrap + 1;
726  }
727  p += wrap3 + (wrap3 - dstw * BPP);
728  lum += wrap + (wrap - dstw - dstx);
729  cb += dst->linesize[1] - width2 - skip2;
730  cr += dst->linesize[2] - width2 - skip2;
731  }
732  /* handle odd height */
733  if (h) {
734  lum += dstx;
735  cb += skip2;
736  cr += skip2;
737 
738  if (dstx & 1) {
739  YUVA_IN(y, u, v, a, p, pal);
740  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
741  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
742  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
743  cb++;
744  cr++;
745  lum++;
746  p += BPP;
747  }
748  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
749  YUVA_IN(y, u, v, a, p, pal);
750  u1 = u;
751  v1 = v;
752  a1 = a;
753  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
754 
755  YUVA_IN(y, u, v, a, p + BPP, pal);
756  u1 += u;
757  v1 += v;
758  a1 += a;
759  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
760  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
761  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
762  cb++;
763  cr++;
764  p += 2 * BPP;
765  lum += 2;
766  }
767  if (w) {
768  YUVA_IN(y, u, v, a, p, pal);
769  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
770  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
771  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
772  }
773  }
774 }
775 
777 {
778  avsubtitle_free(&sp->sub);
779 }
780 
781 static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, VideoPicture *vp)
782 {
783  float aspect_ratio;
784  int width, height, x, y;
785 
786  if (vp->sar.num == 0)
787  aspect_ratio = 0;
788  else
789  aspect_ratio = av_q2d(vp->sar);
790 
791  if (aspect_ratio <= 0.0)
792  aspect_ratio = 1.0;
793  aspect_ratio *= (float)vp->width / (float)vp->height;
794 
795  /* XXX: we suppose the screen has a 1.0 pixel ratio */
796  height = scr_height;
797  width = ((int)rint(height * aspect_ratio)) & ~1;
798  if (width > scr_width) {
799  width = scr_width;
800  height = ((int)rint(width / aspect_ratio)) & ~1;
801  }
802  x = (scr_width - width) / 2;
803  y = (scr_height - height) / 2;
804  rect->x = scr_xleft + x;
805  rect->y = scr_ytop + y;
806  rect->w = FFMAX(width, 1);
807  rect->h = FFMAX(height, 1);
808 }
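/* Worked example for calculate_display_rect(): a 1920x1080 picture with 1:1
 * SAR gives aspect_ratio = 16/9; fitting it into a 640x480 area first tries
 * width = rint(480 * 16/9) & ~1 = 852, which exceeds 640, so it falls back to
 * width = 640, height = rint(640 * 9/16) & ~1 = 360, centered at x = 0 and
 * y = (480 - 360) / 2 = 60. The numbers are illustrative only. */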
809 
811 {
812  VideoPicture *vp;
813  SubPicture *sp;
814  AVPicture pict;
815  SDL_Rect rect;
816  int i;
817 
818  vp = &is->pictq[is->pictq_rindex];
819  if (vp->bmp) {
820  if (is->subtitle_st) {
821  if (is->subpq_size > 0) {
822  sp = &is->subpq[is->subpq_rindex];
823 
824  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
825  SDL_LockYUVOverlay (vp->bmp);
826 
827  pict.data[0] = vp->bmp->pixels[0];
828  pict.data[1] = vp->bmp->pixels[2];
829  pict.data[2] = vp->bmp->pixels[1];
830 
831  pict.linesize[0] = vp->bmp->pitches[0];
832  pict.linesize[1] = vp->bmp->pitches[2];
833  pict.linesize[2] = vp->bmp->pitches[1];
834 
835  for (i = 0; i < sp->sub.num_rects; i++)
836  blend_subrect(&pict, sp->sub.rects[i],
837  vp->bmp->w, vp->bmp->h);
838 
839  SDL_UnlockYUVOverlay (vp->bmp);
840  }
841  }
842  }
843 
844  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp);
845 
846  SDL_DisplayYUVOverlay(vp->bmp, &rect);
847 
848  if (rect.x != is->last_display_rect.x || rect.y != is->last_display_rect.y || rect.w != is->last_display_rect.w || rect.h != is->last_display_rect.h || is->force_refresh) {
849  int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
850  fill_border(is->xleft, is->ytop, is->width, is->height, rect.x, rect.y, rect.w, rect.h, bgcolor, 1);
851  is->last_display_rect = rect;
852  }
853  }
854 }
855 
856 static inline int compute_mod(int a, int b)
857 {
858  return a < 0 ? a%b + b : a%b;
859 }
860 
862 {
863  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
864  int ch, channels, h, h2, bgcolor, fgcolor;
865  int64_t time_diff;
866  int rdft_bits, nb_freq;
867 
868  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
869  ;
870  nb_freq = 1 << (rdft_bits - 1);
871 
872  /* compute display index : center on currently output samples */
873  channels = s->audio_tgt.channels;
874  nb_display_channels = channels;
875  if (!s->paused) {
876  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
877  n = 2 * channels;
878  delay = s->audio_write_buf_size;
879  delay /= n;
880 
881  /* to be more precise, we take into account the time spent since
882  the last buffer computation */
883  if (audio_callback_time) {
884  time_diff = av_gettime() - audio_callback_time;
885  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
886  }
887 
888  delay += 2 * data_used;
889  if (delay < data_used)
890  delay = data_used;
891 
892  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
893  if (s->show_mode == SHOW_MODE_WAVES) {
894  h = INT_MIN;
895  for (i = 0; i < 1000; i += channels) {
896  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
897  int a = s->sample_array[idx];
898  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
899  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
900  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
901  int score = a - d;
902  if (h < score && (b ^ c) < 0) {
903  h = score;
904  i_start = idx;
905  }
906  }
907  }
908 
909  s->last_i_start = i_start;
910  } else {
911  i_start = s->last_i_start;
912  }
913 
914  bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
915  if (s->show_mode == SHOW_MODE_WAVES) {
917  s->xleft, s->ytop, s->width, s->height,
918  bgcolor, 0);
919 
920  fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
921 
922  /* total height for one channel */
923  h = s->height / nb_display_channels;
924  /* graph height / 2 */
925  h2 = (h * 9) / 20;
926  for (ch = 0; ch < nb_display_channels; ch++) {
927  i = i_start + ch;
928  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
929  for (x = 0; x < s->width; x++) {
930  y = (s->sample_array[i] * h2) >> 15;
931  if (y < 0) {
932  y = -y;
933  ys = y1 - y;
934  } else {
935  ys = y1;
936  }
938  s->xleft + x, ys, 1, y,
939  fgcolor, 0);
940  i += channels;
941  if (i >= SAMPLE_ARRAY_SIZE)
942  i -= SAMPLE_ARRAY_SIZE;
943  }
944  }
945 
946  fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
947 
948  for (ch = 1; ch < nb_display_channels; ch++) {
949  y = s->ytop + ch * h;
951  s->xleft, y, s->width, 1,
952  fgcolor, 0);
953  }
954  SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
955  } else {
956  nb_display_channels= FFMIN(nb_display_channels, 2);
957  if (rdft_bits != s->rdft_bits) {
958  av_rdft_end(s->rdft);
959  av_free(s->rdft_data);
960  s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
961  s->rdft_bits = rdft_bits;
962  s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
963  }
964  {
965  FFTSample *data[2];
966  for (ch = 0; ch < nb_display_channels; ch++) {
967  data[ch] = s->rdft_data + 2 * nb_freq * ch;
968  i = i_start + ch;
969  for (x = 0; x < 2 * nb_freq; x++) {
970  double w = (x-nb_freq) * (1.0 / nb_freq);
971  data[ch][x] = s->sample_array[i] * (1.0 - w * w);
972  i += channels;
973  if (i >= SAMPLE_ARRAY_SIZE)
974  i -= SAMPLE_ARRAY_SIZE;
975  }
976  av_rdft_calc(s->rdft, data[ch]);
977  }
978  // least efficient way to do this; we should of course access it directly, but it's more than fast enough
979  for (y = 0; y < s->height; y++) {
980  double w = 1 / sqrt(nb_freq);
981  int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
982  int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
983  + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
984  a = FFMIN(a, 255);
985  b = FFMIN(b, 255);
986  fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
987 
989  s->xpos, s->height-y, 1, 1,
990  fgcolor, 0);
991  }
992  }
993  SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
994  if (!s->paused)
995  s->xpos++;
996  if (s->xpos >= s->width)
997  s->xpos= s->xleft;
998  }
999 }
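/* In the RDFT (spectrum) branch above, each channel's samples are shaped by a
 * 1 - w*w (Welch) window before av_rdft_calc(), and every refresh paints a
 * single one-pixel-wide column at s->xpos, with the per-bin magnitudes of up
 * to two channels driving the red and green components, so the spectrogram
 * scrolls horizontally across the display area. */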
1000 
1001 static void stream_close(VideoState *is)
1002 {
1003  VideoPicture *vp;
1004  int i;
1005  /* XXX: use a special url_shutdown call to abort parse cleanly */
1006  is->abort_request = 1;
1007  SDL_WaitThread(is->read_tid, NULL);
1011 
1012  /* free all pictures */
1013  for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1014  vp = &is->pictq[i];
1015  if (vp->bmp) {
1016  SDL_FreeYUVOverlay(vp->bmp);
1017  vp->bmp = NULL;
1018  }
1019  }
1020  SDL_DestroyMutex(is->pictq_mutex);
1021  SDL_DestroyCond(is->pictq_cond);
1022  SDL_DestroyMutex(is->subpq_mutex);
1023  SDL_DestroyCond(is->subpq_cond);
1024  SDL_DestroyCond(is->continue_read_thread);
1025 #if !CONFIG_AVFILTER
1027 #endif
1028  av_free(is);
1029 }
1030 
1031 static void do_exit(VideoState *is)
1032 {
1033  if (is) {
1034  stream_close(is);
1035  }
1036  av_lockmgr_register(NULL);
1037  uninit_opts();
1038 #if CONFIG_AVFILTER
1039  av_freep(&vfilters);
1040 #endif
1042  if (show_status)
1043  printf("\n");
1044  SDL_Quit();
1045  av_log(NULL, AV_LOG_QUIET, "%s", "");
1046  exit(0);
1047 }
1048 
1049 static void sigterm_handler(int sig)
1050 {
1051  exit(123);
1052 }
1053 
1054 static int video_open(VideoState *is, int force_set_video_mode, VideoPicture *vp)
1055 {
1056  int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
1057  int w,h;
1058  SDL_Rect rect;
1059 
1060  if (is_full_screen) flags |= SDL_FULLSCREEN;
1061  else flags |= SDL_RESIZABLE;
1062 
1063  if (vp && vp->width) {
1064  calculate_display_rect(&rect, 0, 0, INT_MAX, vp->height, vp);
1065  default_width = rect.w;
1066  default_height = rect.h;
1067  }
1068 
1070  w = fs_screen_width;
1071  h = fs_screen_height;
1072  } else if (!is_full_screen && screen_width) {
1073  w = screen_width;
1074  h = screen_height;
1075  } else {
1076  w = default_width;
1077  h = default_height;
1078  }
1079  w = FFMIN(16383, w);
1080  if (screen && is->width == screen->w && screen->w == w
1081  && is->height== screen->h && screen->h == h && !force_set_video_mode)
1082  return 0;
1083  screen = SDL_SetVideoMode(w, h, 0, flags);
1084  if (!screen) {
1085  av_log(NULL, AV_LOG_FATAL, "SDL: could not set video mode - exiting\n");
1086  do_exit(is);
1087  }
1088  if (!window_title)
1090  SDL_WM_SetCaption(window_title, window_title);
1091 
1092  is->width = screen->w;
1093  is->height = screen->h;
1094 
1095  return 0;
1096 }
1097 
1098 /* display the current picture, if any */
1099 static void video_display(VideoState *is)
1100 {
1101  if (!screen)
1102  video_open(is, 0, NULL);
1103  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1104  video_audio_display(is);
1105  else if (is->video_st)
1106  video_image_display(is);
1107 }
1108 
1109 static double get_clock(Clock *c)
1110 {
1111  if (*c->queue_serial != c->serial)
1112  return NAN;
1113  if (c->paused) {
1114  return c->pts;
1115  } else {
1116  double time = av_gettime() / 1000000.0;
1117  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1118  }
1119 }
1120 
1121 static void set_clock_at(Clock *c, double pts, int serial, double time)
1122 {
1123  c->pts = pts;
1124  c->last_updated = time;
1125  c->pts_drift = c->pts - time;
1126  c->serial = serial;
1127 }
1128 
1129 static void set_clock(Clock *c, double pts, int serial)
1130 {
1131  double time = av_gettime() / 1000000.0;
1132  set_clock_at(c, pts, serial, time);
1133 }
1134 
1135 static void set_clock_speed(Clock *c, double speed)
1136 {
1137  set_clock(c, get_clock(c), c->serial);
1138  c->speed = speed;
1139 }
1140 
1141 static void init_clock(Clock *c, int *queue_serial)
1142 {
1143  c->speed = 1.0;
1144  c->paused = 0;
1145  c->queue_serial = queue_serial;
1146  set_clock(c, NAN, -1);
1147 }
1148 
1149 static void sync_clock_to_slave(Clock *c, Clock *slave)
1150 {
1151  double clock = get_clock(c);
1152  double slave_clock = get_clock(slave);
1153  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1154  set_clock(c, slave_clock, slave->serial);
1155 }
1156 
1158  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1159  if (is->video_st)
1160  return AV_SYNC_VIDEO_MASTER;
1161  else
1162  return AV_SYNC_AUDIO_MASTER;
1163  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1164  if (is->audio_st)
1165  return AV_SYNC_AUDIO_MASTER;
1166  else
1167  return AV_SYNC_EXTERNAL_CLOCK;
1168  } else {
1169  return AV_SYNC_EXTERNAL_CLOCK;
1170  }
1171 }
1172 
1173 /* get the current master clock value */
1174 static double get_master_clock(VideoState *is)
1175 {
1176  double val;
1177 
1178  switch (get_master_sync_type(is)) {
1179  case AV_SYNC_VIDEO_MASTER:
1180  val = get_clock(&is->vidclk);
1181  break;
1182  case AV_SYNC_AUDIO_MASTER:
1183  val = get_clock(&is->audclk);
1184  break;
1185  default:
1186  val = get_clock(&is->extclk);
1187  break;
1188  }
1189  return val;
1190 }
1191 
1193  if (is->video_stream >= 0 && is->videoq.nb_packets <= MIN_FRAMES / 2 ||
1194  is->audio_stream >= 0 && is->audioq.nb_packets <= MIN_FRAMES / 2) {
1196  } else if ((is->video_stream < 0 || is->videoq.nb_packets > MIN_FRAMES * 2) &&
1197  (is->audio_stream < 0 || is->audioq.nb_packets > MIN_FRAMES * 2)) {
1199  } else {
1200  double speed = is->extclk.speed;
1201  if (speed != 1.0)
1202  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1203  }
1204 }
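/* In outline, check_external_clock_speed() keeps is->extclk.speed within
 * [EXTERNAL_CLOCK_SPEED_MIN, EXTERNAL_CLOCK_SPEED_MAX] for realtime sources:
 * the clock is slowed down while the packet queues run low and sped up while
 * they are well filled, and the branch visible above moves the speed back
 * toward 1.0 in EXTERNAL_CLOCK_SPEED_STEP-sized increments once buffering is
 * back in the normal range. */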
1205 
1206 /* seek in the stream */
1207 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1208 {
1209  if (!is->seek_req) {
1210  is->seek_pos = pos;
1211  is->seek_rel = rel;
1212  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1213  if (seek_by_bytes)
1215  is->seek_req = 1;
1216  SDL_CondSignal(is->continue_read_thread);
1217  }
1218 }
1219 
1220 /* pause or resume the video */
1222 {
1223  if (is->paused) {
1224  is->frame_timer += av_gettime() / 1000000.0 + is->vidclk.pts_drift - is->vidclk.pts;
1225  if (is->read_pause_return != AVERROR(ENOSYS)) {
1226  is->vidclk.paused = 0;
1227  }
1228  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1229  }
1230  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1231  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1232 }
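/* When resuming, the expression above adds (now - vidclk.last_updated), i.e.
 * the time spent paused since the video clock was last updated, to
 * frame_timer, so the next frame is not considered late the moment playback
 * restarts; the clocks themselves are then re-anchored via set_clock(). */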
1233 
1234 static void toggle_pause(VideoState *is)
1235 {
1236  stream_toggle_pause(is);
1237  is->step = 0;
1238 }
1239 
1241 {
1242  /* if the stream is paused unpause it, then step */
1243  if (is->paused)
1244  stream_toggle_pause(is);
1245  is->step = 1;
1246 }
1247 
1248 static double compute_target_delay(double delay, VideoState *is)
1249 {
1250  double sync_threshold, diff;
1251 
1252  /* update delay to follow master synchronisation source */
1254  /* if video is slave, we try to correct big delays by
1255  duplicating or deleting a frame */
1256  diff = get_clock(&is->vidclk) - get_master_clock(is);
1257 
1258  /* skip or repeat frame. We take into account the
1259  delay to compute the threshold. I still don't know
1260  if it is the best guess */
1261  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1262  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1263  if (diff <= -sync_threshold)
1264  delay = FFMAX(0, delay + diff);
1265  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1266  delay = delay + diff;
1267  else if (diff >= sync_threshold)
1268  delay = 2 * delay;
1269  }
1270  }
1271 
1272  av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1273  delay, -diff);
1274 
1275  return delay;
1276 }
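/* Worked example for compute_target_delay(): with a nominal delay of 0.040s
 * (25 fps) the sync threshold clamps to 0.040s. If the video clock is 0.080s
 * behind the master (diff = -0.080), the delay becomes
 * FFMAX(0, 0.040 - 0.080) = 0 and the frame is shown immediately to catch up.
 * If the video is 0.080s ahead instead, delay is not above
 * AV_SYNC_FRAMEDUP_THRESHOLD, so it is doubled to 0.080s, keeping the current
 * picture on screen for an extra interval. Values are illustrative only. */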
1277 
1278 static void pictq_next_picture(VideoState *is) {
1279  /* update queue size and signal for next picture */
1281  is->pictq_rindex = 0;
1282 
1283  SDL_LockMutex(is->pictq_mutex);
1284  is->pictq_size--;
1285  SDL_CondSignal(is->pictq_cond);
1286  SDL_UnlockMutex(is->pictq_mutex);
1287 }
1288 
1290  VideoPicture *prevvp;
1291  int ret = 0;
1292  /* update queue size and signal for the previous picture */
1294  if (prevvp->allocated && prevvp->serial == is->videoq.serial) {
1295  SDL_LockMutex(is->pictq_mutex);
1297  if (--is->pictq_rindex == -1)
1299  is->pictq_size++;
1300  ret = 1;
1301  }
1302  SDL_CondSignal(is->pictq_cond);
1303  SDL_UnlockMutex(is->pictq_mutex);
1304  }
1305  return ret;
1306 }
1307 
1308 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1309  /* update current video pts */
1310  set_clock(&is->vidclk, pts, serial);
1311  sync_clock_to_slave(&is->extclk, &is->vidclk);
1312  is->video_current_pos = pos;
1313  is->frame_last_pts = pts;
1314 }
1315 
1316 /* called to display each frame */
1317 static void video_refresh(void *opaque, double *remaining_time)
1318 {
1319  VideoState *is = opaque;
1320  VideoPicture *vp;
1321  double time;
1322 
1323  SubPicture *sp, *sp2;
1324 
1325  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1327 
1328  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1329  time = av_gettime() / 1000000.0;
1330  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1331  video_display(is);
1332  is->last_vis_time = time;
1333  }
1334  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1335  }
1336 
1337  if (is->video_st) {
1338  int redisplay = 0;
1339  if (is->force_refresh)
1340  redisplay = pictq_prev_picture(is);
1341 retry:
1342  if (is->pictq_size == 0) {
1343  SDL_LockMutex(is->pictq_mutex);
1347  }
1348  SDL_UnlockMutex(is->pictq_mutex);
1349  // nothing to do, no picture to display in the queue
1350  } else {
1351  double last_duration, duration, delay;
1352  /* dequeue the picture */
1353  vp = &is->pictq[is->pictq_rindex];
1354 
1355  if (vp->serial != is->videoq.serial) {
1356  pictq_next_picture(is);
1357  redisplay = 0;
1358  goto retry;
1359  }
1360 
1361  if (is->paused)
1362  goto display;
1363 
1364  /* compute nominal last_duration */
1365  last_duration = vp->pts - is->frame_last_pts;
1366  if (!isnan(last_duration) && last_duration > 0 && last_duration < is->max_frame_duration) {
1367  /* if duration of the last frame was sane, update last_duration in video state */
1368  is->frame_last_duration = last_duration;
1369  }
1370  if (redisplay)
1371  delay = 0.0;
1372  else
1373  delay = compute_target_delay(is->frame_last_duration, is);
1374 
1375  time= av_gettime()/1000000.0;
1376  if (time < is->frame_timer + delay && !redisplay) {
1377  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1378  return;
1379  }
1380 
1381  is->frame_timer += delay;
1382  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1383  is->frame_timer = time;
1384 
1385  SDL_LockMutex(is->pictq_mutex);
1386  if (!redisplay && !isnan(vp->pts))
1387  update_video_pts(is, vp->pts, vp->pos, vp->serial);
1388  SDL_UnlockMutex(is->pictq_mutex);
1389 
1390  if (is->pictq_size > 1) {
1391  VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1392  duration = nextvp->pts - vp->pts;
1393  if(!is->step && (redisplay || framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1394  if (!redisplay)
1395  is->frame_drops_late++;
1396  pictq_next_picture(is);
1397  redisplay = 0;
1398  goto retry;
1399  }
1400  }
1401 
1402  if (is->subtitle_st) {
1403  while (is->subpq_size > 0) {
1404  sp = &is->subpq[is->subpq_rindex];
1405 
1406  if (is->subpq_size > 1)
1407  sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1408  else
1409  sp2 = NULL;
1410 
1411  if (sp->serial != is->subtitleq.serial
1412  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1413  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1414  {
1415  free_subpicture(sp);
1416 
1417  /* update queue size and signal for next picture */
1418  if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1419  is->subpq_rindex = 0;
1420 
1421  SDL_LockMutex(is->subpq_mutex);
1422  is->subpq_size--;
1423  SDL_CondSignal(is->subpq_cond);
1424  SDL_UnlockMutex(is->subpq_mutex);
1425  } else {
1426  break;
1427  }
1428  }
1429  }
1430 
1431 display:
1432  /* display picture */
1433  if (!display_disable && is->show_mode == SHOW_MODE_VIDEO)
1434  video_display(is);
1435 
1436  pictq_next_picture(is);
1437 
1438  if (is->step && !is->paused)
1439  stream_toggle_pause(is);
1440  }
1441  }
1442  is->force_refresh = 0;
1443  if (show_status) {
1444  static int64_t last_time;
1445  int64_t cur_time;
1446  int aqsize, vqsize, sqsize;
1447  double av_diff;
1448 
1449  cur_time = av_gettime();
1450  if (!last_time || (cur_time - last_time) >= 30000) {
1451  aqsize = 0;
1452  vqsize = 0;
1453  sqsize = 0;
1454  if (is->audio_st)
1455  aqsize = is->audioq.size;
1456  if (is->video_st)
1457  vqsize = is->videoq.size;
1458  if (is->subtitle_st)
1459  sqsize = is->subtitleq.size;
1460  av_diff = 0;
1461  if (is->audio_st && is->video_st)
1462  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1463  else if (is->video_st)
1464  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1465  else if (is->audio_st)
1466  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1467  av_log(NULL, AV_LOG_INFO,
1468  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1469  get_master_clock(is),
1470  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1471  av_diff,
1473  aqsize / 1024,
1474  vqsize / 1024,
1475  sqsize,
1478  fflush(stdout);
1479  last_time = cur_time;
1480  }
1481  }
1482 }
1483 
1484 /* allocate a picture (this needs to be done in the main thread to avoid
1485  potential locking problems) */
1486 static void alloc_picture(VideoState *is)
1487 {
1488  VideoPicture *vp;
1489  int64_t bufferdiff;
1490 
1491  vp = &is->pictq[is->pictq_windex];
1492 
1493  if (vp->bmp)
1494  SDL_FreeYUVOverlay(vp->bmp);
1495 
1496  video_open(is, 0, vp);
1497 
1498  vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1499  SDL_YV12_OVERLAY,
1500  screen);
1501  bufferdiff = vp->bmp ? FFMAX(vp->bmp->pixels[0], vp->bmp->pixels[1]) - FFMIN(vp->bmp->pixels[0], vp->bmp->pixels[1]) : 0;
1502  if (!vp->bmp || vp->bmp->pitches[0] < vp->width || bufferdiff < (int64_t)vp->height * vp->bmp->pitches[0]) {
1503  /* SDL allocates a buffer smaller than requested if the video
1504  * overlay hardware is unable to support the requested size. */
1505  av_log(NULL, AV_LOG_FATAL,
1506  "Error: the video system does not support an image\n"
1507  "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1508  "to reduce the image size.\n", vp->width, vp->height );
1509  do_exit(is);
1510  }
1511 
1512  SDL_LockMutex(is->pictq_mutex);
1513  vp->allocated = 1;
1514  SDL_CondSignal(is->pictq_cond);
1515  SDL_UnlockMutex(is->pictq_mutex);
1516 }
1517 
1518 static void duplicate_right_border_pixels(SDL_Overlay *bmp) {
1519  int i, width, height;
1520  Uint8 *p, *maxp;
1521  for (i = 0; i < 3; i++) {
1522  width = bmp->w;
1523  height = bmp->h;
1524  if (i > 0) {
1525  width >>= 1;
1526  height >>= 1;
1527  }
1528  if (bmp->pitches[i] > width) {
1529  maxp = bmp->pixels[i] + bmp->pitches[i] * height - 1;
1530  for (p = bmp->pixels[i] + width - 1; p < maxp; p += bmp->pitches[i])
1531  *(p+1) = *p;
1532  }
1533  }
1534 }
1535 
1536 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos, int serial)
1537 {
1538  VideoPicture *vp;
1539 
1540 #if defined(DEBUG_SYNC) && 0
1541  printf("frame_type=%c pts=%0.3f\n",
1542  av_get_picture_type_char(src_frame->pict_type), pts);
1543 #endif
1544 
1545  /* wait until we have space to put a new picture */
1546  SDL_LockMutex(is->pictq_mutex);
1547 
1548  /* keep the last already displayed picture in the queue */
1549  while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE - 1 &&
1550  !is->videoq.abort_request) {
1551  SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1552  }
1553  SDL_UnlockMutex(is->pictq_mutex);
1554 
1555  if (is->videoq.abort_request)
1556  return -1;
1557 
1558  vp = &is->pictq[is->pictq_windex];
1559 
1560  vp->sar = src_frame->sample_aspect_ratio;
1561 
1562  /* alloc or resize hardware picture buffer */
1563  if (!vp->bmp || vp->reallocate || !vp->allocated ||
1564  vp->width != src_frame->width ||
1565  vp->height != src_frame->height) {
1566  SDL_Event event;
1567 
1568  vp->allocated = 0;
1569  vp->reallocate = 0;
1570  vp->width = src_frame->width;
1571  vp->height = src_frame->height;
1572 
1573  /* the allocation must be done in the main thread to avoid
1574  locking problems. */
1575  event.type = FF_ALLOC_EVENT;
1576  event.user.data1 = is;
1577  SDL_PushEvent(&event);
1578 
1579  /* wait until the picture is allocated */
1580  SDL_LockMutex(is->pictq_mutex);
1581  while (!vp->allocated && !is->videoq.abort_request) {
1582  SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1583  }
1584  /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1585  if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1586  while (!vp->allocated) {
1587  SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1588  }
1589  }
1590  SDL_UnlockMutex(is->pictq_mutex);
1591 
1592  if (is->videoq.abort_request)
1593  return -1;
1594  }
1595 
1596  /* if the frame is not skipped, then display it */
1597  if (vp->bmp) {
1598  AVPicture pict = { { 0 } };
1599 
1600  /* get a pointer to the bitmap */
1601  SDL_LockYUVOverlay (vp->bmp);
1602 
1603  pict.data[0] = vp->bmp->pixels[0];
1604  pict.data[1] = vp->bmp->pixels[2];
1605  pict.data[2] = vp->bmp->pixels[1];
1606 
1607  pict.linesize[0] = vp->bmp->pitches[0];
1608  pict.linesize[1] = vp->bmp->pitches[2];
1609  pict.linesize[2] = vp->bmp->pitches[1];
1610 
1611 #if CONFIG_AVFILTER
1612  // FIXME use direct rendering
1613  av_picture_copy(&pict, (AVPicture *)src_frame,
1614  src_frame->format, vp->width, vp->height);
1615 #else
1616  av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1618  vp->width, vp->height, src_frame->format, vp->width, vp->height,
1619  AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1620  if (is->img_convert_ctx == NULL) {
1621  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1622  exit(1);
1623  }
1624  sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1625  0, vp->height, pict.data, pict.linesize);
1626 #endif
1627  /* workaround SDL PITCH_WORKAROUND */
1629  /* update the bitmap content */
1630  SDL_UnlockYUVOverlay(vp->bmp);
1631 
1632  vp->pts = pts;
1633  vp->pos = pos;
1634  vp->serial = serial;
1635 
1636  /* now we can update the picture count */
1638  is->pictq_windex = 0;
1639  SDL_LockMutex(is->pictq_mutex);
1640  is->pictq_size++;
1641  SDL_UnlockMutex(is->pictq_mutex);
1642  }
1643  return 0;
1644 }
1645 
1646 static int get_video_frame(VideoState *is, AVFrame *frame, AVPacket *pkt, int *serial)
1647 {
1648  int got_picture;
1649 
1650  if (packet_queue_get(&is->videoq, pkt, 1, serial) < 0)
1651  return -1;
1652 
1653  if (pkt->data == flush_pkt.data) {
1655 
1656  SDL_LockMutex(is->pictq_mutex);
1657  // Make sure there are no long delay timers (ideally we should just flush the queue but that's harder)
1658  while (is->pictq_size && !is->videoq.abort_request) {
1659  SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1660  }
1661  is->video_current_pos = -1;
1663  is->frame_last_duration = 0;
1664  is->frame_timer = (double)av_gettime() / 1000000.0;
1666  SDL_UnlockMutex(is->pictq_mutex);
1667  return 0;
1668  }
1669 
1670  if(avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt) < 0)
1671  return 0;
1672 
1673  if (got_picture) {
1674  int ret = 1;
1675  double dpts = NAN;
1676 
1677  if (decoder_reorder_pts == -1) {
1678  frame->pts = av_frame_get_best_effort_timestamp(frame);
1679  } else if (decoder_reorder_pts) {
1680  frame->pts = frame->pkt_pts;
1681  } else {
1682  frame->pts = frame->pkt_dts;
1683  }
1684 
1685  if (frame->pts != AV_NOPTS_VALUE)
1686  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1687 
1688  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1689 
1691  SDL_LockMutex(is->pictq_mutex);
1692  if (is->frame_last_pts != AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE) {
1693  double clockdiff = get_clock(&is->vidclk) - get_master_clock(is);
1694  double ptsdiff = dpts - is->frame_last_pts;
1695  if (!isnan(clockdiff) && fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
1696  !isnan(ptsdiff) && ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
1697  clockdiff + ptsdiff - is->frame_last_filter_delay < 0 &&
1698  is->videoq.nb_packets) {
1699  is->frame_last_dropped_pos = pkt->pos;
1700  is->frame_last_dropped_pts = dpts;
1701  is->frame_last_dropped_serial = *serial;
1702  is->frame_drops_early++;
1703  av_frame_unref(frame);
1704  ret = 0;
1705  }
1706  }
1707  SDL_UnlockMutex(is->pictq_mutex);
1708  }
1709 
1710  return ret;
1711  }
1712  return 0;
1713 }
1714 
1715 #if CONFIG_AVFILTER
1716 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1717  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1718 {
1719  int ret;
1720  AVFilterInOut *outputs = NULL, *inputs = NULL;
1721 
1722  if (filtergraph) {
1723  outputs = avfilter_inout_alloc();
1724  inputs = avfilter_inout_alloc();
1725  if (!outputs || !inputs) {
1726  ret = AVERROR(ENOMEM);
1727  goto fail;
1728  }
1729 
1730  outputs->name = av_strdup("in");
1731  outputs->filter_ctx = source_ctx;
1732  outputs->pad_idx = 0;
1733  outputs->next = NULL;
1734 
1735  inputs->name = av_strdup("out");
1736  inputs->filter_ctx = sink_ctx;
1737  inputs->pad_idx = 0;
1738  inputs->next = NULL;
1739 
1740  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1741  goto fail;
1742  } else {
1743  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1744  goto fail;
1745  }
1746 
1747  ret = avfilter_graph_config(graph, NULL);
1748 fail:
1749  avfilter_inout_free(&outputs);
1750  avfilter_inout_free(&inputs);
1751  return ret;
1752 }
1753 
1754 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1755 {
1756  static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
1757  char sws_flags_str[128];
1758  char buffersrc_args[256];
1759  int ret;
1760  AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_crop;
1761  AVCodecContext *codec = is->video_st->codec;
1762  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1763 
1764  av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1765  snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1766  graph->scale_sws_opts = av_strdup(sws_flags_str);
1767 
1768  snprintf(buffersrc_args, sizeof(buffersrc_args),
1769  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1770  frame->width, frame->height, frame->format,
1772  codec->sample_aspect_ratio.num, FFMAX(codec->sample_aspect_ratio.den, 1));
1773  if (fr.num && fr.den)
1774  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1775 
1776  if ((ret = avfilter_graph_create_filter(&filt_src,
1777  avfilter_get_by_name("buffer"),
1778  "ffplay_buffer", buffersrc_args, NULL,
1779  graph)) < 0)
1780  goto fail;
1781 
1782  ret = avfilter_graph_create_filter(&filt_out,
1783  avfilter_get_by_name("buffersink"),
1784  "ffplay_buffersink", NULL, NULL, graph);
1785  if (ret < 0)
1786  goto fail;
1787 
1788  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1789  goto fail;
1790 
1791  /* The SDL YUV code does not handle odd width/height for some driver
1792  * combinations, therefore we crop the picture to an even width/height. */
1793  if ((ret = avfilter_graph_create_filter(&filt_crop,
1794  avfilter_get_by_name("crop"),
1795  "ffplay_crop", "floor(in_w/2)*2:floor(in_h/2)*2", NULL, graph)) < 0)
1796  goto fail;
1797  if ((ret = avfilter_link(filt_crop, 0, filt_out, 0)) < 0)
1798  goto fail;
1799 
1800  if ((ret = configure_filtergraph(graph, vfilters, filt_src, filt_crop)) < 0)
1801  goto fail;
1802 
1803  is->in_video_filter = filt_src;
1804  is->out_video_filter = filt_out;
1805 
1806 fail:
1807  return ret;
1808 }
1809 
1810 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1811 {
1813  int sample_rates[2] = { 0, -1 };
1814  int64_t channel_layouts[2] = { 0, -1 };
1815  int channels[2] = { 0, -1 };
1816  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1817  char asrc_args[256];
1818  int ret;
1819 
1820  avfilter_graph_free(&is->agraph);
1821  if (!(is->agraph = avfilter_graph_alloc()))
1822  return AVERROR(ENOMEM);
1823 
1824  ret = snprintf(asrc_args, sizeof(asrc_args),
1825  "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
1826  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
1827  is->audio_filter_src.channels,
1828  1, is->audio_filter_src.freq);
1829  if (is->audio_filter_src.channel_layout)
1830  snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
1831  ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
1832 
1833  ret = avfilter_graph_create_filter(&filt_asrc,
1834  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1835  asrc_args, NULL, is->agraph);
1836  if (ret < 0)
1837  goto end;
1838 
1839 
1840  ret = avfilter_graph_create_filter(&filt_asink,
1841  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
1842  NULL, NULL, is->agraph);
1843  if (ret < 0)
1844  goto end;
1845 
1846  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1847  goto end;
1848  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1849  goto end;
1850 
1851  if (force_output_format) {
1852  channel_layouts[0] = is->audio_tgt.channel_layout;
1853  channels [0] = is->audio_tgt.channels;
1854  sample_rates [0] = is->audio_tgt.freq;
1855  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
1856  goto end;
1857  if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1858  goto end;
1859  if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1860  goto end;
1861  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1862  goto end;
1863  }
1864 
1865 
1866  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
1867  goto end;
1868 
1869  is->in_audio_filter = filt_asrc;
1870  is->out_audio_filter = filt_asink;
1871 
1872 end:
1873  if (ret < 0)
1874  avfilter_graph_free(&is->agraph);
1875  return ret;
1876 }
1877 #endif /* CONFIG_AVFILTER */
1878 
1879 static int video_thread(void *arg)
1880 {
1881  AVPacket pkt = { 0 };
1882  VideoState *is = arg;
1883  AVFrame *frame = av_frame_alloc();
1884  double pts;
1885  int ret;
1886  int serial = 0;
1887 
1888 #if CONFIG_AVFILTER
1890  AVFilterContext *filt_out = NULL, *filt_in = NULL;
1891  int last_w = 0;
1892  int last_h = 0;
1893  enum AVPixelFormat last_format = -2;
1894  int last_serial = -1;
1895 #endif
1896 
1897  for (;;) {
1898  while (is->paused && !is->videoq.abort_request)
1899  SDL_Delay(10);
1900 
1902  av_free_packet(&pkt);
1903 
1904  ret = get_video_frame(is, frame, &pkt, &serial);
1905  if (ret < 0)
1906  goto the_end;
1907  if (!ret)
1908  continue;
1909 
1910 #if CONFIG_AVFILTER
1911  if ( last_w != frame->width
1912  || last_h != frame->height
1913  || last_format != frame->format
1914  || last_serial != serial) {
1915  av_log(NULL, AV_LOG_DEBUG,
1916  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
1917  last_w, last_h,
1918  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
1919  frame->width, frame->height,
1920  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), serial);
1921  avfilter_graph_free(&graph);
1922  graph = avfilter_graph_alloc();
1923  if ((ret = configure_video_filters(graph, is, vfilters, frame)) < 0) {
1924  SDL_Event event;
1925  event.type = FF_QUIT_EVENT;
1926  event.user.data1 = is;
1927  SDL_PushEvent(&event);
1928  av_free_packet(&pkt);
1929  goto the_end;
1930  }
1931  filt_in = is->in_video_filter;
1932  filt_out = is->out_video_filter;
1933  last_w = frame->width;
1934  last_h = frame->height;
1935  last_format = frame->format;
1936  last_serial = serial;
1937  }
1938 
1939  ret = av_buffersrc_add_frame(filt_in, frame);
1940  if (ret < 0)
1941  goto the_end;
1942  av_frame_unref(frame);
1944  av_free_packet(&pkt);
1945 
1946  while (ret >= 0) {
1947  is->frame_last_returned_time = av_gettime() / 1000000.0;
1948 
1949  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
1950  if (ret < 0) {
1951  ret = 0;
1952  break;
1953  }
1954 
1956  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
1957  is->frame_last_filter_delay = 0;
1958 
1959  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(filt_out->inputs[0]->time_base);
1960  ret = queue_picture(is, frame, pts, av_frame_get_pkt_pos(frame), serial);
1961  av_frame_unref(frame);
1962  }
1963 #else
1964  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(is->video_st->time_base);
1965  ret = queue_picture(is, frame, pts, pkt.pos, serial);
1966  av_frame_unref(frame);
1967 #endif
1968 
1969  if (ret < 0)
1970  goto the_end;
1971  }
1972  the_end:
1974 #if CONFIG_AVFILTER
1975  avfilter_graph_free(&graph);
1976 #endif
1977  av_free_packet(&pkt);
1978  av_frame_free(&frame);
1979  return 0;
1980 }
1981 
1982 static int subtitle_thread(void *arg)
1983 {
1984  VideoState *is = arg;
1985  SubPicture *sp;
1986  AVPacket pkt1, *pkt = &pkt1;
1987  int got_subtitle;
1988  int serial;
1989  double pts;
1990  int i, j;
1991  int r, g, b, y, u, v, a;
1992 
1993  for (;;) {
1994  while (is->paused && !is->subtitleq.abort_request) {
1995  SDL_Delay(10);
1996  }
1997  if (packet_queue_get(&is->subtitleq, pkt, 1, &serial) < 0)
1998  break;
1999 
2000  if (pkt->data == flush_pkt.data) {
2002  continue;
2003  }
2004  SDL_LockMutex(is->subpq_mutex);
2005  while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
2006  !is->subtitleq.abort_request) {
2007  SDL_CondWait(is->subpq_cond, is->subpq_mutex);
2008  }
2009  SDL_UnlockMutex(is->subpq_mutex);
2010 
2011  if (is->subtitleq.abort_request)
2012  return 0;
2013 
2014  sp = &is->subpq[is->subpq_windex];
2015 
2016  /* NOTE: ipts is the PTS of the _first_ picture beginning in
2017  this packet, if any */
2018  pts = 0;
2019  if (pkt->pts != AV_NOPTS_VALUE)
2020  pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
2021 
2023  &got_subtitle, pkt);
2024  if (got_subtitle && sp->sub.format == 0) {
2025  if (sp->sub.pts != AV_NOPTS_VALUE)
2026  pts = sp->sub.pts / (double)AV_TIME_BASE;
2027  sp->pts = pts;
2028  sp->serial = serial;
2029 
2030  for (i = 0; i < sp->sub.num_rects; i++)
2031  {
2032  for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
2033  {
2034  RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
2035  y = RGB_TO_Y_CCIR(r, g, b);
2036  u = RGB_TO_U_CCIR(r, g, b, 0);
2037  v = RGB_TO_V_CCIR(r, g, b, 0);
2038  YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
2039  }
2040  }
2041 
2042  /* now we can update the picture count */
2043  if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
2044  is->subpq_windex = 0;
2045  SDL_LockMutex(is->subpq_mutex);
2046  is->subpq_size++;
2047  SDL_UnlockMutex(is->subpq_mutex);
2048  }
2049  av_free_packet(pkt);
2050  }
2051  return 0;
2052 }
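/* Illustrative sketch (not part of ffplay.c): the palette loop above maps each RGBA
 * entry to YUVA with the BT.601 ("CCIR") studio-range equations behind the
 * RGB_TO_Y_CCIR/RGB_TO_U_CCIR/RGB_TO_V_CCIR macros, roughly:
 *   Y = 16  + ( 66*R + 129*G +  25*B) / 256
 *   U = 128 + (-38*R -  74*G + 112*B) / 256
 *   V = 128 + (112*R -  94*G -  18*B) / 256
 * The helper below is a hypothetical standalone version for reference only. */
static void rgb_to_ycbcr_ccir(int r, int g, int b, int *y, int *cb, int *cr)
{
    *y  =  16 + (( 66 * r + 129 * g +  25 * b + 128) >> 8);
    *cb = 128 + ((-38 * r -  74 * g + 112 * b + 128) >> 8);
    *cr = 128 + ((112 * r -  94 * g -  18 * b + 128) >> 8);
}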
2053 
2054 /* copy samples for viewing in editor window */
2055 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2056 {
2057  int size, len;
2058 
2059  size = samples_size / sizeof(short);
2060  while (size > 0) {
2061  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2062  if (len > size)
2063  len = size;
2064  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2065  samples += len;
2066  is->sample_array_index += len;
2067  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2068  is->sample_array_index = 0;
2069  size -= len;
2070  }
2071 }
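/* Illustrative sketch (not part of ffplay.c): update_sample_display() above is a plain
 * ring-buffer write into is->sample_array. A generic version of the same pattern, with
 * hypothetical names (assumes <string.h> for memcpy): */
static void ring_write(short *ring, int ring_size, int *windex,
                       const short *src, int count)
{
    while (count > 0) {
        int chunk = ring_size - *windex;   /* room left before the wrap point */
        if (chunk > count)
            chunk = count;
        memcpy(ring + *windex, src, chunk * sizeof(*ring));
        src     += chunk;
        *windex += chunk;
        if (*windex >= ring_size)
            *windex = 0;                   /* wrap back to the start */
        count   -= chunk;
    }
}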
2072 
2073 /* return the wanted number of samples to get better sync if sync_type is video
2074  * or external master clock */
2075 static int synchronize_audio(VideoState *is, int nb_samples)
2076 {
2077  int wanted_nb_samples = nb_samples;
2078 
2079  /* if not master, then we try to remove or add samples to correct the clock */
2080  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2081  double diff, avg_diff;
2082  int min_nb_samples, max_nb_samples;
2083 
2084  diff = get_clock(&is->audclk) - get_master_clock(is);
2085 
2086  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2087  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2088  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2089  /* not enough measures to have a correct estimate */
2090  is->audio_diff_avg_count++;
2091  } else {
2092  /* estimate the A-V difference */
2093  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2094 
2095  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2096  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2097  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2098  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2099  wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
2100  }
2101  av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2102  diff, avg_diff, wanted_nb_samples - nb_samples,
2103  is->audio_clock, is->audio_diff_threshold);
2104  }
2105  } else {
2106  /* too big difference : may be initial PTS errors, so
2107  reset A-V filter */
2108  is->audio_diff_avg_count = 0;
2109  is->audio_diff_cum = 0;
2110  }
2111  }
2112 
2113  return wanted_nb_samples;
2114 }
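/* Illustrative sketch (not part of ffplay.c): the correction above adjusts the sample
 * count by diff * freq and then clamps it to +/- SAMPLE_CORRECTION_PERCENT_MAX percent
 * of the original count. For example, 1024 samples at 44100 Hz with an A-V diff of
 * +5 ms gives wanted = 1024 + (int)(0.005 * 44100) = 1244, while the upper bound is
 * 1024 * 110 / 100 = 1126, so the result is clamped to 1126. A hypothetical standalone
 * helper doing the same clamp: */
static int clamp_wanted_samples(int nb_samples, double diff, int freq, int max_pct)
{
    int wanted = nb_samples + (int)(diff * freq);
    int min_nb = nb_samples * (100 - max_pct) / 100;
    int max_nb = nb_samples * (100 + max_pct) / 100;
    if (wanted < min_nb) wanted = min_nb;
    if (wanted > max_nb) wanted = max_nb;
    return wanted;
}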
2115 
2116 /**
2117  * Decode one audio frame and return its uncompressed size.
2118  *
2119  * The processed audio frame is decoded, converted if required, and
2120  * stored in is->audio_buf, with size in bytes given by the return
2121  * value.
2122  */
2123  static int audio_decode_frame(VideoState *is)
2124 {
2125  AVPacket *pkt_temp = &is->audio_pkt_temp;
2126  AVPacket *pkt = &is->audio_pkt;
2127  AVCodecContext *dec = is->audio_st->codec;
2128  int len1, data_size, resampled_data_size;
2129  int64_t dec_channel_layout;
2130  int got_frame;
2131  av_unused double audio_clock0;
2132  int new_packet = 0;
2133  int flush_complete = 0;
2134  int wanted_nb_samples;
2135  AVRational tb;
2136  int ret;
2137  int reconfigure;
2138 
2139  for (;;) {
2140  /* NOTE: the audio packet can contain several frames */
2141  while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet) || is->audio_buf_frames_pending) {
2142  if (!is->frame) {
2143  if (!(is->frame = avcodec_alloc_frame()))
2144  return AVERROR(ENOMEM);
2145  } else {
2146  av_frame_unref(is->frame);
2148  }
2149 
2150  if (is->audioq.serial != is->audio_pkt_temp_serial)
2151  break;
2152 
2153  if (is->paused)
2154  return -1;
2155 
2156  if (!is->audio_buf_frames_pending) {
2157  if (flush_complete)
2158  break;
2159  new_packet = 0;
2160  len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
2161  if (len1 < 0) {
2162  /* if error, we skip the frame */
2163  pkt_temp->size = 0;
2164  break;
2165  }
2166 
2167  pkt_temp->data += len1;
2168  pkt_temp->size -= len1;
2169 
2170  if (!got_frame) {
2171  /* stop sending empty packets if the decoder is finished */
2172  if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
2173  flush_complete = 1;
2174  continue;
2175  }
2176 
2177  tb = (AVRational){1, is->frame->sample_rate};
2178  if (is->frame->pts != AV_NOPTS_VALUE)
2179  is->frame->pts = av_rescale_q(is->frame->pts, dec->time_base, tb);
2180  else if (is->frame->pkt_pts != AV_NOPTS_VALUE)
2181  is->frame->pts = av_rescale_q(is->frame->pkt_pts, is->audio_st->time_base, tb);
2182  if (pkt_temp->pts != AV_NOPTS_VALUE)
2183  pkt_temp->pts += (double) is->frame->nb_samples / is->frame->sample_rate / av_q2d(is->audio_st->time_base);
2184 
2185 #if CONFIG_AVFILTER
2186  dec_channel_layout = get_valid_channel_layout(is->frame->channel_layout, av_frame_get_channels(is->frame));
2187 
2188  reconfigure =
2189  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2190  is->frame->format, av_frame_get_channels(is->frame)) ||
2191  is->audio_filter_src.channel_layout != dec_channel_layout ||
2192  is->audio_filter_src.freq != is->frame->sample_rate ||
2193  is->audio_pkt_temp_serial != is->audio_last_serial;
2194 
2195  if (reconfigure) {
2196  char buf1[1024], buf2[1024];
2197  av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2198  av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2199  av_log(NULL, AV_LOG_DEBUG,
2200  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2201  is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, is->audio_last_serial,
2202  is->frame->sample_rate, av_frame_get_channels(is->frame), av_get_sample_fmt_name(is->frame->format), buf2, is->audio_pkt_temp_serial);
2203 
2204  is->audio_filter_src.fmt = is->frame->format;
2205  is->audio_filter_src.channels = av_frame_get_channels(is->frame);
2206  is->audio_filter_src.channel_layout = dec_channel_layout;
2207  is->audio_filter_src.freq = is->frame->sample_rate;
2208  is->audio_last_serial = is->audio_pkt_temp_serial;
2209 
2210  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2211  return ret;
2212  }
2213 
2214  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, is->frame)) < 0)
2215  return ret;
2216  av_frame_unref(is->frame);
2217 #endif
2218  }
2219 #if CONFIG_AVFILTER
2220  if ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, is->frame, 0)) < 0) {
2221  if (ret == AVERROR(EAGAIN)) {
2222  is->audio_buf_frames_pending = 0;
2223  continue;
2224  }
2225  return ret;
2226  }
2227  is->audio_buf_frames_pending = 1;
2228  tb = is->out_audio_filter->inputs[0]->time_base;
2229 #endif
2230 
2231  data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(is->frame),
2232  is->frame->nb_samples,
2233  is->frame->format, 1);
2234 
2235  dec_channel_layout =
2236  (is->frame->channel_layout && av_frame_get_channels(is->frame) == av_get_channel_layout_nb_channels(is->frame->channel_layout)) ?
2237  is->frame->channel_layout : av_get_default_channel_layout(av_frame_get_channels(is->frame));
2238  wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
2239 
2240  if (is->frame->format != is->audio_src.fmt ||
2241  dec_channel_layout != is->audio_src.channel_layout ||
2242  is->frame->sample_rate != is->audio_src.freq ||
2243  (wanted_nb_samples != is->frame->nb_samples && !is->swr_ctx)) {
2244  swr_free(&is->swr_ctx);
2245  is->swr_ctx = swr_alloc_set_opts(NULL,
2246  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2247  dec_channel_layout, is->frame->format, is->frame->sample_rate,
2248  0, NULL);
2249  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2250  av_log(NULL, AV_LOG_ERROR,
2251  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2252  is->frame->sample_rate, av_get_sample_fmt_name(is->frame->format), av_frame_get_channels(is->frame),
2253  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2254  break;
2255  }
2256  is->audio_src.channel_layout = dec_channel_layout;
2257  is->audio_src.channels = av_frame_get_channels(is->frame);
2258  is->audio_src.freq = is->frame->sample_rate;
2259  is->audio_src.fmt = is->frame->format;
2260  }
2261 
2262  if (is->swr_ctx) {
2263  const uint8_t **in = (const uint8_t **)is->frame->extended_data;
2264  uint8_t **out = &is->audio_buf1;
2265  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate + 256;
2266  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2267  int len2;
2268  if (out_size < 0) {
2269  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2270  break;
2271  }
2272  if (wanted_nb_samples != is->frame->nb_samples) {
2273  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / is->frame->sample_rate,
2274  wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate) < 0) {
2275  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2276  break;
2277  }
2278  }
2279  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2280  if (!is->audio_buf1)
2281  return AVERROR(ENOMEM);
2282  len2 = swr_convert(is->swr_ctx, out, out_count, in, is->frame->nb_samples);
2283  if (len2 < 0) {
2284  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2285  break;
2286  }
2287  if (len2 == out_count) {
2288  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2289  swr_init(is->swr_ctx);
2290  }
2291  is->audio_buf = is->audio_buf1;
2292  resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2293  } else {
2294  is->audio_buf = is->frame->data[0];
2295  resampled_data_size = data_size;
2296  }
2297 
2298  audio_clock0 = is->audio_clock;
2299  /* update the audio clock with the pts */
2300  if (is->frame->pts != AV_NOPTS_VALUE)
2301  is->audio_clock = is->frame->pts * av_q2d(tb) + (double) is->frame->nb_samples / is->frame->sample_rate;
2302  else
2303  is->audio_clock = NAN;
2304  is->audio_clock_serial = is->audio_pkt_temp_serial;
2305 #ifdef DEBUG
2306  {
2307  static double last_clock;
2308  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2309  is->audio_clock - last_clock,
2310  is->audio_clock, audio_clock0);
2311  last_clock = is->audio_clock;
2312  }
2313 #endif
2314  return resampled_data_size;
2315  }
2316 
2317  /* free the current packet */
2318  if (pkt->data)
2319  av_free_packet(pkt);
2320  memset(pkt_temp, 0, sizeof(*pkt_temp));
2321 
2322  if (is->audioq.abort_request) {
2323  return -1;
2324  }
2325 
2326  if (is->audioq.nb_packets == 0)
2327  SDL_CondSignal(is->continue_read_thread);
2328 
2329  /* read next packet */
2330  if ((new_packet = packet_queue_get(&is->audioq, pkt, 1, &is->audio_pkt_temp_serial)) < 0)
2331  return -1;
2332 
2333  if (pkt->data == flush_pkt.data) {
2334  avcodec_flush_buffers(dec);
2335  flush_complete = 0;
2336  is->audio_buf_frames_pending = 0;
2337  }
2338 
2339  *pkt_temp = *pkt;
2340  }
2341 }
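/* Illustrative sketch (not part of ffplay.c): the resampling branch above sizes its
 * output as wanted_nb_samples rescaled to the target rate plus a safety margin, and the
 * byte count returned to the caller is samples * channels * bytes_per_sample. For
 * example, converting 1024 samples from 48000 Hz to a 44100 Hz stereo S16 target:
 *   out_count = 1024 * 44100 / 48000 + 256 = 1196 samples (an upper bound)
 *   bytes     = converted_samples * 2 channels * 2 bytes per sample
 * A hypothetical helper computing that byte size: */
static int resampled_byte_size(int out_samples, int channels, enum AVSampleFormat fmt)
{
    return out_samples * channels * av_get_bytes_per_sample(fmt);
}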
2342 
2343 /* prepare a new audio buffer */
2344 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2345 {
2346  VideoState *is = opaque;
2347  int audio_size, len1;
2348  int bytes_per_sec;
2349  int frame_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, 1, is->audio_tgt.fmt, 1);
2350 
2351  audio_callback_time = av_gettime();
2352 
2353  while (len > 0) {
2354  if (is->audio_buf_index >= is->audio_buf_size) {
2355  audio_size = audio_decode_frame(is);
2356  if (audio_size < 0) {
2357  /* if error, just output silence */
2358  is->audio_buf = is->silence_buf;
2359  is->audio_buf_size = sizeof(is->silence_buf) / frame_size * frame_size;
2360  } else {
2361  if (is->show_mode != SHOW_MODE_VIDEO)
2362  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2363  is->audio_buf_size = audio_size;
2364  }
2365  is->audio_buf_index = 0;
2366  }
2367  len1 = is->audio_buf_size - is->audio_buf_index;
2368  if (len1 > len)
2369  len1 = len;
2370  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2371  len -= len1;
2372  stream += len1;
2373  is->audio_buf_index += len1;
2374  }
2375  bytes_per_sec = is->audio_tgt.freq * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2376  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2377  /* Let's assume the audio driver that is used by SDL has two periods. */
2378  if (!isnan(is->audio_clock)) {
2379  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2380  sync_clock_to_slave(&is->extclk, &is->audclk);
2381  }
2382 }
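/* Illustrative sketch (not part of ffplay.c): the callback above back-dates the audio
 * clock by the data still queued ahead of the hardware: two SDL periods of the hardware
 * buffer plus whatever is left unwritten in is->audio_buf. With a 44100 Hz stereo S16
 * target (bytes_per_sec = 176400), an 8192-byte hardware buffer and 4096 pending bytes:
 *   latency = (2 * 8192 + 4096) / 176400 ~= 0.116 s
 *   clock   = is->audio_clock - latency
 * A hypothetical helper expressing the same computation: */
static double audio_clock_estimate(double decoded_clock, int hw_buf_size,
                                   int write_buf_size, int bytes_per_sec)
{
    return decoded_clock - (double)(2 * hw_buf_size + write_buf_size) / bytes_per_sec;
}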
2383 
2384 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2385 {
2386  SDL_AudioSpec wanted_spec, spec;
2387  const char *env;
2388  const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2389 
2390  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2391  if (env) {
2392  wanted_nb_channels = atoi(env);
2393  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2394  }
2395  if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2396  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2397  wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2398  }
2399  wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2400  wanted_spec.freq = wanted_sample_rate;
2401  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2402  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2403  return -1;
2404  }
2405  wanted_spec.format = AUDIO_S16SYS;
2406  wanted_spec.silence = 0;
2407  wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2408  wanted_spec.callback = sdl_audio_callback;
2409  wanted_spec.userdata = opaque;
2410  while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2411  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels): %s\n", wanted_spec.channels, SDL_GetError());
2412  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2413  if (!wanted_spec.channels) {
2414  av_log(NULL, AV_LOG_ERROR,
2415  "No more channel combinations to try, audio open failed\n");
2416  return -1;
2417  }
2418  wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2419  }
2420  if (spec.format != AUDIO_S16SYS) {
2421  av_log(NULL, AV_LOG_ERROR,
2422  "SDL advised audio format %d is not supported!\n", spec.format);
2423  return -1;
2424  }
2425  if (spec.channels != wanted_spec.channels) {
2426  wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2427  if (!wanted_channel_layout) {
2428  av_log(NULL, AV_LOG_ERROR,
2429  "SDL advised channel count %d is not supported!\n", spec.channels);
2430  return -1;
2431  }
2432  }
2433 
2434  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2435  audio_hw_params->freq = spec.freq;
2436  audio_hw_params->channel_layout = wanted_channel_layout;
2437  audio_hw_params->channels = spec.channels;
2438  return spec.size;
2439 }
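/* Illustrative sketch (not part of ffplay.c): the loop above walks next_nb_channels[]
 * to retry SDL_OpenAudio() with a smaller layout when the requested one is refused,
 * e.g. 8 -> 6 -> 4 -> 2 -> 1 -> give up (the index is clamped to 7). A hypothetical
 * stripped-down version of the same probing strategy: */
static int open_audio_with_fallback(SDL_AudioSpec *wanted, SDL_AudioSpec *obtained)
{
    static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};

    while (SDL_OpenAudio(wanted, obtained) < 0) {
        wanted->channels = next_nb_channels[FFMIN(7, wanted->channels)];
        if (!wanted->channels)
            return -1;              /* nothing left to try */
    }
    return 0;                       /* 'obtained' holds what the driver accepted */
}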
2440 
2441 /* open a given stream. Return 0 if OK */
2442 static int stream_component_open(VideoState *is, int stream_index)
2443 {
2444  AVFormatContext *ic = is->ic;
2445  AVCodecContext *avctx;
2446  AVCodec *codec;
2447  const char *forced_codec_name = NULL;
2448  AVDictionary *opts;
2449  AVDictionaryEntry *t = NULL;
2450  int sample_rate, nb_channels;
2451  int64_t channel_layout;
2452  int ret;
2453 
2454  if (stream_index < 0 || stream_index >= ic->nb_streams)
2455  return -1;
2456  avctx = ic->streams[stream_index]->codec;
2457 
2458  codec = avcodec_find_decoder(avctx->codec_id);
2459 
2460  switch(avctx->codec_type){
2461  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2462  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2463  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2464  }
2465  if (forced_codec_name)
2466  codec = avcodec_find_decoder_by_name(forced_codec_name);
2467  if (!codec) {
2468  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2469  "No codec could be found with name '%s'\n", forced_codec_name);
2470  else av_log(NULL, AV_LOG_WARNING,
2471  "No codec could be found with id %d\n", avctx->codec_id);
2472  return -1;
2473  }
2474 
2475  avctx->codec_id = codec->id;
2476  avctx->workaround_bugs = workaround_bugs;
2477  avctx->lowres = lowres;
2478  if(avctx->lowres > codec->max_lowres){
2479  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2480  codec->max_lowres);
2481  avctx->lowres= codec->max_lowres;
2482  }
2483  avctx->idct_algo = idct;
2484  avctx->error_concealment = error_concealment;
2485 
2486  if(avctx->lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2487  if (fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2488  if(codec->capabilities & CODEC_CAP_DR1)
2489  avctx->flags |= CODEC_FLAG_EMU_EDGE;
2490 
2491  opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2492  if (!av_dict_get(opts, "threads", NULL, 0))
2493  av_dict_set(&opts, "threads", "auto", 0);
2494  if (avctx->lowres)
2495  av_dict_set(&opts, "lowres", av_asprintf("%d", avctx->lowres), AV_DICT_DONT_STRDUP_VAL);
2496  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2497  av_dict_set(&opts, "refcounted_frames", "1", 0);
2498  if (avcodec_open2(avctx, codec, &opts) < 0)
2499  return -1;
2500  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2501  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2502  return AVERROR_OPTION_NOT_FOUND;
2503  }
2504 
2505  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2506  switch (avctx->codec_type) {
2507  case AVMEDIA_TYPE_AUDIO:
2508 #if CONFIG_AVFILTER
2509  {
2510  AVFilterLink *link;
2511 
2512  is->audio_filter_src.freq = avctx->sample_rate;
2513  is->audio_filter_src.channels = avctx->channels;
2514  is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2515  is->audio_filter_src.fmt = avctx->sample_fmt;
2516  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2517  return ret;
2518  link = is->out_audio_filter->inputs[0];
2519  sample_rate = link->sample_rate;
2520  nb_channels = link->channels;
2521  channel_layout = link->channel_layout;
2522  }
2523 #else
2524  sample_rate = avctx->sample_rate;
2525  nb_channels = avctx->channels;
2526  channel_layout = avctx->channel_layout;
2527 #endif
2528 
2529  /* prepare audio output */
2530  if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2531  return ret;
2532  is->audio_hw_buf_size = ret;
2533  is->audio_src = is->audio_tgt;
2534  is->audio_buf_size = 0;
2535  is->audio_buf_index = 0;
2536 
2537  /* init averaging filter */
2538  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2539  is->audio_diff_avg_count = 0;
2540  /* since we do not have a precise enough audio FIFO fullness,
2541  we correct audio sync only if larger than this threshold */
2542  is->audio_diff_threshold = 2.0 * is->audio_hw_buf_size / av_samples_get_buffer_size(NULL, is->audio_tgt.channels, is->audio_tgt.freq, is->audio_tgt.fmt, 1);
2543 
2544  memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2545  memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
2546 
2547  is->audio_stream = stream_index;
2548  is->audio_st = ic->streams[stream_index];
2549 
2550  packet_queue_start(&is->audioq);
2551  SDL_PauseAudio(0);
2552  break;
2553  case AVMEDIA_TYPE_VIDEO:
2554  is->video_stream = stream_index;
2555  is->video_st = ic->streams[stream_index];
2556 
2557  packet_queue_start(&is->videoq);
2558  is->video_tid = SDL_CreateThread(video_thread, is);
2559  is->queue_attachments_req = 1;
2560  break;
2561  case AVMEDIA_TYPE_SUBTITLE:
2562  is->subtitle_stream = stream_index;
2563  is->subtitle_st = ic->streams[stream_index];
2564  packet_queue_start(&is->subtitleq);
2565 
2566  is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2567  break;
2568  default:
2569  break;
2570  }
2571  return 0;
2572 }
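/* Illustrative sketch (not part of ffplay.c): at its core, the function above finds a
 * decoder for the stream, applies per-stream options and calls avcodec_open2(). A
 * hypothetical minimal version without the option and filter plumbing: */
static int open_decoder_for_stream(AVFormatContext *ic, int stream_index)
{
    AVCodecContext *avctx = ic->streams[stream_index]->codec;
    AVCodec *codec = avcodec_find_decoder(avctx->codec_id);

    if (!codec)
        return AVERROR_DECODER_NOT_FOUND;
    return avcodec_open2(avctx, codec, NULL);   /* 0 on success, <0 on error */
}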
2573 
2574 static void stream_component_close(VideoState *is, int stream_index)
2575 {
2576  AVFormatContext *ic = is->ic;
2577  AVCodecContext *avctx;
2578 
2579  if (stream_index < 0 || stream_index >= ic->nb_streams)
2580  return;
2581  avctx = ic->streams[stream_index]->codec;
2582 
2583  switch (avctx->codec_type) {
2584  case AVMEDIA_TYPE_AUDIO:
2585  packet_queue_abort(&is->audioq);
2586 
2587  SDL_CloseAudio();
2588 
2589  packet_queue_flush(&is->audioq);
2590  av_free_packet(&is->audio_pkt);
2591  swr_free(&is->swr_ctx);
2592  av_freep(&is->audio_buf1);
2593  is->audio_buf1_size = 0;
2594  is->audio_buf = NULL;
2595  av_frame_free(&is->frame);
2596 
2597  if (is->rdft) {
2598  av_rdft_end(is->rdft);
2599  av_freep(&is->rdft_data);
2600  is->rdft = NULL;
2601  is->rdft_bits = 0;
2602  }
2603 #if CONFIG_AVFILTER
2604  avfilter_graph_free(&is->agraph);
2605 #endif
2606  break;
2607  case AVMEDIA_TYPE_VIDEO:
2608  packet_queue_abort(&is->videoq);
2609 
2610  /* note: we also signal this mutex to make sure we deblock the
2611  video thread in all cases */
2612  SDL_LockMutex(is->pictq_mutex);
2613  SDL_CondSignal(is->pictq_cond);
2614  SDL_UnlockMutex(is->pictq_mutex);
2615 
2616  SDL_WaitThread(is->video_tid, NULL);
2617 
2618  packet_queue_flush(&is->videoq);
2619  break;
2620  case AVMEDIA_TYPE_SUBTITLE:
2621  packet_queue_abort(&is->subtitleq);
2622 
2623  /* note: we also signal this mutex to make sure we deblock the
2624  subtitle thread in all cases */
2625  SDL_LockMutex(is->subpq_mutex);
2626  SDL_CondSignal(is->subpq_cond);
2627  SDL_UnlockMutex(is->subpq_mutex);
2628 
2629  SDL_WaitThread(is->subtitle_tid, NULL);
2630 
2631  packet_queue_flush(&is->subtitleq);
2632  break;
2633  default:
2634  break;
2635  }
2636 
2637  ic->streams[stream_index]->discard = AVDISCARD_ALL;
2638  avcodec_close(avctx);
2639  switch (avctx->codec_type) {
2640  case AVMEDIA_TYPE_AUDIO:
2641  is->audio_st = NULL;
2642  is->audio_stream = -1;
2643  break;
2644  case AVMEDIA_TYPE_VIDEO:
2645  is->video_st = NULL;
2646  is->video_stream = -1;
2647  break;
2648  case AVMEDIA_TYPE_SUBTITLE:
2649  is->subtitle_st = NULL;
2650  is->subtitle_stream = -1;
2651  break;
2652  default:
2653  break;
2654  }
2655 }
2656 
2657 static int decode_interrupt_cb(void *ctx)
2658 {
2659  VideoState *is = ctx;
2660  return is->abort_request;
2661 }
2662 
2663  static int is_realtime(AVFormatContext *s)
2664 {
2665  if( !strcmp(s->iformat->name, "rtp")
2666  || !strcmp(s->iformat->name, "rtsp")
2667  || !strcmp(s->iformat->name, "sdp")
2668  )
2669  return 1;
2670 
2671  if(s->pb && ( !strncmp(s->filename, "rtp:", 4)
2672  || !strncmp(s->filename, "udp:", 4)
2673  )
2674  )
2675  return 1;
2676  return 0;
2677 }
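/* Illustrative sketch (not part of ffplay.c): read_thread() uses this predicate to turn
 * on unlimited buffering for live sources (see the "infinite_buffer < 0 && is->realtime"
 * check below). A hypothetical helper expressing that policy decision: */
static int want_infinite_buffer(int infinite_buffer_opt, int realtime)
{
    /* -1 means "auto": buffer without limit only for realtime streams */
    return infinite_buffer_opt < 0 ? realtime : infinite_buffer_opt;
}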
2678 
2679 /* this thread gets the stream from the disk or the network */
2680 static int read_thread(void *arg)
2681 {
2682  VideoState *is = arg;
2683  AVFormatContext *ic = NULL;
2684  int err, i, ret;
2685  int st_index[AVMEDIA_TYPE_NB];
2686  AVPacket pkt1, *pkt = &pkt1;
2687  int eof = 0;
2688  int64_t stream_start_time;
2689  int pkt_in_play_range = 0;
2690  AVDictionaryEntry *t;
2691  AVDictionary **opts;
2692  int orig_nb_streams;
2693  SDL_mutex *wait_mutex = SDL_CreateMutex();
2694 
2695  memset(st_index, -1, sizeof(st_index));
2696  is->last_video_stream = is->video_stream = -1;
2697  is->last_audio_stream = is->audio_stream = -1;
2698  is->last_subtitle_stream = is->subtitle_stream = -1;
2699 
2700  ic = avformat_alloc_context();
2701  ic->interrupt_callback.callback = decode_interrupt_cb;
2702  ic->interrupt_callback.opaque = is;
2703  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2704  if (err < 0) {
2705  print_error(is->filename, err);
2706  ret = -1;
2707  goto fail;
2708  }
2709  if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2710  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2711  ret = AVERROR_OPTION_NOT_FOUND;
2712  goto fail;
2713  }
2714  is->ic = ic;
2715 
2716  if (genpts)
2717  ic->flags |= AVFMT_FLAG_GENPTS;
2718 
2719  opts = setup_find_stream_info_opts(ic, codec_opts);
2720  orig_nb_streams = ic->nb_streams;
2721 
2722  err = avformat_find_stream_info(ic, opts);
2723  if (err < 0) {
2724  av_log(NULL, AV_LOG_WARNING,
2725  "%s: could not find codec parameters\n", is->filename);
2726  ret = -1;
2727  goto fail;
2728  }
2729  for (i = 0; i < orig_nb_streams; i++)
2730  av_dict_free(&opts[i]);
2731  av_freep(&opts);
2732 
2733  if (ic->pb)
2734  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use url_feof() to test for the end
2735 
2736  if (seek_by_bytes < 0)
2737  seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
2738 
2739  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2740 
2741  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2742  window_title = av_asprintf("%s - %s", t->value, input_filename);
2743 
2744  /* if seeking requested, we execute it */
2745  if (start_time != AV_NOPTS_VALUE) {
2746  int64_t timestamp;
2747 
2748  timestamp = start_time;
2749  /* add the stream start time */
2750  if (ic->start_time != AV_NOPTS_VALUE)
2751  timestamp += ic->start_time;
2752  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2753  if (ret < 0) {
2754  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2755  is->filename, (double)timestamp / AV_TIME_BASE);
2756  }
2757  }
2758 
2759  is->realtime = is_realtime(ic);
2760 
2761  for (i = 0; i < ic->nb_streams; i++)
2762  ic->streams[i]->discard = AVDISCARD_ALL;
2763  if (!video_disable)
2764  st_index[AVMEDIA_TYPE_VIDEO] =
2765  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2766  wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2767  if (!audio_disable)
2768  st_index[AVMEDIA_TYPE_AUDIO] =
2769  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2770  wanted_stream[AVMEDIA_TYPE_AUDIO],
2771  st_index[AVMEDIA_TYPE_VIDEO],
2772  NULL, 0);
2773  if (!video_disable && !subtitle_disable)
2774  st_index[AVMEDIA_TYPE_SUBTITLE] =
2775  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2776  wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2777  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2778  st_index[AVMEDIA_TYPE_AUDIO] :
2779  st_index[AVMEDIA_TYPE_VIDEO]),
2780  NULL, 0);
2781  if (show_status) {
2782  av_dump_format(ic, 0, is->filename, 0);
2783  }
2784 
2785  is->show_mode = show_mode;
2786 
2787  /* open the streams */
2788  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2789  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2790  }
2791 
2792  ret = -1;
2793  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2794  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2795  }
2796  if (is->show_mode == SHOW_MODE_NONE)
2797  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2798 
2799  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2800  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2801  }
2802 
2803  if (is->video_stream < 0 && is->audio_stream < 0) {
2804  av_log(NULL, AV_LOG_FATAL, "%s: could not open codecs\n", is->filename);
2805  ret = -1;
2806  goto fail;
2807  }
2808 
2809  if (infinite_buffer < 0 && is->realtime)
2810  infinite_buffer = 1;
2811 
2812  for (;;) {
2813  if (is->abort_request)
2814  break;
2815  if (is->paused != is->last_paused) {
2816  is->last_paused = is->paused;
2817  if (is->paused)
2818  is->read_pause_return = av_read_pause(ic);
2819  else
2820  av_read_play(ic);
2821  }
2822 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2823  if (is->paused &&
2824  (!strcmp(ic->iformat->name, "rtsp") ||
2825  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2826  /* wait 10 ms to avoid trying to get another packet */
2827  /* XXX: horrible */
2828  SDL_Delay(10);
2829  continue;
2830  }
2831 #endif
2832  if (is->seek_req) {
2833  int64_t seek_target = is->seek_pos;
2834  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2835  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2836 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2837 // of the seek_pos/seek_rel variables
2838 
2839  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2840  if (ret < 0) {
2841  av_log(NULL, AV_LOG_ERROR,
2842  "%s: error while seeking\n", is->ic->filename);
2843  } else {
2844  if (is->audio_stream >= 0) {
2845  packet_queue_flush(&is->audioq);
2846  packet_queue_put(&is->audioq, &flush_pkt);
2847  }
2848  if (is->subtitle_stream >= 0) {
2849  packet_queue_flush(&is->subtitleq);
2850  packet_queue_put(&is->subtitleq, &flush_pkt);
2851  }
2852  if (is->video_stream >= 0) {
2853  packet_queue_flush(&is->videoq);
2854  packet_queue_put(&is->videoq, &flush_pkt);
2855  }
2856  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
2857  set_clock(&is->extclk, NAN, 0);
2858  } else {
2859  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
2860  }
2861  }
2862  is->seek_req = 0;
2863  is->queue_attachments_req = 1;
2864  eof = 0;
2865  if (is->paused)
2866  step_to_next_frame(is);
2867  }
2868  if (is->queue_attachments_req) {
2869  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
2870  AVPacket copy;
2871  if ((ret = av_copy_packet(&copy, &is->video_st->attached_pic)) < 0)
2872  goto fail;
2873  packet_queue_put(&is->videoq, &copy);
2874  }
2875  is->queue_attachments_req = 0;
2876  }
2877 
2878  /* if the queues are full, no need to read more */
2879  if (infinite_buffer<1 &&
2880  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2881  || ( (is->audioq .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
2882  && (is->videoq .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request
2883  || (is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC))
2884  && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
2885  /* wait 10 ms */
2886  SDL_LockMutex(wait_mutex);
2887  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2888  SDL_UnlockMutex(wait_mutex);
2889  continue;
2890  }
2891  if (eof) {
2892  if (is->video_stream >= 0) {
2893  av_init_packet(pkt);
2894  pkt->data = NULL;
2895  pkt->size = 0;
2896  pkt->stream_index = is->video_stream;
2897  packet_queue_put(&is->videoq, pkt);
2898  }
2899  if (is->audio_stream >= 0) {
2900  av_init_packet(pkt);
2901  pkt->data = NULL;
2902  pkt->size = 0;
2903  pkt->stream_index = is->audio_stream;
2904  packet_queue_put(&is->audioq, pkt);
2905  }
2906  SDL_Delay(10);
2907  if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2908  if (loop != 1 && (!loop || --loop)) {
2909  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2910  } else if (autoexit) {
2911  ret = AVERROR_EOF;
2912  goto fail;
2913  }
2914  }
2915  eof=0;
2916  continue;
2917  }
2918  ret = av_read_frame(ic, pkt);
2919  if (ret < 0) {
2920  if (ret == AVERROR_EOF || url_feof(ic->pb))
2921  eof = 1;
2922  if (ic->pb && ic->pb->error)
2923  break;
2924  SDL_LockMutex(wait_mutex);
2925  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2926  SDL_UnlockMutex(wait_mutex);
2927  continue;
2928  }
2929  /* check if packet is in play range specified by user, then queue, otherwise discard */
2930  stream_start_time = ic->streams[pkt->stream_index]->start_time;
2931  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2932  (pkt->pts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
2933  av_q2d(ic->streams[pkt->stream_index]->time_base) -
2934  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2935  <= ((double)duration / 1000000);
2936  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2937  packet_queue_put(&is->audioq, pkt);
2938  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
2939  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
2940  packet_queue_put(&is->videoq, pkt);
2941  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2942  packet_queue_put(&is->subtitleq, pkt);
2943  } else {
2944  av_free_packet(pkt);
2945  }
2946  }
2947  /* wait until the end */
2948  while (!is->abort_request) {
2949  SDL_Delay(100);
2950  }
2951 
2952  ret = 0;
2953  fail:
2954  /* close each stream */
2955  if (is->audio_stream >= 0)
2956  stream_component_close(is, is->audio_stream);
2957  if (is->video_stream >= 0)
2958  stream_component_close(is, is->video_stream);
2959  if (is->subtitle_stream >= 0)
2960  stream_component_close(is, is->subtitle_stream);
2961  if (is->ic) {
2962  avformat_close_input(&is->ic);
2963  }
2964 
2965  if (ret != 0) {
2966  SDL_Event event;
2967 
2968  event.type = FF_QUIT_EVENT;
2969  event.user.data1 = is;
2970  SDL_PushEvent(&event);
2971  }
2972  SDL_DestroyMutex(wait_mutex);
2973  return 0;
2974 }
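/* Illustrative sketch (not part of ffplay.c): the pkt_in_play_range test above converts
 * the packet pts to seconds relative to the stream start, subtracts the -ss start time
 * and compares against the -t duration (both expressed in AV_TIME_BASE units, i.e.
 * microseconds). A hypothetical standalone form of the same test: */
static int packet_in_play_range(int64_t pkt_pts, AVRational stream_tb,
                                int64_t stream_start, int64_t start_time,
                                int64_t duration)
{
    double pts_sec;

    if (duration == AV_NOPTS_VALUE)
        return 1;                   /* no -t given: everything is in range */
    pts_sec = (pkt_pts - (stream_start != AV_NOPTS_VALUE ? stream_start : 0)) * av_q2d(stream_tb);
    return pts_sec - (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
           <= (double)duration / 1000000;
}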
2975 
2976 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2977 {
2978  VideoState *is;
2979 
2980  is = av_mallocz(sizeof(VideoState));
2981  if (!is)
2982  return NULL;
2983  av_strlcpy(is->filename, filename, sizeof(is->filename));
2984  is->iformat = iformat;
2985  is->ytop = 0;
2986  is->xleft = 0;
2987 
2988  /* start video display */
2989  is->pictq_mutex = SDL_CreateMutex();
2990  is->pictq_cond = SDL_CreateCond();
2991 
2992  is->subpq_mutex = SDL_CreateMutex();
2993  is->subpq_cond = SDL_CreateCond();
2994 
2995  packet_queue_init(&is->videoq);
2996  packet_queue_init(&is->audioq);
2997  packet_queue_init(&is->subtitleq);
2998 
2999  is->continue_read_thread = SDL_CreateCond();
3000 
3001  init_clock(&is->vidclk, &is->videoq.serial);
3002  init_clock(&is->audclk, &is->audioq.serial);
3003  init_clock(&is->extclk, &is->extclk.serial);
3004  is->audio_clock_serial = -1;
3005  is->audio_last_serial = -1;
3006  is->av_sync_type = av_sync_type;
3007  is->read_tid = SDL_CreateThread(read_thread, is);
3008  if (!is->read_tid) {
3009  av_free(is);
3010  return NULL;
3011  }
3012  return is;
3013 }
3014 
3015  static void stream_cycle_channel(VideoState *is, int codec_type)
3016 {
3017  AVFormatContext *ic = is->ic;
3018  int start_index, stream_index;
3019  int old_index;
3020  AVStream *st;
3021 
3022  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3023  start_index = is->last_video_stream;
3024  old_index = is->video_stream;
3025  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3026  start_index = is->last_audio_stream;
3027  old_index = is->audio_stream;
3028  } else {
3029  start_index = is->last_subtitle_stream;
3030  old_index = is->subtitle_stream;
3031  }
3032  stream_index = start_index;
3033  for (;;) {
3034  if (++stream_index >= is->ic->nb_streams)
3035  {
3036  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3037  {
3038  stream_index = -1;
3039  is->last_subtitle_stream = -1;
3040  goto the_end;
3041  }
3042  if (start_index == -1)
3043  return;
3044  stream_index = 0;
3045  }
3046  if (stream_index == start_index)
3047  return;
3048  st = ic->streams[stream_index];
3049  if (st->codec->codec_type == codec_type) {
3050  /* check that parameters are OK */
3051  switch (codec_type) {
3052  case AVMEDIA_TYPE_AUDIO:
3053  if (st->codec->sample_rate != 0 &&
3054  st->codec->channels != 0)
3055  goto the_end;
3056  break;
3057  case AVMEDIA_TYPE_VIDEO:
3058  case AVMEDIA_TYPE_SUBTITLE:
3059  goto the_end;
3060  default:
3061  break;
3062  }
3063  }
3064  }
3065  the_end:
3066  stream_component_close(is, old_index);
3067  stream_component_open(is, stream_index);
3068 }
3069 
3070 
3071  static void toggle_full_screen(VideoState *is)
3072 {
3073 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
3074  /* OS X needs to reallocate the SDL overlays */
3075  int i;
3076  for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
3077  is->pictq[i].reallocate = 1;
3078 #endif
3079  is_full_screen = !is_full_screen;
3080  video_open(is, 1, NULL);
3081 }
3082 
3083  static void toggle_audio_display(VideoState *is)
3084 {
3085  int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
3086  int next = is->show_mode;
3087  do {
3088  next = (next + 1) % SHOW_MODE_NB;
3089  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3090  if (is->show_mode != next) {
3091  fill_rectangle(screen,
3092  is->xleft, is->ytop, is->width, is->height,
3093  bgcolor, 1);
3094  is->force_refresh = 1;
3095  is->show_mode = next;
3096  }
3097 }
3098 
3099 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3100  double remaining_time = 0.0;
3101  SDL_PumpEvents();
3102  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_ALLEVENTS)) {
3103  if (!cursor_hidden && av_gettime() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3104  SDL_ShowCursor(0);
3105  cursor_hidden = 1;
3106  }
3107  if (remaining_time > 0.0)
3108  av_usleep((int64_t)(remaining_time * 1000000.0));
3109  remaining_time = REFRESH_RATE;
3110  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3111  video_refresh(is, &remaining_time);
3112  SDL_PumpEvents();
3113  }
3114 }
3115 
3116 /* handle an event sent by the GUI */
3117 static void event_loop(VideoState *cur_stream)
3118 {
3119  SDL_Event event;
3120  double incr, pos, frac;
3121 
3122  for (;;) {
3123  double x;
3124  refresh_loop_wait_event(cur_stream, &event);
3125  switch (event.type) {
3126  case SDL_KEYDOWN:
3127  if (exit_on_keydown) {
3128  do_exit(cur_stream);
3129  break;
3130  }
3131  switch (event.key.keysym.sym) {
3132  case SDLK_ESCAPE:
3133  case SDLK_q:
3134  do_exit(cur_stream);
3135  break;
3136  case SDLK_f:
3137  toggle_full_screen(cur_stream);
3138  cur_stream->force_refresh = 1;
3139  break;
3140  case SDLK_p:
3141  case SDLK_SPACE:
3142  toggle_pause(cur_stream);
3143  break;
3144  case SDLK_s: // S: Step to next frame
3145  step_to_next_frame(cur_stream);
3146  break;
3147  case SDLK_a:
3148  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3149  break;
3150  case SDLK_v:
3151  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3152  break;
3153  case SDLK_t:
3154  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3155  break;
3156  case SDLK_w:
3157  toggle_audio_display(cur_stream);
3158  break;
3159  case SDLK_PAGEUP:
3160  incr = 600.0;
3161  goto do_seek;
3162  case SDLK_PAGEDOWN:
3163  incr = -600.0;
3164  goto do_seek;
3165  case SDLK_LEFT:
3166  incr = -10.0;
3167  goto do_seek;
3168  case SDLK_RIGHT:
3169  incr = 10.0;
3170  goto do_seek;
3171  case SDLK_UP:
3172  incr = 60.0;
3173  goto do_seek;
3174  case SDLK_DOWN:
3175  incr = -60.0;
3176  do_seek:
3177  if (seek_by_bytes) {
3178  if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
3179  pos = cur_stream->video_current_pos;
3180  } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
3181  pos = cur_stream->audio_pkt.pos;
3182  } else
3183  pos = avio_tell(cur_stream->ic->pb);
3184  if (cur_stream->ic->bit_rate)
3185  incr *= cur_stream->ic->bit_rate / 8.0;
3186  else
3187  incr *= 180000.0;
3188  pos += incr;
3189  stream_seek(cur_stream, pos, incr, 1);
3190  } else {
3191  pos = get_master_clock(cur_stream);
3192  if (isnan(pos))
3193  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3194  pos += incr;
3195  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3196  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3197  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3198  }
3199  break;
3200  default:
3201  break;
3202  }
3203  break;
3204  case SDL_VIDEOEXPOSE:
3205  cur_stream->force_refresh = 1;
3206  break;
3207  case SDL_MOUSEBUTTONDOWN:
3208  if (exit_on_mousedown) {
3209  do_exit(cur_stream);
3210  break;
3211  }
3212  case SDL_MOUSEMOTION:
3213  if (cursor_hidden) {
3214  SDL_ShowCursor(1);
3215  cursor_hidden = 0;
3216  }
3217  cursor_last_shown = av_gettime();
3218  if (event.type == SDL_MOUSEBUTTONDOWN) {
3219  x = event.button.x;
3220  } else {
3221  if (event.motion.state != SDL_PRESSED)
3222  break;
3223  x = event.motion.x;
3224  }
3225  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3226  uint64_t size = avio_size(cur_stream->ic->pb);
3227  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3228  } else {
3229  int64_t ts;
3230  int ns, hh, mm, ss;
3231  int tns, thh, tmm, tss;
3232  tns = cur_stream->ic->duration / 1000000LL;
3233  thh = tns / 3600;
3234  tmm = (tns % 3600) / 60;
3235  tss = (tns % 60);
3236  frac = x / cur_stream->width;
3237  ns = frac * tns;
3238  hh = ns / 3600;
3239  mm = (ns % 3600) / 60;
3240  ss = (ns % 60);
3241  av_log(NULL, AV_LOG_INFO,
3242  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3243  hh, mm, ss, thh, tmm, tss);
3244  ts = frac * cur_stream->ic->duration;
3245  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3246  ts += cur_stream->ic->start_time;
3247  stream_seek(cur_stream, ts, 0, 0);
3248  }
3249  break;
3250  case SDL_VIDEORESIZE:
3251  screen = SDL_SetVideoMode(FFMIN(16383, event.resize.w), event.resize.h, 0,
3252  SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
3253  if (!screen) {
3254  av_log(NULL, AV_LOG_FATAL, "Failed to set video mode\n");
3255  do_exit(cur_stream);
3256  }
3257  screen_width = cur_stream->width = screen->w;
3258  screen_height = cur_stream->height = screen->h;
3259  cur_stream->force_refresh = 1;
3260  break;
3261  case SDL_QUIT:
3262  case FF_QUIT_EVENT:
3263  do_exit(cur_stream);
3264  break;
3265  case FF_ALLOC_EVENT:
3266  alloc_picture(event.user.data1);
3267  break;
3268  default:
3269  break;
3270  }
3271  }
3272 }
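/* Illustrative sketch (not part of ffplay.c): the mouse-seek branch above maps the click
 * position to a fraction of the window width and then to a timestamp, e.g. a click at
 * x = 320 in an 800-pixel window over a 2-hour file:
 *   frac = 320 / 800 = 0.4
 *   ts   = 0.4 * duration (+ start_time if set) = 48 minutes in
 * A hypothetical helper for the same mapping: */
static int64_t click_to_timestamp(double x, double width,
                                  int64_t duration, int64_t start_time)
{
    int64_t ts = (int64_t)(x / width * duration);
    if (start_time != AV_NOPTS_VALUE)
        ts += start_time;
    return ts;
}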
3273 
3274 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3275 {
3276  av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3277  return opt_default(NULL, "video_size", arg);
3278 }
3279 
3280 static int opt_width(void *optctx, const char *opt, const char *arg)
3281 {
3282  screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3283  return 0;
3284 }
3285 
3286 static int opt_height(void *optctx, const char *opt, const char *arg)
3287 {
3288  screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3289  return 0;
3290 }
3291 
3292 static int opt_format(void *optctx, const char *opt, const char *arg)
3293 {
3294  file_iformat = av_find_input_format(arg);
3295  if (!file_iformat) {
3296  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3297  return AVERROR(EINVAL);
3298  }
3299  return 0;
3300 }
3301 
3302 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3303 {
3304  av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3305  return opt_default(NULL, "pixel_format", arg);
3306 }
3307 
3308 static int opt_sync(void *optctx, const char *opt, const char *arg)
3309 {
3310  if (!strcmp(arg, "audio"))
3311  av_sync_type = AV_SYNC_AUDIO_MASTER;
3312  else if (!strcmp(arg, "video"))
3313  av_sync_type = AV_SYNC_VIDEO_MASTER;
3314  else if (!strcmp(arg, "ext"))
3315  av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3316  else {
3317  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3318  exit(1);
3319  }
3320  return 0;
3321 }
3322 
3323 static int opt_seek(void *optctx, const char *opt, const char *arg)
3324 {
3325  start_time = parse_time_or_die(opt, arg, 1);
3326  return 0;
3327 }
3328 
3329 static int opt_duration(void *optctx, const char *opt, const char *arg)
3330 {
3331  duration = parse_time_or_die(opt, arg, 1);
3332  return 0;
3333 }
3334 
3335 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3336 {
3337  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3338  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3339  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3340  parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3341  return 0;
3342 }
3343 
3344 static void opt_input_file(void *optctx, const char *filename)
3345 {
3346  if (input_filename) {
3347  av_log(NULL, AV_LOG_FATAL,
3348  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3349  filename, input_filename);
3350  exit(1);
3351  }
3352  if (!strcmp(filename, "-"))
3353  filename = "pipe:";
3354  input_filename = filename;
3355 }
3356 
3357 static int opt_codec(void *optctx, const char *opt, const char *arg)
3358 {
3359  const char *spec = strchr(opt, ':');
3360  if (!spec) {
3361  av_log(NULL, AV_LOG_ERROR,
3362  "No media specifier was specified in '%s' in option '%s'\n",
3363  arg, opt);
3364  return AVERROR(EINVAL);
3365  }
3366  spec++;
3367  switch (spec[0]) {
3368  case 'a' : audio_codec_name = arg; break;
3369  case 's' : subtitle_codec_name = arg; break;
3370  case 'v' : video_codec_name = arg; break;
3371  default:
3372  av_log(NULL, AV_LOG_ERROR,
3373  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3374  return AVERROR(EINVAL);
3375  }
3376  return 0;
3377 }
3378 
3379 static int dummy;
3380 
3381 static const OptionDef options[] = {
3382 #include "cmdutils_common_opts.h"
3383  { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3384  { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3385  { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3386  { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3387  { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3388  { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3389  { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3390  { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
3391  { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
3392  { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
3393  { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3394  { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3395  { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3396  { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3397  { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3398  { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3399  { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3400  { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
3401  { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3402  { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3403  { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3404  { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3405  { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo", "algo" },
3406  { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options", "bit_mask" },
3407  { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3408  { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3409  { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3410  { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3411  { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3412  { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3413  { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3414  { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3415 #if CONFIG_AVFILTER
3416  { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "set video filters", "filter_graph" },
3417  { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3418 #endif
3419  { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3420  { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3421  { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3422  { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3423  { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3424  { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3425  { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3426  { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3427  { NULL, },
3428 };
3429 
3430 static void show_usage(void)
3431 {
3432  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3433  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3434  av_log(NULL, AV_LOG_INFO, "\n");
3435 }
3436 
3437 void show_help_default(const char *opt, const char *arg)
3438 {
3439  av_log_set_callback(log_callback_help);
3440  show_usage();
3441  show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3442  show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3443  printf("\n");
3444  show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3445  show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3446 #if !CONFIG_AVFILTER
3447  show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3448 #else
3449  show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3450 #endif
3451  printf("\nWhile playing:\n"
3452  "q, ESC quit\n"
3453  "f toggle full screen\n"
3454  "p, SPC pause\n"
3455  "a cycle audio channel\n"
3456  "v cycle video channel\n"
3457  "t cycle subtitle channel\n"
3458  "w show audio waves\n"
3459  "s activate frame-step mode\n"
3460  "left/right seek backward/forward 10 seconds\n"
3461  "down/up seek backward/forward 1 minute\n"
3462  "page down/page up seek backward/forward 10 minutes\n"
3463  "mouse click seek to percentage in file corresponding to fraction of width\n"
3464  );
3465 }
3466 
3467 static int lockmgr(void **mtx, enum AVLockOp op)
3468 {
3469  switch(op) {
3470  case AV_LOCK_CREATE:
3471  *mtx = SDL_CreateMutex();
3472  if(!*mtx)
3473  return 1;
3474  return 0;
3475  case AV_LOCK_OBTAIN:
3476  return !!SDL_LockMutex(*mtx);
3477  case AV_LOCK_RELEASE:
3478  return !!SDL_UnlockMutex(*mtx);
3479  case AV_LOCK_DESTROY:
3480  SDL_DestroyMutex(*mtx);
3481  return 0;
3482  }
3483  return 1;
3484 }
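/* Illustrative sketch (not part of ffplay.c): a lock manager like the one above is
 * handed to libavcodec with av_lockmgr_register(); main() below does exactly that
 * before opening any stream. Minimal usage, assuming the lockmgr() defined above: */
static int register_sdl_lockmgr(void)
{
    if (av_lockmgr_register(lockmgr)) {
        av_log(NULL, AV_LOG_FATAL, "Could not initialize lock manager!\n");
        return -1;
    }
    return 0;
}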
3485 
3486 /* Called from the main */
3487 int main(int argc, char **argv)
3488 {
3489  int flags;
3490  VideoState *is;
3491  char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3492 
3493  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3494  parse_loglevel(argc, argv, options);
3495 
3496  /* register all codecs, demux and protocols */
3497  avcodec_register_all();
3498 #if CONFIG_AVDEVICE
3499  avdevice_register_all();
3500 #endif
3501 #if CONFIG_AVFILTER
3502  avfilter_register_all();
3503 #endif
3504  av_register_all();
3505  avformat_network_init();
3506 
3507  init_opts();
3508 
3509  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3510  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3511 
3512  show_banner(argc, argv, options);
3513 
3514  parse_options(NULL, argc, argv, options, opt_input_file);
3515 
3516  if (!input_filename) {
3517  show_usage();
3518  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3519  av_log(NULL, AV_LOG_FATAL,
3520  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3521  exit(1);
3522  }
3523 
3524  if (display_disable) {
3525  video_disable = 1;
3526  }
3527  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3528  if (audio_disable)
3529  flags &= ~SDL_INIT_AUDIO;
3530  if (display_disable)
3531  SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3532 #if !defined(__MINGW32__) && !defined(__APPLE__)
3533  flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3534 #endif
3535  if (SDL_Init (flags)) {
3536  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3537  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3538  exit(1);
3539  }
3540 
3541  if (!display_disable) {
3542  const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3543  fs_screen_width = vi->current_w;
3544  fs_screen_height = vi->current_h;
3545  }
3546 
3547  SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3548  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3549  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3550 
3551  if (av_lockmgr_register(lockmgr)) {
3552  av_log(NULL, AV_LOG_FATAL, "Could not initialize lock manager!\n");
3553  do_exit(NULL);
3554  }
3555 
3556  av_init_packet(&flush_pkt);
3557  flush_pkt.data = (uint8_t *)&flush_pkt;
3558 
3559  is = stream_open(input_filename, file_iformat);
3560  if (!is) {
3561  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3562  do_exit(NULL);
3563  }
3564 
3565  event_loop(is);
3566 
3567  /* never returns */
3568 
3569  return 0;
3570 }