FFmpeg
ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include <stdint.h>
32 
33 #include "libavutil/avstring.h"
34 #include "libavutil/colorspace.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/dict.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/samplefmt.h"
41 #include "libavutil/avassert.h"
42 #include "libavutil/time.h"
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswscale/swscale.h"
46 #include "libavutil/opt.h"
47 #include "libavcodec/avfft.h"
48 #include "libswresample/swresample.h"
49 
50 #if CONFIG_AVFILTER
51 # include "libavfilter/avcodec.h"
52 # include "libavfilter/avfilter.h"
53 # include "libavfilter/buffersink.h"
54 # include "libavfilter/buffersrc.h"
55 #endif
56 
57 #include <SDL.h>
58 #include <SDL_thread.h>
59 
60 #include "cmdutils.h"
61 
62 #include <assert.h>
63 
64 const char program_name[] = "ffplay";
65 const int program_birth_year = 2003;
66 
67 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
68 #define MIN_FRAMES 5
69 
70 /* Minimum SDL audio buffer size, in samples. */
71 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
72 /* Calculate actual buffer size, keeping in mind not to cause too frequent audio callbacks */
73 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
74 
75 /* no AV sync correction is done if below the minimum AV sync threshold */
76 #define AV_SYNC_THRESHOLD_MIN 0.04
77 /* AV sync correction is done if above the maximum AV sync threshold */
78 #define AV_SYNC_THRESHOLD_MAX 0.1
79 /* If a frame duration is longer than this, it will not be duplicated to compensate AV sync */
80 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
81 /* no AV correction is done if the error is too big */
82 #define AV_NOSYNC_THRESHOLD 10.0
83 
84 /* maximum audio speed change to get correct sync */
85 #define SAMPLE_CORRECTION_PERCENT_MAX 10
86 
87 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
88 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
89 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
90 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
91 
92 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
93 #define AUDIO_DIFF_AVG_NB 20
94 
95 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
96 #define REFRESH_RATE 0.01
97 
98 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
99 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
100 #define SAMPLE_ARRAY_SIZE (8 * 65536)
101 
102 #define CURSOR_HIDE_DELAY 1000000
103 
104 static int64_t sws_flags = SWS_BICUBIC;
105 
106 typedef struct MyAVPacketList {
107  AVPacket pkt;
108  struct MyAVPacketList *next;
109  int serial;
110 } MyAVPacketList;
111 
112 typedef struct PacketQueue {
113  MyAVPacketList *first_pkt, *last_pkt;
114  int nb_packets;
115  int size;
116  int abort_request;
117  int serial;
118  SDL_mutex *mutex;
119  SDL_cond *cond;
120 } PacketQueue;
121 
122 #define VIDEO_PICTURE_QUEUE_SIZE 3
123 #define SUBPICTURE_QUEUE_SIZE 16
124 
125 typedef struct VideoPicture {
126  double pts; // presentation timestamp for this picture
127  double duration; // estimated duration based on frame rate
128  int64_t pos; // byte position in file
129  SDL_Overlay *bmp;
130  int width, height; /* source height & width */
131  int allocated;
132  int reallocate;
133  int serial;
134 
135  AVRational sar;
136 } VideoPicture;
137 
138 typedef struct SubPicture {
139  double pts; /* presentation time stamp for this picture */
140  AVSubtitle sub;
141  int serial;
142 } SubPicture;
143 
144 typedef struct AudioParams {
145  int freq;
146  int channels;
147  int64_t channel_layout;
148  enum AVSampleFormat fmt;
149  int frame_size;
150  int bytes_per_sec;
151 } AudioParams;
152 
153 typedef struct Clock {
154  double pts; /* clock base */
155  double pts_drift; /* clock base minus time at which we updated the clock */
156  double last_updated;
157  double speed;
158  int serial; /* clock is based on a packet with this serial */
159  int paused;
160  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
161 } Clock;
162 
163 enum {
164  AV_SYNC_AUDIO_MASTER, /* default choice */
165  AV_SYNC_VIDEO_MASTER, /* synchronize to the video clock */
166  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
167 };
168 
169 typedef struct VideoState {
170  SDL_Thread *read_tid;
171  SDL_Thread *video_tid;
176  int paused;
179  int seek_req;
181  int64_t seek_pos;
182  int64_t seek_rel;
185  int realtime;
188 
192 
194 
196 
197  double audio_clock;
199  double audio_diff_cum; /* used for AV difference average computation */
209  unsigned int audio_buf_size; /* in bytes */
210  unsigned int audio_buf1_size;
211  int audio_buf_index; /* in bytes */
219 #if CONFIG_AVFILTER
220  struct AudioParams audio_filter_src;
221 #endif
228 
229  enum ShowMode {
230  SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
231  } show_mode;
238  int xpos;
240 
241  SDL_Thread *subtitle_tid;
247  SDL_mutex *subpq_mutex;
248  SDL_cond *subpq_cond;
249 
250  double frame_timer;
256  int64_t video_current_pos; // current displayed file pos
257  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
260  SDL_mutex *pictq_mutex;
261  SDL_cond *pictq_cond;
262 #if !CONFIG_AVFILTER
263  struct SwsContext *img_convert_ctx;
264 #endif
266 
267  char filename[1024];
269  int step;
270 
271 #if CONFIG_AVFILTER
272  int vfilter_idx;
273  AVFilterContext *in_video_filter; // the first filter in the video chain
274  AVFilterContext *out_video_filter; // the last filter in the video chain
275  AVFilterContext *in_audio_filter; // the first filter in the audio chain
276  AVFilterContext *out_audio_filter; // the last filter in the audio chain
277  AVFilterGraph *agraph; // audio filter graph
278 #endif
279 
281 
283 } VideoState;
284 
285 /* options specified by the user */
287 static const char *input_filename;
288 static const char *window_title;
289 static int fs_screen_width;
290 static int fs_screen_height;
291 static int default_width = 640;
292 static int default_height = 480;
293 static int screen_width = 0;
294 static int screen_height = 0;
295 static int audio_disable;
296 static int video_disable;
297 static int subtitle_disable;
299  [AVMEDIA_TYPE_AUDIO] = -1,
300  [AVMEDIA_TYPE_VIDEO] = -1,
301  [AVMEDIA_TYPE_SUBTITLE] = -1,
302 };
303 static int seek_by_bytes = -1;
304 static int display_disable;
305 static int show_status = 1;
307 static int64_t start_time = AV_NOPTS_VALUE;
308 static int64_t duration = AV_NOPTS_VALUE;
309 static int workaround_bugs = 1;
310 static int fast = 0;
311 static int genpts = 0;
312 static int lowres = 0;
313 static int decoder_reorder_pts = -1;
314 static int autoexit;
315 static int exit_on_keydown;
316 static int exit_on_mousedown;
317 static int loop = 1;
318 static int framedrop = -1;
319 static int infinite_buffer = -1;
320 static enum ShowMode show_mode = SHOW_MODE_NONE;
321 static const char *audio_codec_name;
322 static const char *subtitle_codec_name;
323 static const char *video_codec_name;
324 double rdftspeed = 0.02;
325 static int64_t cursor_last_shown;
326 static int cursor_hidden = 0;
327 #if CONFIG_AVFILTER
328 static const char **vfilters_list = NULL;
329 static int nb_vfilters = 0;
330 static char *afilters = NULL;
331 #endif
332 static int autorotate = 1;
333 
334 /* current context */
335 static int is_full_screen;
336 static int64_t audio_callback_time;
337 
338 static AVPacket flush_pkt;
339 
340 #define FF_ALLOC_EVENT (SDL_USEREVENT)
341 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
342 
343 static SDL_Surface *screen;
344 
345 #if CONFIG_AVFILTER
346 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
347 {
348  GROW_ARRAY(vfilters_list, nb_vfilters);
349  vfilters_list[nb_vfilters - 1] = arg;
350  return 0;
351 }
352 #endif
353 
354 static inline
355 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
356  enum AVSampleFormat fmt2, int64_t channel_count2)
357 {
358  /* If channel count == 1, planar and non-planar formats are the same */
359  if (channel_count1 == 1 && channel_count2 == 1)
360  return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
361  else
362  return channel_count1 != channel_count2 || fmt1 != fmt2;
363 }
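/* Annotation (not part of ffplay.c), based on av_get_packed_sample_fmt() semantics:
 * cmp_audio_fmts(AV_SAMPLE_FMT_S16, 1, AV_SAMPLE_FMT_S16P, 1) returns 0, because a
 * mono planar buffer is bit-identical to a mono packed one, whereas the same call
 * with channel counts of 2 returns nonzero and the caller has to treat the formats
 * as different (e.g. rebuild the audio filter chain). */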
364 
365 static inline
366 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
367 {
368  if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
369  return channel_layout;
370  else
371  return 0;
372 }
373 
374 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
375 {
376  MyAVPacketList *pkt1;
377 
378  if (q->abort_request)
379  return -1;
380 
381  pkt1 = av_malloc(sizeof(MyAVPacketList));
382  if (!pkt1)
383  return -1;
384  pkt1->pkt = *pkt;
385  pkt1->next = NULL;
386  if (pkt == &flush_pkt)
387  q->serial++;
388  pkt1->serial = q->serial;
389 
390  if (!q->last_pkt)
391  q->first_pkt = pkt1;
392  else
393  q->last_pkt->next = pkt1;
394  q->last_pkt = pkt1;
395  q->nb_packets++;
396  q->size += pkt1->pkt.size + sizeof(*pkt1);
397  /* XXX: should duplicate packet data in DV case */
398  SDL_CondSignal(q->cond);
399  return 0;
400 }
401 
402 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
403 {
404  int ret;
405 
406  /* duplicate the packet */
407  if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
408  return -1;
409 
410  SDL_LockMutex(q->mutex);
411  ret = packet_queue_put_private(q, pkt);
412  SDL_UnlockMutex(q->mutex);
413 
414  if (pkt != &flush_pkt && ret < 0)
415  av_free_packet(pkt);
416 
417  return ret;
418 }
419 
420 static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
421 {
422  AVPacket pkt1, *pkt = &pkt1;
423  av_init_packet(pkt);
424  pkt->data = NULL;
425  pkt->size = 0;
426  pkt->stream_index = stream_index;
427  return packet_queue_put(q, pkt);
428 }
429 
430 /* packet queue handling */
431 static void packet_queue_init(PacketQueue *q)
432 {
433  memset(q, 0, sizeof(PacketQueue));
434  q->mutex = SDL_CreateMutex();
435  q->cond = SDL_CreateCond();
436  q->abort_request = 1;
437 }
438 
439 static void packet_queue_flush(PacketQueue *q)
440 {
441  MyAVPacketList *pkt, *pkt1;
442 
443  SDL_LockMutex(q->mutex);
444  for (pkt = q->first_pkt; pkt; pkt = pkt1) {
445  pkt1 = pkt->next;
446  av_free_packet(&pkt->pkt);
447  av_freep(&pkt);
448  }
449  q->last_pkt = NULL;
450  q->first_pkt = NULL;
451  q->nb_packets = 0;
452  q->size = 0;
453  SDL_UnlockMutex(q->mutex);
454 }
455 
456 static void packet_queue_destroy(PacketQueue *q)
457 {
458  packet_queue_flush(q);
459  SDL_DestroyMutex(q->mutex);
460  SDL_DestroyCond(q->cond);
461 }
462 
463 static void packet_queue_abort(PacketQueue *q)
464 {
465  SDL_LockMutex(q->mutex);
466 
467  q->abort_request = 1;
468 
469  SDL_CondSignal(q->cond);
470 
471  SDL_UnlockMutex(q->mutex);
472 }
473 
474 static void packet_queue_start(PacketQueue *q)
475 {
476  SDL_LockMutex(q->mutex);
477  q->abort_request = 0;
478  packet_queue_put_private(q, &flush_pkt);
479  SDL_UnlockMutex(q->mutex);
480 }
481 
482 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
483 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
484 {
485  MyAVPacketList *pkt1;
486  int ret;
487 
488  SDL_LockMutex(q->mutex);
489 
490  for (;;) {
491  if (q->abort_request) {
492  ret = -1;
493  break;
494  }
495 
496  pkt1 = q->first_pkt;
497  if (pkt1) {
498  q->first_pkt = pkt1->next;
499  if (!q->first_pkt)
500  q->last_pkt = NULL;
501  q->nb_packets--;
502  q->size -= pkt1->pkt.size + sizeof(*pkt1);
503  *pkt = pkt1->pkt;
504  if (serial)
505  *serial = pkt1->serial;
506  av_free(pkt1);
507  ret = 1;
508  break;
509  } else if (!block) {
510  ret = 0;
511  break;
512  } else {
513  SDL_CondWait(q->cond, q->mutex);
514  }
515  }
516  SDL_UnlockMutex(q->mutex);
517  return ret;
518 }
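/* Illustrative sketch (not part of ffplay.c): the producer/consumer pattern the
 * packet queue implements. The read thread calls packet_queue_put(); a decoder
 * thread blocks in packet_queue_get() and uses the returned serial plus the
 * special flush_pkt to notice seeks. example_consume_packets() is a hypothetical
 * helper shown only to make the protocol explicit. */
static int example_consume_packets(PacketQueue *q)
{
    AVPacket pkt;
    int serial;
    for (;;) {
        int ret = packet_queue_get(q, &pkt, 1, &serial); /* block until a packet arrives or the queue aborts */
        if (ret < 0)
            return -1;                    /* queue was aborted, the thread should exit */
        if (pkt.data == flush_pkt.data)
            continue;                     /* a seek bumped q->serial; flush the decoder here */
        /* ... decode pkt, tagging the output with 'serial' ... */
        av_free_packet(&pkt);
    }
}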
519 
520 static inline void fill_rectangle(SDL_Surface *screen,
521  int x, int y, int w, int h, int color, int update)
522 {
523  SDL_Rect rect;
524  rect.x = x;
525  rect.y = y;
526  rect.w = w;
527  rect.h = h;
528  SDL_FillRect(screen, &rect, color);
529  if (update && w > 0 && h > 0)
530  SDL_UpdateRect(screen, x, y, w, h);
531 }
532 
533 /* draw only the border of a rectangle */
534 static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
535 {
536  int w1, w2, h1, h2;
537 
538  /* fill the background */
539  w1 = x;
540  if (w1 < 0)
541  w1 = 0;
542  w2 = width - (x + w);
543  if (w2 < 0)
544  w2 = 0;
545  h1 = y;
546  if (h1 < 0)
547  h1 = 0;
548  h2 = height - (y + h);
549  if (h2 < 0)
550  h2 = 0;
551  fill_rectangle(screen,
552  xleft, ytop,
553  w1, height,
554  color, update);
555  fill_rectangle(screen,
556  xleft + width - w2, ytop,
557  w2, height,
558  color, update);
559  fill_rectangle(screen,
560  xleft + w1, ytop,
561  width - w1 - w2, h1,
562  color, update);
563  fill_rectangle(screen,
564  xleft + w1, ytop + height - h2,
565  width - w1 - w2, h2,
566  color, update);
567 }
568 
569 #define ALPHA_BLEND(a, oldp, newp, s)\
570 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
571 
572 #define RGBA_IN(r, g, b, a, s)\
573 {\
574  unsigned int v = ((const uint32_t *)(s))[0];\
575  a = (v >> 24) & 0xff;\
576  r = (v >> 16) & 0xff;\
577  g = (v >> 8) & 0xff;\
578  b = v & 0xff;\
579 }
580 
581 #define YUVA_IN(y, u, v, a, s, pal)\
582 {\
583  unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
584  a = (val >> 24) & 0xff;\
585  y = (val >> 16) & 0xff;\
586  u = (val >> 8) & 0xff;\
587  v = val & 0xff;\
588 }
589 
590 #define YUVA_OUT(d, y, u, v, a)\
591 {\
592  ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
593 }
594 
595 
596 #define BPP 1
597 
598 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
599 {
600  int wrap, wrap3, width2, skip2;
601  int y, u, v, a, u1, v1, a1, w, h;
602  uint8_t *lum, *cb, *cr;
603  const uint8_t *p;
604  const uint32_t *pal;
605  int dstx, dsty, dstw, dsth;
606 
607  dstw = av_clip(rect->w, 0, imgw);
608  dsth = av_clip(rect->h, 0, imgh);
609  dstx = av_clip(rect->x, 0, imgw - dstw);
610  dsty = av_clip(rect->y, 0, imgh - dsth);
611  lum = dst->data[0] + dsty * dst->linesize[0];
612  cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
613  cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
614 
615  width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
616  skip2 = dstx >> 1;
617  wrap = dst->linesize[0];
618  wrap3 = rect->pict.linesize[0];
619  p = rect->pict.data[0];
620  pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
621 
622  if (dsty & 1) {
623  lum += dstx;
624  cb += skip2;
625  cr += skip2;
626 
627  if (dstx & 1) {
628  YUVA_IN(y, u, v, a, p, pal);
629  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
630  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
631  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
632  cb++;
633  cr++;
634  lum++;
635  p += BPP;
636  }
637  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
638  YUVA_IN(y, u, v, a, p, pal);
639  u1 = u;
640  v1 = v;
641  a1 = a;
642  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
643 
644  YUVA_IN(y, u, v, a, p + BPP, pal);
645  u1 += u;
646  v1 += v;
647  a1 += a;
648  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
649  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
650  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
651  cb++;
652  cr++;
653  p += 2 * BPP;
654  lum += 2;
655  }
656  if (w) {
657  YUVA_IN(y, u, v, a, p, pal);
658  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
659  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
660  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
661  p++;
662  lum++;
663  }
664  p += wrap3 - dstw * BPP;
665  lum += wrap - dstw - dstx;
666  cb += dst->linesize[1] - width2 - skip2;
667  cr += dst->linesize[2] - width2 - skip2;
668  }
669  for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
670  lum += dstx;
671  cb += skip2;
672  cr += skip2;
673 
674  if (dstx & 1) {
675  YUVA_IN(y, u, v, a, p, pal);
676  u1 = u;
677  v1 = v;
678  a1 = a;
679  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
680  p += wrap3;
681  lum += wrap;
682  YUVA_IN(y, u, v, a, p, pal);
683  u1 += u;
684  v1 += v;
685  a1 += a;
686  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
687  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
688  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
689  cb++;
690  cr++;
691  p += -wrap3 + BPP;
692  lum += -wrap + 1;
693  }
694  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
695  YUVA_IN(y, u, v, a, p, pal);
696  u1 = u;
697  v1 = v;
698  a1 = a;
699  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
700 
701  YUVA_IN(y, u, v, a, p + BPP, pal);
702  u1 += u;
703  v1 += v;
704  a1 += a;
705  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
706  p += wrap3;
707  lum += wrap;
708 
709  YUVA_IN(y, u, v, a, p, pal);
710  u1 += u;
711  v1 += v;
712  a1 += a;
713  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
714 
715  YUVA_IN(y, u, v, a, p + BPP, pal);
716  u1 += u;
717  v1 += v;
718  a1 += a;
719  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
720 
721  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
722  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
723 
724  cb++;
725  cr++;
726  p += -wrap3 + 2 * BPP;
727  lum += -wrap + 2;
728  }
729  if (w) {
730  YUVA_IN(y, u, v, a, p, pal);
731  u1 = u;
732  v1 = v;
733  a1 = a;
734  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
735  p += wrap3;
736  lum += wrap;
737  YUVA_IN(y, u, v, a, p, pal);
738  u1 += u;
739  v1 += v;
740  a1 += a;
741  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
742  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
743  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
744  cb++;
745  cr++;
746  p += -wrap3 + BPP;
747  lum += -wrap + 1;
748  }
749  p += wrap3 + (wrap3 - dstw * BPP);
750  lum += wrap + (wrap - dstw - dstx);
751  cb += dst->linesize[1] - width2 - skip2;
752  cr += dst->linesize[2] - width2 - skip2;
753  }
754  /* handle odd height */
755  if (h) {
756  lum += dstx;
757  cb += skip2;
758  cr += skip2;
759 
760  if (dstx & 1) {
761  YUVA_IN(y, u, v, a, p, pal);
762  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
763  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
764  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
765  cb++;
766  cr++;
767  lum++;
768  p += BPP;
769  }
770  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
771  YUVA_IN(y, u, v, a, p, pal);
772  u1 = u;
773  v1 = v;
774  a1 = a;
775  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
776 
777  YUVA_IN(y, u, v, a, p + BPP, pal);
778  u1 += u;
779  v1 += v;
780  a1 += a;
781  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
782  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
783  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
784  cb++;
785  cr++;
786  p += 2 * BPP;
787  lum += 2;
788  }
789  if (w) {
790  YUVA_IN(y, u, v, a, p, pal);
791  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
792  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
793  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
794  }
795  }
796 }
797 
798 static void free_picture(VideoPicture *vp)
799 {
800  if (vp->bmp) {
801  SDL_FreeYUVOverlay(vp->bmp);
802  vp->bmp = NULL;
803  }
804 }
805 
806 static void free_subpicture(SubPicture *sp)
807 {
808  avsubtitle_free(&sp->sub);
809 }
810 
811 static void calculate_display_rect(SDL_Rect *rect,
812  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
813  int pic_width, int pic_height, AVRational pic_sar)
814 {
815  float aspect_ratio;
816  int width, height, x, y;
817 
818  if (pic_sar.num == 0)
819  aspect_ratio = 0;
820  else
821  aspect_ratio = av_q2d(pic_sar);
822 
823  if (aspect_ratio <= 0.0)
824  aspect_ratio = 1.0;
825  aspect_ratio *= (float)pic_width / (float)pic_height;
826 
827  /* XXX: we suppose the screen has a 1.0 pixel ratio */
828  height = scr_height;
829  width = ((int)rint(height * aspect_ratio)) & ~1;
830  if (width > scr_width) {
831  width = scr_width;
832  height = ((int)rint(width / aspect_ratio)) & ~1;
833  }
834  x = (scr_width - width) / 2;
835  y = (scr_height - height) / 2;
836  rect->x = scr_xleft + x;
837  rect->y = scr_ytop + y;
838  rect->w = FFMAX(width, 1);
839  rect->h = FFMAX(height, 1);
840 }
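/* Annotation (not part of ffplay.c): a worked example of the letterboxing math
 * above. A 1920x1080 picture with a 1:1 sample aspect ratio shown in a 640x480
 * area gives aspect_ratio = 1920/1080 ~= 1.778; height = 480 would need a width
 * of ~853, which exceeds 640, so width is clamped to 640 and height becomes
 * rint(640 / 1.778) & ~1 = 360. The result is rect = {x=0, y=60, w=640, h=360},
 * i.e. the picture is centered with 60-pixel bars above and below. */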
841 
842 static void video_image_display(VideoState *is)
843 {
844  VideoPicture *vp;
845  SubPicture *sp;
846  AVPicture pict;
847  SDL_Rect rect;
848  int i;
849 
850  vp = &is->pictq[is->pictq_rindex];
851  if (vp->bmp) {
852  if (is->subtitle_st) {
853  if (is->subpq_size > 0) {
854  sp = &is->subpq[is->subpq_rindex];
855 
856  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
857  SDL_LockYUVOverlay (vp->bmp);
858 
859  pict.data[0] = vp->bmp->pixels[0];
860  pict.data[1] = vp->bmp->pixels[2];
861  pict.data[2] = vp->bmp->pixels[1];
862 
863  pict.linesize[0] = vp->bmp->pitches[0];
864  pict.linesize[1] = vp->bmp->pitches[2];
865  pict.linesize[2] = vp->bmp->pitches[1];
866 
867  for (i = 0; i < sp->sub.num_rects; i++)
868  blend_subrect(&pict, sp->sub.rects[i],
869  vp->bmp->w, vp->bmp->h);
870 
871  SDL_UnlockYUVOverlay (vp->bmp);
872  }
873  }
874  }
875 
876  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
877 
878  SDL_DisplayYUVOverlay(vp->bmp, &rect);
879 
880  if (rect.x != is->last_display_rect.x || rect.y != is->last_display_rect.y || rect.w != is->last_display_rect.w || rect.h != is->last_display_rect.h || is->force_refresh) {
881  int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
882  fill_border(is->xleft, is->ytop, is->width, is->height, rect.x, rect.y, rect.w, rect.h, bgcolor, 1);
883  is->last_display_rect = rect;
884  }
885  }
886 }
887 
888 static inline int compute_mod(int a, int b)
889 {
890  return a < 0 ? a%b + b : a%b;
891 }
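/* Annotation (not part of ffplay.c): compute_mod() is a modulo that always
 * returns a value in [0, b), unlike C's % operator for negative operands. For
 * example compute_mod(-3, 8) == 5 and compute_mod(11, 8) == 3. video_audio_display()
 * relies on this to step backwards through the circular sample_array without
 * producing a negative index. */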
892 
893 static void video_audio_display(VideoState *s)
894 {
895  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
896  int ch, channels, h, h2, bgcolor, fgcolor;
897  int64_t time_diff;
898  int rdft_bits, nb_freq;
899 
900  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
901  ;
902  nb_freq = 1 << (rdft_bits - 1);
903 
904  /* compute display index : center on currently output samples */
905  channels = s->audio_tgt.channels;
906  nb_display_channels = channels;
907  if (!s->paused) {
908  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
909  n = 2 * channels;
910  delay = s->audio_write_buf_size;
911  delay /= n;
912 
913  /* to be more precise, we take into account the time spent since
914  the last buffer computation */
915  if (audio_callback_time) {
916  time_diff = av_gettime_relative() - audio_callback_time;
917  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
918  }
919 
920  delay += 2 * data_used;
921  if (delay < data_used)
922  delay = data_used;
923 
924  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
925  if (s->show_mode == SHOW_MODE_WAVES) {
926  h = INT_MIN;
927  for (i = 0; i < 1000; i += channels) {
928  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
929  int a = s->sample_array[idx];
930  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
931  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
932  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
933  int score = a - d;
934  if (h < score && (b ^ c) < 0) {
935  h = score;
936  i_start = idx;
937  }
938  }
939  }
940 
941  s->last_i_start = i_start;
942  } else {
943  i_start = s->last_i_start;
944  }
945 
946  bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
947  if (s->show_mode == SHOW_MODE_WAVES) {
948  fill_rectangle(screen,
949  s->xleft, s->ytop, s->width, s->height,
950  bgcolor, 0);
951 
952  fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
953 
954  /* total height for one channel */
955  h = s->height / nb_display_channels;
956  /* graph height / 2 */
957  h2 = (h * 9) / 20;
958  for (ch = 0; ch < nb_display_channels; ch++) {
959  i = i_start + ch;
960  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
961  for (x = 0; x < s->width; x++) {
962  y = (s->sample_array[i] * h2) >> 15;
963  if (y < 0) {
964  y = -y;
965  ys = y1 - y;
966  } else {
967  ys = y1;
968  }
969  fill_rectangle(screen,
970  s->xleft + x, ys, 1, y,
971  fgcolor, 0);
972  i += channels;
973  if (i >= SAMPLE_ARRAY_SIZE)
974  i -= SAMPLE_ARRAY_SIZE;
975  }
976  }
977 
978  fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
979 
980  for (ch = 1; ch < nb_display_channels; ch++) {
981  y = s->ytop + ch * h;
982  fill_rectangle(screen,
983  s->xleft, y, s->width, 1,
984  fgcolor, 0);
985  }
986  SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
987  } else {
988  nb_display_channels= FFMIN(nb_display_channels, 2);
989  if (rdft_bits != s->rdft_bits) {
990  av_rdft_end(s->rdft);
991  av_free(s->rdft_data);
992  s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
993  s->rdft_bits = rdft_bits;
994  s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data));
995  }
996  {
997  FFTSample *data[2];
998  for (ch = 0; ch < nb_display_channels; ch++) {
999  data[ch] = s->rdft_data + 2 * nb_freq * ch;
1000  i = i_start + ch;
1001  for (x = 0; x < 2 * nb_freq; x++) {
1002  double w = (x-nb_freq) * (1.0 / nb_freq);
1003  data[ch][x] = s->sample_array[i] * (1.0 - w * w);
1004  i += channels;
1005  if (i >= SAMPLE_ARRAY_SIZE)
1006  i -= SAMPLE_ARRAY_SIZE;
1007  }
1008  av_rdft_calc(s->rdft, data[ch]);
1009  }
1010  /* Least efficient way to do this, we should of course
1011  * directly access it but it is more than fast enough. */
1012  for (y = 0; y < s->height; y++) {
1013  double w = 1 / sqrt(nb_freq);
1014  int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
1015  int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
1016  + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
1017  a = FFMIN(a, 255);
1018  b = FFMIN(b, 255);
1019  fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
1020 
1021  fill_rectangle(screen,
1022  s->xpos, s->height-y, 1, 1,
1023  fgcolor, 0);
1024  }
1025  }
1026  SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
1027  if (!s->paused)
1028  s->xpos++;
1029  if (s->xpos >= s->width)
1030  s->xpos= s->xleft;
1031  }
1032 }
1033 
1034 static void stream_close(VideoState *is)
1035 {
1036  int i;
1037  /* XXX: use a special url_shutdown call to abort parse cleanly */
1038  is->abort_request = 1;
1039  SDL_WaitThread(is->read_tid, NULL);
1040  packet_queue_destroy(&is->videoq);
1041  packet_queue_destroy(&is->audioq);
1042  packet_queue_destroy(&is->subtitleq);
1043 
1044  /* free all pictures */
1045  for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
1046  free_picture(&is->pictq[i]);
1047  for (i = 0; i < SUBPICTURE_QUEUE_SIZE; i++)
1048  free_subpicture(&is->subpq[i]);
1049  SDL_DestroyMutex(is->pictq_mutex);
1050  SDL_DestroyCond(is->pictq_cond);
1051  SDL_DestroyMutex(is->subpq_mutex);
1052  SDL_DestroyCond(is->subpq_cond);
1053  SDL_DestroyCond(is->continue_read_thread);
1054 #if !CONFIG_AVFILTER
1055  sws_freeContext(is->img_convert_ctx);
1056 #endif
1057  av_free(is);
1058 }
1059 
1060 static void do_exit(VideoState *is)
1061 {
1062  if (is) {
1063  stream_close(is);
1064  }
1065  av_lockmgr_register(NULL);
1066  uninit_opts();
1067 #if CONFIG_AVFILTER
1068  av_freep(&vfilters_list);
1069 #endif
1070  avformat_network_deinit();
1071  if (show_status)
1072  printf("\n");
1073  SDL_Quit();
1074  av_log(NULL, AV_LOG_QUIET, "%s", "");
1075  exit(0);
1076 }
1077 
1078 static void sigterm_handler(int sig)
1079 {
1080  exit(123);
1081 }
1082 
1083 static void set_default_window_size(int width, int height, AVRational sar)
1084 {
1085  SDL_Rect rect;
1086  calculate_display_rect(&rect, 0, 0, INT_MAX, height, width, height, sar);
1087  default_width = rect.w;
1088  default_height = rect.h;
1089 }
1090 
1091 static int video_open(VideoState *is, int force_set_video_mode, VideoPicture *vp)
1092 {
1093  int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
1094  int w,h;
1095 
1096  if (is_full_screen) flags |= SDL_FULLSCREEN;
1097  else flags |= SDL_RESIZABLE;
1098 
1099  if (vp && vp->width)
1100  set_default_window_size(vp->width, vp->height, vp->sar);
1101 
1102  if (is_full_screen && fs_screen_width) {
1103  w = fs_screen_width;
1104  h = fs_screen_height;
1105  } else if (!is_full_screen && screen_width) {
1106  w = screen_width;
1107  h = screen_height;
1108  } else {
1109  w = default_width;
1110  h = default_height;
1111  }
1112  w = FFMIN(16383, w);
1113  if (screen && is->width == screen->w && screen->w == w
1114  && is->height== screen->h && screen->h == h && !force_set_video_mode)
1115  return 0;
1116  screen = SDL_SetVideoMode(w, h, 0, flags);
1117  if (!screen) {
1118  av_log(NULL, AV_LOG_FATAL, "SDL: could not set video mode - exiting\n");
1119  do_exit(is);
1120  }
1121  if (!window_title)
1122  window_title = input_filename;
1123  SDL_WM_SetCaption(window_title, window_title);
1124 
1125  is->width = screen->w;
1126  is->height = screen->h;
1127 
1128  return 0;
1129 }
1130 
1131 /* display the current picture, if any */
1132 static void video_display(VideoState *is)
1133 {
1134  if (!screen)
1135  video_open(is, 0, NULL);
1136  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1137  video_audio_display(is);
1138  else if (is->video_st)
1139  video_image_display(is);
1140 }
1141 
1142 static double get_clock(Clock *c)
1143 {
1144  if (*c->queue_serial != c->serial)
1145  return NAN;
1146  if (c->paused) {
1147  return c->pts;
1148  } else {
1149  double time = av_gettime_relative() / 1000000.0;
1150  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1151  }
1152 }
1153 
1154 static void set_clock_at(Clock *c, double pts, int serial, double time)
1155 {
1156  c->pts = pts;
1157  c->last_updated = time;
1158  c->pts_drift = c->pts - time;
1159  c->serial = serial;
1160 }
1161 
1162 static void set_clock(Clock *c, double pts, int serial)
1163 {
1164  double time = av_gettime_relative() / 1000000.0;
1165  set_clock_at(c, pts, serial, time);
1166 }
1167 
1168 static void set_clock_speed(Clock *c, double speed)
1169 {
1170  set_clock(c, get_clock(c), c->serial);
1171  c->speed = speed;
1172 }
1173 
1174 static void init_clock(Clock *c, int *queue_serial)
1175 {
1176  c->speed = 1.0;
1177  c->paused = 0;
1178  c->queue_serial = queue_serial;
1179  set_clock(c, NAN, -1);
1180 }
1181 
1182 static void sync_clock_to_slave(Clock *c, Clock *slave)
1183 {
1184  double clock = get_clock(c);
1185  double slave_clock = get_clock(slave);
1186  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1187  set_clock(c, slave_clock, slave->serial);
1188 }
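/* Annotation (not part of ffplay.c): a numeric example of the clock bookkeeping
 * above, assuming av_gettime_relative() reports wall-clock time in microseconds.
 * set_clock(c, 10.0, serial) at wall time 100.0 s stores pts_drift = 10.0 - 100.0
 * = -90.0. Half a second later, with speed == 1.0, get_clock() returns
 * -90.0 + 100.5 = 10.5, i.e. the stream position has advanced by exactly the
 * elapsed wall time. With speed == 1.01 the same call returns
 * -90.0 + 100.5 - 0.5 * (1.0 - 1.01) = 10.505, so the clock runs 1% fast. */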
1189 
1190 static int get_master_sync_type(VideoState *is) {
1191  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1192  if (is->video_st)
1193  return AV_SYNC_VIDEO_MASTER;
1194  else
1195  return AV_SYNC_AUDIO_MASTER;
1196  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1197  if (is->audio_st)
1198  return AV_SYNC_AUDIO_MASTER;
1199  else
1200  return AV_SYNC_EXTERNAL_CLOCK;
1201  } else {
1202  return AV_SYNC_EXTERNAL_CLOCK;
1203  }
1204 }
1205 
1206 /* get the current master clock value */
1207 static double get_master_clock(VideoState *is)
1208 {
1209  double val;
1210 
1211  switch (get_master_sync_type(is)) {
1212  case AV_SYNC_VIDEO_MASTER:
1213  val = get_clock(&is->vidclk);
1214  break;
1215  case AV_SYNC_AUDIO_MASTER:
1216  val = get_clock(&is->audclk);
1217  break;
1218  default:
1219  val = get_clock(&is->extclk);
1220  break;
1221  }
1222  return val;
1223 }
1224 
1225 static void check_external_clock_speed(VideoState *is) {
1226  if (is->video_stream >= 0 && is->videoq.nb_packets <= MIN_FRAMES / 2 ||
1227  is->audio_stream >= 0 && is->audioq.nb_packets <= MIN_FRAMES / 2) {
1228  set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
1229  } else if ((is->video_stream < 0 || is->videoq.nb_packets > MIN_FRAMES * 2) &&
1230  (is->audio_stream < 0 || is->audioq.nb_packets > MIN_FRAMES * 2)) {
1231  set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
1232  } else {
1233  double speed = is->extclk.speed;
1234  if (speed != 1.0)
1235  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1236  }
1237 }
1238 
1239 /* seek in the stream */
1240 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1241 {
1242  if (!is->seek_req) {
1243  is->seek_pos = pos;
1244  is->seek_rel = rel;
1245  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1246  if (seek_by_bytes)
1247  is->seek_flags |= AVSEEK_FLAG_BYTE;
1248  is->seek_req = 1;
1249  SDL_CondSignal(is->continue_read_thread);
1250  }
1251 }
1252 
1253 /* pause or resume the video */
1254 static void stream_toggle_pause(VideoState *is)
1255 {
1256  if (is->paused) {
1257  is->frame_timer += av_gettime_relative() / 1000000.0 + is->vidclk.pts_drift - is->vidclk.pts;
1258  if (is->read_pause_return != AVERROR(ENOSYS)) {
1259  is->vidclk.paused = 0;
1260  }
1261  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1262  }
1263  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1264  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1265 }
1266 
1267 static void toggle_pause(VideoState *is)
1268 {
1269  stream_toggle_pause(is);
1270  is->step = 0;
1271 }
1272 
1273 static void step_to_next_frame(VideoState *is)
1274 {
1275  /* if the stream is paused unpause it, then step */
1276  if (is->paused)
1277  stream_toggle_pause(is);
1278  is->step = 1;
1279 }
1280 
1281 static double compute_target_delay(double delay, VideoState *is)
1282 {
1283  double sync_threshold, diff;
1284 
1285  /* update delay to follow master synchronisation source */
1286  if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1287  /* if video is slave, we try to correct big delays by
1288  duplicating or deleting a frame */
1289  diff = get_clock(&is->vidclk) - get_master_clock(is);
1290 
1291  /* skip or repeat frame. We take into account the
1292  delay to compute the threshold. I still don't know
1293  if it is the best guess */
1294  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1295  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1296  if (diff <= -sync_threshold)
1297  delay = FFMAX(0, delay + diff);
1298  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1299  delay = delay + diff;
1300  else if (diff >= sync_threshold)
1301  delay = 2 * delay;
1302  }
1303  }
1304 
1305  av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1306  delay, -diff);
1307 
1308  return delay;
1309 }
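/* Annotation (not part of ffplay.c): worked examples for compute_target_delay().
 * With a 25 fps stream, delay = 0.040 and sync_threshold = 0.040.
 *  - diff = -0.100 (video lags the master): delay = FFMAX(0, 0.040 - 0.100) = 0,
 *    so the next frame is shown immediately to catch up.
 *  - diff = +0.100 (video ahead): 0.040 is not above AV_SYNC_FRAMEDUP_THRESHOLD,
 *    so delay is doubled to 0.080, effectively repeating the current frame once.
 *  - With a 2 fps stream (delay = 0.5) and diff = +0.2, delay becomes 0.5 + 0.2,
 *    stretching the current frame instead of duplicating it. */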
1310 
1311 static double vp_duration(VideoState *is, VideoPicture *vp, VideoPicture *nextvp) {
1312  if (vp->serial == nextvp->serial) {
1313  double duration = nextvp->pts - vp->pts;
1314  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1315  return vp->duration;
1316  else
1317  return duration;
1318  } else {
1319  return 0.0;
1320  }
1321 }
1322 
1323 /* return the number of undisplayed pictures in the queue */
1324 static int pictq_nb_remaining(VideoState *is) {
1325  return is->pictq_size - is->pictq_rindex_shown;
1326 }
1327 
1328 /* jump back to the previous picture if available by resetting rindex_shown */
1329 static int pictq_prev_picture(VideoState *is) {
1330  int ret = is->pictq_rindex_shown;
1331  is->pictq_rindex_shown = 0;
1332  return ret;
1333 }
1334 
1335 static void pictq_next_picture(VideoState *is) {
1336  if (!is->pictq_rindex_shown) {
1337  is->pictq_rindex_shown = 1;
1338  return;
1339  }
1340  /* update queue size and signal for next picture */
1341  if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1342  is->pictq_rindex = 0;
1343 
1344  SDL_LockMutex(is->pictq_mutex);
1345  is->pictq_size--;
1346  SDL_CondSignal(is->pictq_cond);
1347  SDL_UnlockMutex(is->pictq_mutex);
1348 }
1349 
1350 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1351  /* update current video pts */
1352  set_clock(&is->vidclk, pts, serial);
1353  sync_clock_to_slave(&is->extclk, &is->vidclk);
1354  is->video_current_pos = pos;
1355 }
1356 
1357 /* called to display each frame */
1358 static void video_refresh(void *opaque, double *remaining_time)
1359 {
1360  VideoState *is = opaque;
1361  double time;
1362 
1363  SubPicture *sp, *sp2;
1364 
1365  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1366  check_external_clock_speed(is);
1367 
1368  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1369  time = av_gettime_relative() / 1000000.0;
1370  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1371  video_display(is);
1372  is->last_vis_time = time;
1373  }
1374  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1375  }
1376 
1377  if (is->video_st) {
1378  int redisplay = 0;
1379  if (is->force_refresh)
1380  redisplay = pictq_prev_picture(is);
1381 retry:
1382  if (pictq_nb_remaining(is) == 0) {
1383  // nothing to do, no picture to display in the queue
1384  } else {
1385  double last_duration, duration, delay;
1386  VideoPicture *vp, *lastvp;
1387 
1388  /* dequeue the picture */
1389  lastvp = &is->pictq[is->pictq_rindex];
1390  vp = &is->pictq[(is->pictq_rindex + is->pictq_rindex_shown) % VIDEO_PICTURE_QUEUE_SIZE];
1391 
1392  if (vp->serial != is->videoq.serial) {
1393  pictq_next_picture(is);
1394  is->video_current_pos = -1;
1395  redisplay = 0;
1396  goto retry;
1397  }
1398 
1399  if (lastvp->serial != vp->serial && !redisplay)
1400  is->frame_timer = av_gettime_relative() / 1000000.0;
1401 
1402  if (is->paused)
1403  goto display;
1404 
1405  /* compute nominal last_duration */
1406  last_duration = vp_duration(is, lastvp, vp);
1407  if (redisplay)
1408  delay = 0.0;
1409  else
1410  delay = compute_target_delay(last_duration, is);
1411 
1412  time= av_gettime_relative()/1000000.0;
1413  if (time < is->frame_timer + delay && !redisplay) {
1414  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1415  return;
1416  }
1417 
1418  is->frame_timer += delay;
1419  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1420  is->frame_timer = time;
1421 
1422  SDL_LockMutex(is->pictq_mutex);
1423  if (!redisplay && !isnan(vp->pts))
1424  update_video_pts(is, vp->pts, vp->pos, vp->serial);
1425  SDL_UnlockMutex(is->pictq_mutex);
1426 
1427  if (pictq_nb_remaining(is) > 1) {
1428  VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + is->pictq_rindex_shown + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1429  duration = vp_duration(is, vp, nextvp);
1430  if(!is->step && (redisplay || framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1431  if (!redisplay)
1432  is->frame_drops_late++;
1433  pictq_next_picture(is);
1434  redisplay = 0;
1435  goto retry;
1436  }
1437  }
1438 
1439  if (is->subtitle_st) {
1440  while (is->subpq_size > 0) {
1441  sp = &is->subpq[is->subpq_rindex];
1442 
1443  if (is->subpq_size > 1)
1444  sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1445  else
1446  sp2 = NULL;
1447 
1448  if (sp->serial != is->subtitleq.serial
1449  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1450  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1451  {
1452  free_subpicture(sp);
1453 
1454  /* update queue size and signal for next picture */
1455  if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1456  is->subpq_rindex = 0;
1457 
1458  SDL_LockMutex(is->subpq_mutex);
1459  is->subpq_size--;
1460  SDL_CondSignal(is->subpq_cond);
1461  SDL_UnlockMutex(is->subpq_mutex);
1462  } else {
1463  break;
1464  }
1465  }
1466  }
1467 
1468 display:
1469  /* display picture */
1470  if (!display_disable && is->show_mode == SHOW_MODE_VIDEO)
1471  video_display(is);
1472 
1473  pictq_next_picture(is);
1474 
1475  if (is->step && !is->paused)
1476  stream_toggle_pause(is);
1477  }
1478  }
1479  is->force_refresh = 0;
1480  if (show_status) {
1481  static int64_t last_time;
1482  int64_t cur_time;
1483  int aqsize, vqsize, sqsize;
1484  double av_diff;
1485 
1486  cur_time = av_gettime_relative();
1487  if (!last_time || (cur_time - last_time) >= 30000) {
1488  aqsize = 0;
1489  vqsize = 0;
1490  sqsize = 0;
1491  if (is->audio_st)
1492  aqsize = is->audioq.size;
1493  if (is->video_st)
1494  vqsize = is->videoq.size;
1495  if (is->subtitle_st)
1496  sqsize = is->subtitleq.size;
1497  av_diff = 0;
1498  if (is->audio_st && is->video_st)
1499  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1500  else if (is->video_st)
1501  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1502  else if (is->audio_st)
1503  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1504  av_log(NULL, AV_LOG_INFO,
1505  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1506  get_master_clock(is),
1507  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1508  av_diff,
1509  is->frame_drops_early + is->frame_drops_late,
1510  aqsize / 1024,
1511  vqsize / 1024,
1512  sqsize,
1513  is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1514  is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1515  fflush(stdout);
1516  last_time = cur_time;
1517  }
1518  }
1519 }
1520 
1521 /* allocate a picture (this needs to be done in the main thread to avoid
1522  potential locking problems) */
1523 static void alloc_picture(VideoState *is)
1524 {
1525  VideoPicture *vp;
1526  int64_t bufferdiff;
1527 
1528  vp = &is->pictq[is->pictq_windex];
1529 
1530  free_picture(vp);
1531 
1532  video_open(is, 0, vp);
1533 
1534  vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1535  SDL_YV12_OVERLAY,
1536  screen);
1537  bufferdiff = vp->bmp ? FFMAX(vp->bmp->pixels[0], vp->bmp->pixels[1]) - FFMIN(vp->bmp->pixels[0], vp->bmp->pixels[1]) : 0;
1538  if (!vp->bmp || vp->bmp->pitches[0] < vp->width || bufferdiff < (int64_t)vp->height * vp->bmp->pitches[0]) {
1539  /* SDL allocates a buffer smaller than requested if the video
1540  * overlay hardware is unable to support the requested size. */
1541  av_log(NULL, AV_LOG_FATAL,
1542  "Error: the video system does not support an image\n"
1543  "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1544  "to reduce the image size.\n", vp->width, vp->height );
1545  do_exit(is);
1546  }
1547 
1548  SDL_LockMutex(is->pictq_mutex);
1549  vp->allocated = 1;
1550  SDL_CondSignal(is->pictq_cond);
1551  SDL_UnlockMutex(is->pictq_mutex);
1552 }
1553 
1554 static void duplicate_right_border_pixels(SDL_Overlay *bmp) {
1555  int i, width, height;
1556  Uint8 *p, *maxp;
1557  for (i = 0; i < 3; i++) {
1558  width = bmp->w;
1559  height = bmp->h;
1560  if (i > 0) {
1561  width >>= 1;
1562  height >>= 1;
1563  }
1564  if (bmp->pitches[i] > width) {
1565  maxp = bmp->pixels[i] + bmp->pitches[i] * height - 1;
1566  for (p = bmp->pixels[i] + width - 1; p < maxp; p += bmp->pitches[i])
1567  *(p+1) = *p;
1568  }
1569  }
1570 }
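/* Annotation (not part of ffplay.c): SDL may pick an overlay pitch larger than
 * the image width (e.g. a 720-pixel wide plane with a 768-byte pitch). The loop
 * above copies the last real pixel of every row into the first padding column so
 * that the otherwise uninitialized column does not show up as a colored stripe
 * when the overlay is scaled for display. */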
1571 
1572 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1573 {
1574  VideoPicture *vp;
1575 
1576 #if defined(DEBUG_SYNC) && 0
1577  printf("frame_type=%c pts=%0.3f\n",
1578  av_get_picture_type_char(src_frame->pict_type), pts);
1579 #endif
1580 
1581  /* wait until we have space to put a new picture */
1582  SDL_LockMutex(is->pictq_mutex);
1583 
1584  while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1585  !is->videoq.abort_request) {
1586  SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1587  }
1588  SDL_UnlockMutex(is->pictq_mutex);
1589 
1590  if (is->videoq.abort_request)
1591  return -1;
1592 
1593  vp = &is->pictq[is->pictq_windex];
1594 
1595  vp->sar = src_frame->sample_aspect_ratio;
1596 
1597  /* alloc or resize hardware picture buffer */
1598  if (!vp->bmp || vp->reallocate || !vp->allocated ||
1599  vp->width != src_frame->width ||
1600  vp->height != src_frame->height) {
1601  SDL_Event event;
1602 
1603  vp->allocated = 0;
1604  vp->reallocate = 0;
1605  vp->width = src_frame->width;
1606  vp->height = src_frame->height;
1607 
1608  /* the allocation must be done in the main thread to avoid
1609  locking problems. */
1610  event.type = FF_ALLOC_EVENT;
1611  event.user.data1 = is;
1612  SDL_PushEvent(&event);
1613 
1614  /* wait until the picture is allocated */
1615  SDL_LockMutex(is->pictq_mutex);
1616  while (!vp->allocated && !is->videoq.abort_request) {
1617  SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1618  }
1619  /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1620  if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1621  while (!vp->allocated && !is->abort_request) {
1622  SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1623  }
1624  }
1625  SDL_UnlockMutex(is->pictq_mutex);
1626 
1627  if (is->videoq.abort_request)
1628  return -1;
1629  }
1630 
1631  /* if the frame is not skipped, then display it */
1632  if (vp->bmp) {
1633  AVPicture pict = { { 0 } };
1634 
1635  /* get a pointer on the bitmap */
1636  SDL_LockYUVOverlay (vp->bmp);
1637 
1638  pict.data[0] = vp->bmp->pixels[0];
1639  pict.data[1] = vp->bmp->pixels[2];
1640  pict.data[2] = vp->bmp->pixels[1];
1641 
1642  pict.linesize[0] = vp->bmp->pitches[0];
1643  pict.linesize[1] = vp->bmp->pitches[2];
1644  pict.linesize[2] = vp->bmp->pitches[1];
1645 
1646 #if CONFIG_AVFILTER
1647  // FIXME use direct rendering
1648  av_picture_copy(&pict, (AVPicture *)src_frame,
1649  src_frame->format, vp->width, vp->height);
1650 #else
1651  av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1652  is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1653  vp->width, vp->height, src_frame->format, vp->width, vp->height,
1654  AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1655  if (!is->img_convert_ctx) {
1656  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1657  exit(1);
1658  }
1659  sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1660  0, vp->height, pict.data, pict.linesize);
1661 #endif
1662  /* workaround SDL PITCH_WORKAROUND */
1663  duplicate_right_border_pixels(vp->bmp);
1664  /* update the bitmap content */
1665  SDL_UnlockYUVOverlay(vp->bmp);
1666 
1667  vp->pts = pts;
1668  vp->duration = duration;
1669  vp->pos = pos;
1670  vp->serial = serial;
1671 
1672  /* now we can update the picture count */
1673  if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1674  is->pictq_windex = 0;
1675  SDL_LockMutex(is->pictq_mutex);
1676  is->pictq_size++;
1677  SDL_UnlockMutex(is->pictq_mutex);
1678  }
1679  return 0;
1680 }
1681 
1682 static int get_video_frame(VideoState *is, AVFrame *frame, AVPacket *pkt, int *serial)
1683 {
1684  int got_picture;
1685 
1686  if (packet_queue_get(&is->videoq, pkt, 1, serial) < 0)
1687  return -1;
1688 
1689  if (pkt->data == flush_pkt.data) {
1690  avcodec_flush_buffers(is->video_st->codec);
1691  return 0;
1692  }
1693 
1694  if(avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt) < 0)
1695  return 0;
1696 
1697  if (!got_picture && !pkt->data)
1698  is->video_finished = *serial;
1699 
1700  if (got_picture) {
1701  int ret = 1;
1702  double dpts = NAN;
1703 
1704  if (decoder_reorder_pts == -1) {
1705  frame->pts = av_frame_get_best_effort_timestamp(frame);
1706  } else if (decoder_reorder_pts) {
1707  frame->pts = frame->pkt_pts;
1708  } else {
1709  frame->pts = frame->pkt_dts;
1710  }
1711 
1712  if (frame->pts != AV_NOPTS_VALUE)
1713  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1714 
1715  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1716 
1717  if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1718  if (frame->pts != AV_NOPTS_VALUE) {
1719  double diff = dpts - get_master_clock(is);
1720  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1721  diff - is->frame_last_filter_delay < 0 &&
1722  *serial == is->vidclk.serial &&
1723  is->videoq.nb_packets) {
1724  is->frame_drops_early++;
1725  av_frame_unref(frame);
1726  ret = 0;
1727  }
1728  }
1729  }
1730 
1731  return ret;
1732  }
1733  return 0;
1734 }
1735 
1736 #if CONFIG_AVFILTER
1737 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1738  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1739 {
1740  int ret, i;
1741  int nb_filters = graph->nb_filters;
1742  AVFilterInOut *outputs = NULL, *inputs = NULL;
1743 
1744  if (filtergraph) {
1745  outputs = avfilter_inout_alloc();
1746  inputs = avfilter_inout_alloc();
1747  if (!outputs || !inputs) {
1748  ret = AVERROR(ENOMEM);
1749  goto fail;
1750  }
1751 
1752  outputs->name = av_strdup("in");
1753  outputs->filter_ctx = source_ctx;
1754  outputs->pad_idx = 0;
1755  outputs->next = NULL;
1756 
1757  inputs->name = av_strdup("out");
1758  inputs->filter_ctx = sink_ctx;
1759  inputs->pad_idx = 0;
1760  inputs->next = NULL;
1761 
1762  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1763  goto fail;
1764  } else {
1765  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1766  goto fail;
1767  }
1768 
1769  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1770  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1771  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1772 
1773  ret = avfilter_graph_config(graph, NULL);
1774 fail:
1775  avfilter_inout_free(&outputs);
1776  avfilter_inout_free(&inputs);
1777  return ret;
1778 }
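/* Annotation (not part of ffplay.c): configure_filtergraph() wires
 * source_ctx -> [user filter chain] -> sink_ctx. Because outputs->name is "in"
 * (bound to the source) and inputs->name is "out" (bound to the sink), a user
 * string such as "hflip,scale=640:360" is parsed as if it were written
 * "[in] hflip,scale=640:360 [out]". When filtergraph is NULL the source is linked
 * straight to the sink, and the final FFSWAP loop reorders graph->filters so that
 * the inputs of the user's filters are merged first, as the comment above notes. */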
1779 
1780 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1781 {
1782  static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
1783  char sws_flags_str[128];
1784  char buffersrc_args[256];
1785  int ret;
1786  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1787  AVCodecContext *codec = is->video_st->codec;
1788  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1789 
1790  av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1791  snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1792  graph->scale_sws_opts = av_strdup(sws_flags_str);
1793 
1794  snprintf(buffersrc_args, sizeof(buffersrc_args),
1795  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1796  frame->width, frame->height, frame->format,
1797  is->video_st->time_base.num, is->video_st->time_base.den,
1798  codec->sample_aspect_ratio.num, FFMAX(codec->sample_aspect_ratio.den, 1));
1799  if (fr.num && fr.den)
1800  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1801 
1802  if ((ret = avfilter_graph_create_filter(&filt_src,
1803  avfilter_get_by_name("buffer"),
1804  "ffplay_buffer", buffersrc_args, NULL,
1805  graph)) < 0)
1806  goto fail;
1807 
1808  ret = avfilter_graph_create_filter(&filt_out,
1809  avfilter_get_by_name("buffersink"),
1810  "ffplay_buffersink", NULL, NULL, graph);
1811  if (ret < 0)
1812  goto fail;
1813 
1814  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1815  goto fail;
1816 
1817  last_filter = filt_out;
1818 
1819 /* Note: this macro adds a filter before the lastly added filter, so the
1820  * processing order of the filters is in reverse */
1821 #define INSERT_FILT(name, arg) do { \
1822  AVFilterContext *filt_ctx; \
1823  \
1824  ret = avfilter_graph_create_filter(&filt_ctx, \
1825  avfilter_get_by_name(name), \
1826  "ffplay_" name, arg, NULL, graph); \
1827  if (ret < 0) \
1828  goto fail; \
1829  \
1830  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1831  if (ret < 0) \
1832  goto fail; \
1833  \
1834  last_filter = filt_ctx; \
1835 } while (0)
1836 
1837  /* SDL YUV code is not handling odd width/height for some driver
1838  * combinations, therefore we crop the picture to an even width/height. */
1839  INSERT_FILT("crop", "floor(in_w/2)*2:floor(in_h/2)*2");
1840 
1841  if (autorotate) {
1842  AVDictionaryEntry *rotate_tag = av_dict_get(is->video_st->metadata, "rotate", NULL, 0);
1843  if (rotate_tag && *rotate_tag->value && strcmp(rotate_tag->value, "0")) {
1844  if (!strcmp(rotate_tag->value, "90")) {
1845  INSERT_FILT("transpose", "clock");
1846  } else if (!strcmp(rotate_tag->value, "180")) {
1847  INSERT_FILT("hflip", NULL);
1848  INSERT_FILT("vflip", NULL);
1849  } else if (!strcmp(rotate_tag->value, "270")) {
1850  INSERT_FILT("transpose", "cclock");
1851  } else {
1852  char rotate_buf[64];
1853  snprintf(rotate_buf, sizeof(rotate_buf), "%s*PI/180", rotate_tag->value);
1854  INSERT_FILT("rotate", rotate_buf);
1855  }
1856  }
1857  }
1858 
1859  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
1860  goto fail;
1861 
1862  is->in_video_filter = filt_src;
1863  is->out_video_filter = filt_out;
1864 
1865 fail:
1866  return ret;
1867 }
1868 
1869 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1870 {
1871  static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
1872  int sample_rates[2] = { 0, -1 };
1873  int64_t channel_layouts[2] = { 0, -1 };
1874  int channels[2] = { 0, -1 };
1875  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1876  char aresample_swr_opts[512] = "";
1877  AVDictionaryEntry *e = NULL;
1878  char asrc_args[256];
1879  int ret;
1880 
1881  avfilter_graph_free(&is->agraph);
1882  if (!(is->agraph = avfilter_graph_alloc()))
1883  return AVERROR(ENOMEM);
1884 
1885  while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
1886  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
1887  if (strlen(aresample_swr_opts))
1888  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
1889  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
1890 
1891  ret = snprintf(asrc_args, sizeof(asrc_args),
1892  "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
1893  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
1894  is->audio_filter_src.channels,
1895  1, is->audio_filter_src.freq);
1896  if (is->audio_filter_src.channel_layout)
1897  snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
1898  ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
1899 
1900  ret = avfilter_graph_create_filter(&filt_asrc,
1901  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1902  asrc_args, NULL, is->agraph);
1903  if (ret < 0)
1904  goto end;
1905 
1906 
1907  ret = avfilter_graph_create_filter(&filt_asink,
1908  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
1909  NULL, NULL, is->agraph);
1910  if (ret < 0)
1911  goto end;
1912 
1913  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1914  goto end;
1915  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1916  goto end;
1917 
1918  if (force_output_format) {
1919  channel_layouts[0] = is->audio_tgt.channel_layout;
1920  channels [0] = is->audio_tgt.channels;
1921  sample_rates [0] = is->audio_tgt.freq;
1922  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
1923  goto end;
1924  if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1925  goto end;
1926  if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1927  goto end;
1928  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1929  goto end;
1930  }
1931 
1932 
1933  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
1934  goto end;
1935 
1936  is->in_audio_filter = filt_asrc;
1937  is->out_audio_filter = filt_asink;
1938 
1939 end:
1940  if (ret < 0)
1941  avfilter_graph_free(&is->agraph);
1942  return ret;
1943 }
1944 #endif /* CONFIG_AVFILTER */
1945 
1946 static int video_thread(void *arg)
1947 {
1948  AVPacket pkt = { 0 };
1949  VideoState *is = arg;
1950  AVFrame *frame = av_frame_alloc();
1951  double pts;
1952  double duration;
1953  int ret;
1954  int serial = 0;
1955  AVRational tb = is->video_st->time_base;
1956  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
1957 
1958 #if CONFIG_AVFILTER
1959  AVFilterGraph *graph = avfilter_graph_alloc();
1960  AVFilterContext *filt_out = NULL, *filt_in = NULL;
1961  int last_w = 0;
1962  int last_h = 0;
1963  enum AVPixelFormat last_format = -2;
1964  int last_serial = -1;
1965  int last_vfilter_idx = 0;
1966 #endif
1967 
1968  for (;;) {
1969  while (is->paused && !is->videoq.abort_request)
1970  SDL_Delay(10);
1971 
1972  av_free_packet(&pkt);
1973 
1974  ret = get_video_frame(is, frame, &pkt, &serial);
1975  if (ret < 0)
1976  goto the_end;
1977  if (!ret)
1978  continue;
1979 
1980 #if CONFIG_AVFILTER
1981  if ( last_w != frame->width
1982  || last_h != frame->height
1983  || last_format != frame->format
1984  || last_serial != serial
1985  || last_vfilter_idx != is->vfilter_idx) {
1986  av_log(NULL, AV_LOG_DEBUG,
1987  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
1988  last_w, last_h,
1989  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
1990  frame->width, frame->height,
1991  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), serial);
1992  avfilter_graph_free(&graph);
1993  graph = avfilter_graph_alloc();
1994  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
1995  SDL_Event event;
1996  event.type = FF_QUIT_EVENT;
1997  event.user.data1 = is;
1998  SDL_PushEvent(&event);
1999  goto the_end;
2000  }
2001  filt_in = is->in_video_filter;
2002  filt_out = is->out_video_filter;
2003  last_w = frame->width;
2004  last_h = frame->height;
2005  last_format = frame->format;
2006  last_serial = serial;
2007  last_vfilter_idx = is->vfilter_idx;
2008  frame_rate = filt_out->inputs[0]->frame_rate;
2009  }
2010 
2011  ret = av_buffersrc_add_frame(filt_in, frame);
2012  if (ret < 0)
2013  goto the_end;
2014 
2015  while (ret >= 0) {
2016  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2017 
2018  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2019  if (ret < 0) {
2020  if (ret == AVERROR_EOF)
2021  is->video_finished = serial;
2022  ret = 0;
2023  break;
2024  }
2025 
2026  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2027  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2028  is->frame_last_filter_delay = 0;
2029  tb = filt_out->inputs[0]->time_base;
2030 #endif
2031  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2032  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2033  ret = queue_picture(is, frame, pts, duration, av_frame_get_pkt_pos(frame), serial);
2034  av_frame_unref(frame);
2035 #if CONFIG_AVFILTER
2036  }
2037 #endif
2038 
2039  if (ret < 0)
2040  goto the_end;
2041  }
2042  the_end:
2043 #if CONFIG_AVFILTER
2044  avfilter_graph_free(&graph);
2045 #endif
2046  av_free_packet(&pkt);
2047  av_frame_free(&frame);
2048  return 0;
2049 }
2050 
2051 static int subtitle_thread(void *arg)
2052 {
2053  VideoState *is = arg;
2054  SubPicture *sp;
2055  AVPacket pkt1, *pkt = &pkt1;
2056  int got_subtitle;
2057  int serial;
2058  double pts;
2059  int i, j;
2060  int r, g, b, y, u, v, a;
2061 
2062  for (;;) {
2063  while (is->paused && !is->subtitleq.abort_request) {
2064  SDL_Delay(10);
2065  }
2066  if (packet_queue_get(&is->subtitleq, pkt, 1, &serial) < 0)
2067  break;
2068 
2069  if (pkt->data == flush_pkt.data) {
 2070  avcodec_flush_buffers(is->subtitle_st->codec);
 2071  continue;
2072  }
2073  SDL_LockMutex(is->subpq_mutex);
2074  while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
2075  !is->subtitleq.abort_request) {
2076  SDL_CondWait(is->subpq_cond, is->subpq_mutex);
2077  }
2078  SDL_UnlockMutex(is->subpq_mutex);
2079 
2080  if (is->subtitleq.abort_request)
2081  return 0;
2082 
2083  sp = &is->subpq[is->subpq_windex];
2084 
2085  /* NOTE: ipts is the PTS of the _first_ picture beginning in
2086  this packet, if any */
2087  pts = 0;
2088  if (pkt->pts != AV_NOPTS_VALUE)
2089  pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
2090 
 2091  avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
 2092  &got_subtitle, pkt);
2093  if (got_subtitle && sp->sub.format == 0) {
2094  if (sp->sub.pts != AV_NOPTS_VALUE)
2095  pts = sp->sub.pts / (double)AV_TIME_BASE;
2096  sp->pts = pts;
2097  sp->serial = serial;
2098 
2099  for (i = 0; i < sp->sub.num_rects; i++)
2100  {
2101  for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
2102  {
2103  RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
2104  y = RGB_TO_Y_CCIR(r, g, b);
2105  u = RGB_TO_U_CCIR(r, g, b, 0);
2106  v = RGB_TO_V_CCIR(r, g, b, 0);
2107  YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
2108  }
2109  }
2110 
2111  /* now we can update the picture count */
2112  if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
2113  is->subpq_windex = 0;
2114  SDL_LockMutex(is->subpq_mutex);
2115  is->subpq_size++;
2116  SDL_UnlockMutex(is->subpq_mutex);
2117  } else if (got_subtitle) {
2118  avsubtitle_free(&sp->sub);
2119  }
2120  av_free_packet(pkt);
2121  }
2122  return 0;
2123 }
2124 
2125 /* copy samples for viewing in editor window */
2126 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2127 {
2128  int size, len;
2129 
2130  size = samples_size / sizeof(short);
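 /* sample_array is a circular buffer feeding the waveform/RDFT display; the write
  index wraps back to 0 when it reaches SAMPLE_ARRAY_SIZE */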
2131  while (size > 0) {
 2132  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
 2133  if (len > size)
2134  len = size;
2135  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2136  samples += len;
2137  is->sample_array_index += len;
 2138  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
 2139  is->sample_array_index = 0;
2140  size -= len;
2141  }
2142 }
2143 
2144 /* return the wanted number of samples to get better sync if sync_type is video
2145  * or external master clock */
2146 static int synchronize_audio(VideoState *is, int nb_samples)
2147 {
2148  int wanted_nb_samples = nb_samples;
2149 
2150  /* if not master, then we try to remove or add samples to correct the clock */
 2151  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
 2152  double diff, avg_diff;
2153  int min_nb_samples, max_nb_samples;
2154 
2155  diff = get_clock(&is->audclk) - get_master_clock(is);
2156 
2157  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2158  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
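 /* audio_diff_cum keeps an exponentially weighted sum of past A-V differences;
  multiplying it by (1 - audio_diff_avg_coef) below recovers the weighted average */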
 2159  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
 2160  /* not enough measures to have a correct estimate */
2161  is->audio_diff_avg_count++;
2162  } else {
2163  /* estimate the A-V difference */
2164  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2165 
2166  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2167  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2168  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2169  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2170  wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
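 /* never add or drop more than SAMPLE_CORRECTION_PERCENT_MAX percent of the
  samples in a single correction step */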
2171  }
2172  av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2173  diff, avg_diff, wanted_nb_samples - nb_samples,
 2174  is->audio_clock, is->audio_diff_threshold);
 2175  }
2176  } else {
2177  /* too big difference : may be initial PTS errors, so
2178  reset A-V filter */
2179  is->audio_diff_avg_count = 0;
2180  is->audio_diff_cum = 0;
2181  }
2182  }
2183 
2184  return wanted_nb_samples;
2185 }
2186 
2187 /**
2188  * Decode one audio frame and return its uncompressed size.
2189  *
2190  * The processed audio frame is decoded, converted if required, and
2191  * stored in is->audio_buf, with size in bytes given by the return
2192  * value.
2193  */
 2194 static int audio_decode_frame(VideoState *is)
 2195 {
2196  AVPacket *pkt_temp = &is->audio_pkt_temp;
2197  AVPacket *pkt = &is->audio_pkt;
2198  AVCodecContext *dec = is->audio_st->codec;
2199  int len1, data_size, resampled_data_size;
2200  int64_t dec_channel_layout;
2201  int got_frame;
2202  av_unused double audio_clock0;
2203  int wanted_nb_samples;
2204  AVRational tb;
2205  int ret;
2206  int reconfigure;
2207 
2208  for (;;) {
2209  /* NOTE: the audio packet can contain several frames */
2210  while (pkt_temp->stream_index != -1 || is->audio_buf_frames_pending) {
2211  if (!is->frame) {
2212  if (!(is->frame = av_frame_alloc()))
2213  return AVERROR(ENOMEM);
2214  } else {
2215  av_frame_unref(is->frame);
2216  }
2217 
2218  if (is->audioq.serial != is->audio_pkt_temp_serial)
2219  break;
2220 
2221  if (is->paused)
2222  return -1;
2223 
2224  if (!is->audio_buf_frames_pending) {
2225  len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
2226  if (len1 < 0) {
2227  /* if error, we skip the frame */
2228  pkt_temp->size = 0;
2229  break;
2230  }
2231 
2232  pkt_temp->dts =
2233  pkt_temp->pts = AV_NOPTS_VALUE;
2234  pkt_temp->data += len1;
2235  pkt_temp->size -= len1;
2236  if (pkt_temp->data && pkt_temp->size <= 0 || !pkt_temp->data && !got_frame)
2237  pkt_temp->stream_index = -1;
2238  if (!pkt_temp->data && !got_frame)
 2239  is->audio_finished = is->audio_pkt_temp_serial;
 2240 
2241  if (!got_frame)
2242  continue;
2243 
2244  tb = (AVRational){1, is->frame->sample_rate};
2245  if (is->frame->pts != AV_NOPTS_VALUE)
2246  is->frame->pts = av_rescale_q(is->frame->pts, dec->time_base, tb);
2247  else if (is->frame->pkt_pts != AV_NOPTS_VALUE)
2248  is->frame->pts = av_rescale_q(is->frame->pkt_pts, is->audio_st->time_base, tb);
2249  else if (is->audio_frame_next_pts != AV_NOPTS_VALUE)
2250 #if CONFIG_AVFILTER
2251  is->frame->pts = av_rescale_q(is->audio_frame_next_pts, (AVRational){1, is->audio_filter_src.freq}, tb);
2252 #else
 2253  is->frame->pts = av_rescale_q(is->audio_frame_next_pts, (AVRational){1, is->audio_src.freq}, tb);
 2254 #endif
2255 
2256  if (is->frame->pts != AV_NOPTS_VALUE)
2257  is->audio_frame_next_pts = is->frame->pts + is->frame->nb_samples;
2258 
2259 #if CONFIG_AVFILTER
2260  dec_channel_layout = get_valid_channel_layout(is->frame->channel_layout, av_frame_get_channels(is->frame));
2261 
2262  reconfigure =
2263  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2264  is->frame->format, av_frame_get_channels(is->frame)) ||
2265  is->audio_filter_src.channel_layout != dec_channel_layout ||
2266  is->audio_filter_src.freq != is->frame->sample_rate ||
 2267  is->audio_pkt_temp_serial != is->audio_last_serial;
 2268 
2269  if (reconfigure) {
2270  char buf1[1024], buf2[1024];
2271  av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2272  av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2273  av_log(NULL, AV_LOG_DEBUG,
2274  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2275  is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, is->audio_last_serial,
 2276  is->frame->sample_rate, av_frame_get_channels(is->frame), av_get_sample_fmt_name(is->frame->format), buf2, is->audio_pkt_temp_serial);
 2277 
2278  is->audio_filter_src.fmt = is->frame->format;
2279  is->audio_filter_src.channels = av_frame_get_channels(is->frame);
2280  is->audio_filter_src.channel_layout = dec_channel_layout;
2281  is->audio_filter_src.freq = is->frame->sample_rate;
 2282  is->audio_last_serial = is->audio_pkt_temp_serial;
 2283 
2284  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2285  return ret;
2286  }
2287 
2288  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, is->frame)) < 0)
2289  return ret;
2290 #endif
2291  }
2292 #if CONFIG_AVFILTER
2293  if ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, is->frame, 0)) < 0) {
2294  if (ret == AVERROR(EAGAIN)) {
2295  is->audio_buf_frames_pending = 0;
2296  continue;
2297  }
2298  if (ret == AVERROR_EOF)
 2299  is->audio_finished = is->audio_pkt_temp_serial;
 2300  return ret;
2301  }
2302  is->audio_buf_frames_pending = 1;
2303  tb = is->out_audio_filter->inputs[0]->time_base;
2304 #endif
2305 
 2306  data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(is->frame),
 2307  is->frame->nb_samples,
2308  is->frame->format, 1);
2309 
2310  dec_channel_layout =
 2311  (is->frame->channel_layout && av_frame_get_channels(is->frame) == av_get_channel_layout_nb_channels(is->frame->channel_layout)) ?
 2312  is->frame->channel_layout : av_get_default_channel_layout(av_frame_get_channels(is->frame));
 2313  wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
2314 
2315  if (is->frame->format != is->audio_src.fmt ||
2316  dec_channel_layout != is->audio_src.channel_layout ||
2317  is->frame->sample_rate != is->audio_src.freq ||
2318  (wanted_nb_samples != is->frame->nb_samples && !is->swr_ctx)) {
2319  swr_free(&is->swr_ctx);
2320  is->swr_ctx = swr_alloc_set_opts(NULL,
 2321  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
 2322  dec_channel_layout, is->frame->format, is->frame->sample_rate,
2323  0, NULL);
2324  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2325  av_log(NULL, AV_LOG_ERROR,
2326  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
 2327  is->frame->sample_rate, av_get_sample_fmt_name(is->frame->format), av_frame_get_channels(is->frame),
 2328  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
 2329  break;
2330  }
2331  is->audio_src.channel_layout = dec_channel_layout;
 2332  is->audio_src.channels = av_frame_get_channels(is->frame);
 2333  is->audio_src.freq = is->frame->sample_rate;
2334  is->audio_src.fmt = is->frame->format;
2335  }
2336 
2337  if (is->swr_ctx) {
2338  const uint8_t **in = (const uint8_t **)is->frame->extended_data;
2339  uint8_t **out = &is->audio_buf1;
2340  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate + 256;
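 /* the extra 256 output samples leave headroom for resampler delay and sync compensation */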
2341  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2342  int len2;
2343  if (out_size < 0) {
2344  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2345  break;
2346  }
2347  if (wanted_nb_samples != is->frame->nb_samples) {
2348  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / is->frame->sample_rate,
2349  wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate) < 0) {
2350  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2351  break;
2352  }
2353  }
2354  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2355  if (!is->audio_buf1)
2356  return AVERROR(ENOMEM);
2357  len2 = swr_convert(is->swr_ctx, out, out_count, in, is->frame->nb_samples);
2358  if (len2 < 0) {
2359  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2360  break;
2361  }
2362  if (len2 == out_count) {
2363  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2364  swr_init(is->swr_ctx);
2365  }
2366  is->audio_buf = is->audio_buf1;
2367  resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2368  } else {
2369  is->audio_buf = is->frame->data[0];
2370  resampled_data_size = data_size;
2371  }
2372 
2373  audio_clock0 = is->audio_clock;
2374  /* update the audio clock with the pts */
2375  if (is->frame->pts != AV_NOPTS_VALUE)
2376  is->audio_clock = is->frame->pts * av_q2d(tb) + (double) is->frame->nb_samples / is->frame->sample_rate;
2377  else
2378  is->audio_clock = NAN;
 2379  is->audio_clock_serial = is->audio_pkt_temp_serial;
 2380 #ifdef DEBUG
2381  {
2382  static double last_clock;
2383  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2384  is->audio_clock - last_clock,
2385  is->audio_clock, audio_clock0);
2386  last_clock = is->audio_clock;
2387  }
2388 #endif
2389  return resampled_data_size;
2390  }
2391 
2392  /* free the current packet */
2393  if (pkt->data)
 2394  av_free_packet(pkt);
 2395  memset(pkt_temp, 0, sizeof(*pkt_temp));
2396  pkt_temp->stream_index = -1;
2397 
2398  if (is->audioq.abort_request) {
2399  return -1;
2400  }
2401 
2402  if (is->audioq.nb_packets == 0)
2403  SDL_CondSignal(is->continue_read_thread);
2404 
2405  /* read next packet */
2406  if ((packet_queue_get(&is->audioq, pkt, 1, &is->audio_pkt_temp_serial)) < 0)
2407  return -1;
2408 
2409  if (pkt->data == flush_pkt.data) {
2410  avcodec_flush_buffers(dec);
2411  is->audio_buf_frames_pending = 0;
 2412  is->audio_frame_next_pts = AV_NOPTS_VALUE;
 2413  if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek)
 2414  is->audio_frame_next_pts = is->audio_st->start_time;
 2415  }
2416 
2417  *pkt_temp = *pkt;
2418  }
2419 }
2420 
2421 /* prepare a new audio buffer */
2422 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2423 {
2424  VideoState *is = opaque;
2425  int audio_size, len1;
2426 
 2427  audio_callback_time = av_gettime_relative();
 2428 
2429  while (len > 0) {
2430  if (is->audio_buf_index >= is->audio_buf_size) {
2431  audio_size = audio_decode_frame(is);
2432  if (audio_size < 0) {
2433  /* if error, just output silence */
2434  is->audio_buf = is->silence_buf;
2435  is->audio_buf_size = sizeof(is->silence_buf) / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
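 /* on a decoding error, feed a whole number of silent frames so the output
  stays aligned on frame boundaries */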
2436  } else {
2437  if (is->show_mode != SHOW_MODE_VIDEO)
2438  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2439  is->audio_buf_size = audio_size;
2440  }
2441  is->audio_buf_index = 0;
2442  }
2443  len1 = is->audio_buf_size - is->audio_buf_index;
2444  if (len1 > len)
2445  len1 = len;
2446  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2447  len -= len1;
2448  stream += len1;
2449  is->audio_buf_index += len1;
2450  }
 2451  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
 2452  /* Let's assume the audio driver that is used by SDL has two periods. */
2453  if (!isnan(is->audio_clock)) {
 2454  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
 2455  sync_clock_to_slave(&is->extclk, &is->audclk);
2456  }
2457 }
2458 
2459 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2460 {
2461  SDL_AudioSpec wanted_spec, spec;
2462  const char *env;
2463  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2464  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2465  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2466 
2467  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2468  if (env) {
2469  wanted_nb_channels = atoi(env);
2470  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2471  }
2472  if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2473  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2474  wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2475  }
2476  wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2477  wanted_spec.channels = wanted_nb_channels;
2478  wanted_spec.freq = wanted_sample_rate;
2479  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2480  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2481  return -1;
2482  }
2483  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2484  next_sample_rate_idx--;
2485  wanted_spec.format = AUDIO_S16SYS;
2486  wanted_spec.silence = 0;
2487  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
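 /* choose a power-of-two buffer just large enough that SDL calls back no more than about
  SDL_AUDIO_MAX_CALLBACKS_PER_SEC times per second, but never below SDL_AUDIO_MIN_BUFFER_SIZE */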
2488  wanted_spec.callback = sdl_audio_callback;
2489  wanted_spec.userdata = opaque;
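 /* if SDL refuses the requested spec, the loop below retries with progressively fewer
  channels and then lower sample rates until a combination is accepted */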
2490  while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2491  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2492  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2493  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2494  if (!wanted_spec.channels) {
2495  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2496  wanted_spec.channels = wanted_nb_channels;
2497  if (!wanted_spec.freq) {
2498  av_log(NULL, AV_LOG_ERROR,
2499  "No more combinations to try, audio open failed\n");
2500  return -1;
2501  }
2502  }
2503  wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2504  }
2505  if (spec.format != AUDIO_S16SYS) {
2506  av_log(NULL, AV_LOG_ERROR,
2507  "SDL advised audio format %d is not supported!\n", spec.format);
2508  return -1;
2509  }
2510  if (spec.channels != wanted_spec.channels) {
2511  wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2512  if (!wanted_channel_layout) {
2513  av_log(NULL, AV_LOG_ERROR,
2514  "SDL advised channel count %d is not supported!\n", spec.channels);
2515  return -1;
2516  }
2517  }
2518 
2519  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2520  audio_hw_params->freq = spec.freq;
2521  audio_hw_params->channel_layout = wanted_channel_layout;
2522  audio_hw_params->channels = spec.channels;
2523  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
2524  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2525  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2526  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2527  return -1;
2528  }
2529  return spec.size;
2530 }
2531 
2532 /* open a given stream. Return 0 if OK */
2533 static int stream_component_open(VideoState *is, int stream_index)
2534 {
2535  AVFormatContext *ic = is->ic;
2536  AVCodecContext *avctx;
2537  AVCodec *codec;
2538  const char *forced_codec_name = NULL;
2539  AVDictionary *opts;
2540  AVDictionaryEntry *t = NULL;
2541  int sample_rate, nb_channels;
2542  int64_t channel_layout;
2543  int ret;
2544  int stream_lowres = lowres;
2545 
2546  if (stream_index < 0 || stream_index >= ic->nb_streams)
2547  return -1;
2548  avctx = ic->streams[stream_index]->codec;
2549 
2550  codec = avcodec_find_decoder(avctx->codec_id);
2551 
2552  switch(avctx->codec_type){
2553  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2554  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2555  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2556  }
2557  if (forced_codec_name)
2558  codec = avcodec_find_decoder_by_name(forced_codec_name);
2559  if (!codec) {
2560  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2561  "No codec could be found with name '%s'\n", forced_codec_name);
2562  else av_log(NULL, AV_LOG_WARNING,
2563  "No codec could be found with id %d\n", avctx->codec_id);
2564  return -1;
2565  }
2566 
2567  avctx->codec_id = codec->id;
 2568  avctx->workaround_bugs = workaround_bugs;
 2569  if(stream_lowres > av_codec_get_max_lowres(codec)){
2570  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2571  av_codec_get_max_lowres(codec));
2572  stream_lowres = av_codec_get_max_lowres(codec);
2573  }
2574  av_codec_set_lowres(avctx, stream_lowres);
2575 
2576  if(stream_lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2577  if (fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2578  if(codec->capabilities & CODEC_CAP_DR1)
2579  avctx->flags |= CODEC_FLAG_EMU_EDGE;
2580 
2581  opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2582  if (!av_dict_get(opts, "threads", NULL, 0))
2583  av_dict_set(&opts, "threads", "auto", 0);
2584  if (stream_lowres)
2585  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2586  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2587  av_dict_set(&opts, "refcounted_frames", "1", 0);
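 /* decoded frames must be reference counted because they are used after the next decode call returns */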
2588  if (avcodec_open2(avctx, codec, &opts) < 0)
2589  return -1;
2590  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2591  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2592  return AVERROR_OPTION_NOT_FOUND;
2593  }
2594 
2595  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2596  switch (avctx->codec_type) {
2597  case AVMEDIA_TYPE_AUDIO:
2598 #if CONFIG_AVFILTER
2599  {
2600  AVFilterLink *link;
2601 
2602  is->audio_filter_src.freq = avctx->sample_rate;
2603  is->audio_filter_src.channels = avctx->channels;
2604  is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2605  is->audio_filter_src.fmt = avctx->sample_fmt;
2606  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2607  return ret;
2608  link = is->out_audio_filter->inputs[0];
2609  sample_rate = link->sample_rate;
2610  nb_channels = link->channels;
2611  channel_layout = link->channel_layout;
2612  }
2613 #else
2614  sample_rate = avctx->sample_rate;
2615  nb_channels = avctx->channels;
2616  channel_layout = avctx->channel_layout;
2617 #endif
2618 
2619  /* prepare audio output */
2620  if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2621  return ret;
2622  is->audio_hw_buf_size = ret;
2623  is->audio_src = is->audio_tgt;
2624  is->audio_buf_size = 0;
2625  is->audio_buf_index = 0;
2626 
2627  /* init averaging filter */
2628  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2629  is->audio_diff_avg_count = 0;
 2630  /* since we do not have a precise enough audio fifo fullness,
 2631  we correct audio sync only if larger than this threshold */
 2632  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
 2633 
2634  memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2635  memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
2636  is->audio_pkt_temp.stream_index = -1;
2637 
2638  is->audio_stream = stream_index;
2639  is->audio_st = ic->streams[stream_index];
2640 
2641  packet_queue_start(&is->audioq);
2642  SDL_PauseAudio(0);
2643  break;
2644  case AVMEDIA_TYPE_VIDEO:
2645  is->video_stream = stream_index;
2646  is->video_st = ic->streams[stream_index];
2647 
2648  packet_queue_start(&is->videoq);
2649  is->video_tid = SDL_CreateThread(video_thread, is);
2650  is->queue_attachments_req = 1;
2651  break;
2652  case AVMEDIA_TYPE_SUBTITLE:
2653  is->subtitle_stream = stream_index;
2654  is->subtitle_st = ic->streams[stream_index];
 2655  packet_queue_start(&is->subtitleq);
 2656 
2657  is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2658  break;
2659  default:
2660  break;
2661  }
2662  return 0;
2663 }
2664 
2665 static void stream_component_close(VideoState *is, int stream_index)
2666 {
2667  AVFormatContext *ic = is->ic;
2668  AVCodecContext *avctx;
2669 
2670  if (stream_index < 0 || stream_index >= ic->nb_streams)
2671  return;
2672  avctx = ic->streams[stream_index]->codec;
2673 
2674  switch (avctx->codec_type) {
2675  case AVMEDIA_TYPE_AUDIO:
2676  packet_queue_abort(&is->audioq);
2677 
2678  SDL_CloseAudio();
2679 
2680  packet_queue_flush(&is->audioq);
2681  av_free_packet(&is->audio_pkt);
2682  swr_free(&is->swr_ctx);
2683  av_freep(&is->audio_buf1);
2684  is->audio_buf1_size = 0;
2685  is->audio_buf = NULL;
2686  av_frame_free(&is->frame);
2687 
2688  if (is->rdft) {
2689  av_rdft_end(is->rdft);
2690  av_freep(&is->rdft_data);
2691  is->rdft = NULL;
2692  is->rdft_bits = 0;
2693  }
2694 #if CONFIG_AVFILTER
2695  avfilter_graph_free(&is->agraph);
2696 #endif
2697  break;
2698  case AVMEDIA_TYPE_VIDEO:
2699  packet_queue_abort(&is->videoq);
2700 
2701  /* note: we also signal this mutex to make sure we deblock the
2702  video thread in all cases */
2703  SDL_LockMutex(is->pictq_mutex);
2704  SDL_CondSignal(is->pictq_cond);
2705  SDL_UnlockMutex(is->pictq_mutex);
2706 
2707  SDL_WaitThread(is->video_tid, NULL);
2708 
2709  packet_queue_flush(&is->videoq);
2710  break;
2711  case AVMEDIA_TYPE_SUBTITLE:
 2712  packet_queue_abort(&is->subtitleq);
 2713 
2714  /* note: we also signal this mutex to make sure we deblock the
 2715  subtitle thread in all cases */
2716  SDL_LockMutex(is->subpq_mutex);
2717  SDL_CondSignal(is->subpq_cond);
2718  SDL_UnlockMutex(is->subpq_mutex);
2719 
2720  SDL_WaitThread(is->subtitle_tid, NULL);
2721 
 2722  packet_queue_flush(&is->subtitleq);
 2723  break;
2724  default:
2725  break;
2726  }
2727 
2728  ic->streams[stream_index]->discard = AVDISCARD_ALL;
2729  avcodec_close(avctx);
2730  switch (avctx->codec_type) {
2731  case AVMEDIA_TYPE_AUDIO:
2732  is->audio_st = NULL;
2733  is->audio_stream = -1;
2734  break;
2735  case AVMEDIA_TYPE_VIDEO:
2736  is->video_st = NULL;
2737  is->video_stream = -1;
2738  break;
2739  case AVMEDIA_TYPE_SUBTITLE:
2740  is->subtitle_st = NULL;
2741  is->subtitle_stream = -1;
2742  break;
2743  default:
2744  break;
2745  }
2746 }
2747 
2748 static int decode_interrupt_cb(void *ctx)
2749 {
2750  VideoState *is = ctx;
2751  return is->abort_request;
2752 }
2753 
 2754 static int is_realtime(AVFormatContext *s)
 2755 {
2756  if( !strcmp(s->iformat->name, "rtp")
2757  || !strcmp(s->iformat->name, "rtsp")
2758  || !strcmp(s->iformat->name, "sdp")
2759  )
2760  return 1;
2761 
2762  if(s->pb && ( !strncmp(s->filename, "rtp:", 4)
2763  || !strncmp(s->filename, "udp:", 4)
2764  )
2765  )
2766  return 1;
2767  return 0;
2768 }
2769 
2770 /* this thread gets the stream from the disk or the network */
2771 static int read_thread(void *arg)
2772 {
2773  VideoState *is = arg;
2774  AVFormatContext *ic = NULL;
2775  int err, i, ret;
2776  int st_index[AVMEDIA_TYPE_NB];
2777  AVPacket pkt1, *pkt = &pkt1;
2778  int eof = 0;
2779  int64_t stream_start_time;
2780  int pkt_in_play_range = 0;
2781  AVDictionaryEntry *t;
2782  AVDictionary **opts;
2783  int orig_nb_streams;
2784  SDL_mutex *wait_mutex = SDL_CreateMutex();
2785 
2786  memset(st_index, -1, sizeof(st_index));
2787  is->last_video_stream = is->video_stream = -1;
2788  is->last_audio_stream = is->audio_stream = -1;
2789  is->last_subtitle_stream = is->subtitle_stream = -1;
2790 
2791  ic = avformat_alloc_context();
 2792  ic->interrupt_callback.callback = decode_interrupt_cb;
 2793  ic->interrupt_callback.opaque = is;
2794  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2795  if (err < 0) {
2796  print_error(is->filename, err);
2797  ret = -1;
2798  goto fail;
2799  }
2800  if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2801  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
 2802  ret = AVERROR_OPTION_NOT_FOUND;
 2803  goto fail;
2804  }
2805  is->ic = ic;
2806 
2807  if (genpts)
2808  ic->flags |= AVFMT_FLAG_GENPTS;
2809 
 2810  av_format_inject_global_side_data(ic);
 2811 
 2812  opts = setup_find_stream_info_opts(ic, codec_opts);
 2813  orig_nb_streams = ic->nb_streams;
2814 
2815  err = avformat_find_stream_info(ic, opts);
2816  if (err < 0) {
2817  av_log(NULL, AV_LOG_WARNING,
2818  "%s: could not find codec parameters\n", is->filename);
2819  ret = -1;
2820  goto fail;
2821  }
2822  for (i = 0; i < orig_nb_streams; i++)
2823  av_dict_free(&opts[i]);
2824  av_freep(&opts);
2825 
2826  if (ic->pb)
2827  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2828 
2829  if (seek_by_bytes < 0)
2830  seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
2831 
2832  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
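 /* formats flagged with AVFMT_TS_DISCONT (e.g. MPEG-TS) use a 10 second cap on frame duration;
  a timestamp jump larger than max_frame_duration is treated as a discontinuity */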
2833 
2834  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2835  window_title = av_asprintf("%s - %s", t->value, input_filename);
2836 
2837  /* if seeking requested, we execute it */
2838  if (start_time != AV_NOPTS_VALUE) {
2839  int64_t timestamp;
2840 
2841  timestamp = start_time;
2842  /* add the stream start time */
2843  if (ic->start_time != AV_NOPTS_VALUE)
2844  timestamp += ic->start_time;
2845  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2846  if (ret < 0) {
2847  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2848  is->filename, (double)timestamp / AV_TIME_BASE);
2849  }
2850  }
2851 
2852  is->realtime = is_realtime(ic);
2853 
2854  for (i = 0; i < ic->nb_streams; i++)
2855  ic->streams[i]->discard = AVDISCARD_ALL;
2856  if (!video_disable)
2857  st_index[AVMEDIA_TYPE_VIDEO] =
 2858  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
 2859  wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2860  if (!audio_disable)
2861  st_index[AVMEDIA_TYPE_AUDIO] =
 2862  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
 2863  wanted_stream[AVMEDIA_TYPE_AUDIO],
 2864  st_index[AVMEDIA_TYPE_VIDEO],
2865  NULL, 0);
 2866  if (!video_disable && !subtitle_disable)
 2867  st_index[AVMEDIA_TYPE_SUBTITLE] =
 2868  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
 2869  wanted_stream[AVMEDIA_TYPE_SUBTITLE],
 2870  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2871  st_index[AVMEDIA_TYPE_AUDIO] :
2872  st_index[AVMEDIA_TYPE_VIDEO]),
2873  NULL, 0);
2874  if (show_status) {
2875  av_dump_format(ic, 0, is->filename, 0);
2876  }
2877 
2878  is->show_mode = show_mode;
2879  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2880  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2881  AVCodecContext *avctx = st->codec;
2882  AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
2883  if (avctx->width)
2884  set_default_window_size(avctx->width, avctx->height, sar);
2885  }
2886 
2887  /* open the streams */
2888  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2889  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2890  }
2891 
2892  ret = -1;
2893  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2894  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2895  }
2896  if (is->show_mode == SHOW_MODE_NONE)
2897  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2898 
2899  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2900  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2901  }
2902 
2903  if (is->video_stream < 0 && is->audio_stream < 0) {
2904  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
2905  is->filename);
2906  ret = -1;
2907  goto fail;
2908  }
2909 
2910  if (infinite_buffer < 0 && is->realtime)
2911  infinite_buffer = 1;
2912 
2913  for (;;) {
2914  if (is->abort_request)
2915  break;
2916  if (is->paused != is->last_paused) {
2917  is->last_paused = is->paused;
2918  if (is->paused)
2919  is->read_pause_return = av_read_pause(ic);
2920  else
2921  av_read_play(ic);
2922  }
2923 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2924  if (is->paused &&
2925  (!strcmp(ic->iformat->name, "rtsp") ||
2926  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2927  /* wait 10 ms to avoid trying to get another packet */
2928  /* XXX: horrible */
2929  SDL_Delay(10);
2930  continue;
2931  }
2932 #endif
2933  if (is->seek_req) {
2934  int64_t seek_target = is->seek_pos;
2935  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2936  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2937 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2938 // of the seek_pos/seek_rel variables
2939 
2940  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2941  if (ret < 0) {
2942  av_log(NULL, AV_LOG_ERROR,
2943  "%s: error while seeking\n", is->ic->filename);
2944  } else {
2945  if (is->audio_stream >= 0) {
2946  packet_queue_flush(&is->audioq);
2947  packet_queue_put(&is->audioq, &flush_pkt);
2948  }
2949  if (is->subtitle_stream >= 0) {
 2950  packet_queue_flush(&is->subtitleq);
 2951  packet_queue_put(&is->subtitleq, &flush_pkt);
2952  }
2953  if (is->video_stream >= 0) {
2954  packet_queue_flush(&is->videoq);
2955  packet_queue_put(&is->videoq, &flush_pkt);
2956  }
2957  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
2958  set_clock(&is->extclk, NAN, 0);
2959  } else {
2960  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
2961  }
2962  }
2963  is->seek_req = 0;
2964  is->queue_attachments_req = 1;
2965  eof = 0;
2966  if (is->paused)
2967  step_to_next_frame(is);
2968  }
2969  if (is->queue_attachments_req) {
 2970  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
 2971  AVPacket copy;
2972  if ((ret = av_copy_packet(&copy, &is->video_st->attached_pic)) < 0)
2973  goto fail;
2974  packet_queue_put(&is->videoq, &copy);
 2975  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
 2976  }
2977  is->queue_attachments_req = 0;
2978  }
2979 
 2980  /* if the queues are full, no need to read more */
2981  if (infinite_buffer<1 &&
2982  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2983  || ( (is->audioq .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
2984  && (is->videoq .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request
 2985  || (is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC))
 2986  && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
2987  /* wait 10 ms */
2988  SDL_LockMutex(wait_mutex);
2989  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2990  SDL_UnlockMutex(wait_mutex);
2991  continue;
2992  }
2993  if (!is->paused &&
2994  (!is->audio_st || is->audio_finished == is->audioq.serial) &&
2995  (!is->video_st || (is->video_finished == is->videoq.serial && pictq_nb_remaining(is) == 0))) {
2996  if (loop != 1 && (!loop || --loop)) {
2997  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2998  } else if (autoexit) {
2999  ret = AVERROR_EOF;
3000  goto fail;
3001  }
3002  }
3003  if (eof) {
3004  if (is->video_stream >= 0)
 3005  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
 3006  if (is->audio_stream >= 0)
 3007  packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
 3008  if (is->subtitle_stream >= 0)
 3009  packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
 3010  SDL_Delay(10);
3011  eof=0;
3012  continue;
3013  }
3014  ret = av_read_frame(ic, pkt);
3015  if (ret < 0) {
3016  if (ret == AVERROR_EOF || avio_feof(ic->pb))
3017  eof = 1;
3018  if (ic->pb && ic->pb->error)
3019  break;
3020  SDL_LockMutex(wait_mutex);
3021  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3022  SDL_UnlockMutex(wait_mutex);
3023  continue;
3024  }
3025  /* check if packet is in play range specified by user, then queue, otherwise discard */
3026  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3027  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3028  (pkt->pts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3029  av_q2d(ic->streams[pkt->stream_index]->time_base) -
3030  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3031  <= ((double)duration / 1000000);
3032  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3033  packet_queue_put(&is->audioq, pkt);
3034  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
 3035  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
 3036  packet_queue_put(&is->videoq, pkt);
3037  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3038  packet_queue_put(&is->subtitleq, pkt);
3039  } else {
3040  av_free_packet(pkt);
3041  }
3042  }
3043  /* wait until the end */
3044  while (!is->abort_request) {
3045  SDL_Delay(100);
3046  }
3047 
3048  ret = 0;
3049  fail:
3050  /* close each stream */
3051  if (is->audio_stream >= 0)
 3052  stream_component_close(is, is->audio_stream);
 3053  if (is->video_stream >= 0)
 3054  stream_component_close(is, is->video_stream);
 3055  if (is->subtitle_stream >= 0)
 3056  stream_component_close(is, is->subtitle_stream);
 3057  if (is->ic) {
3058  avformat_close_input(&is->ic);
3059  }
3060 
3061  if (ret != 0) {
3062  SDL_Event event;
3063 
3064  event.type = FF_QUIT_EVENT;
3065  event.user.data1 = is;
3066  SDL_PushEvent(&event);
3067  }
3068  SDL_DestroyMutex(wait_mutex);
3069  return 0;
3070 }
3071 
3072 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
3073 {
3074  VideoState *is;
3075 
3076  is = av_mallocz(sizeof(VideoState));
3077  if (!is)
3078  return NULL;
3079  av_strlcpy(is->filename, filename, sizeof(is->filename));
3080  is->iformat = iformat;
3081  is->ytop = 0;
3082  is->xleft = 0;
3083 
3084  /* start video display */
3085  is->pictq_mutex = SDL_CreateMutex();
3086  is->pictq_cond = SDL_CreateCond();
3087 
3088  is->subpq_mutex = SDL_CreateMutex();
3089  is->subpq_cond = SDL_CreateCond();
3090 
3091  packet_queue_init(&is->videoq);
3092  packet_queue_init(&is->audioq);
 3093  packet_queue_init(&is->subtitleq);
 3094 
3095  is->continue_read_thread = SDL_CreateCond();
3096 
3097  init_clock(&is->vidclk, &is->videoq.serial);
3098  init_clock(&is->audclk, &is->audioq.serial);
3099  init_clock(&is->extclk, &is->extclk.serial);
3100  is->audio_clock_serial = -1;
3101  is->audio_last_serial = -1;
3102  is->av_sync_type = av_sync_type;
3103  is->read_tid = SDL_CreateThread(read_thread, is);
3104  if (!is->read_tid) {
3105  av_free(is);
3106  return NULL;
3107  }
3108  return is;
3109 }
3110 
 3111 static void stream_cycle_channel(VideoState *is, int codec_type)
 3112 {
3113  AVFormatContext *ic = is->ic;
3114  int start_index, stream_index;
3115  int old_index;
3116  AVStream *st;
3117  AVProgram *p = NULL;
3118  int nb_streams = is->ic->nb_streams;
3119 
3120  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3121  start_index = is->last_video_stream;
3122  old_index = is->video_stream;
3123  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3124  start_index = is->last_audio_stream;
3125  old_index = is->audio_stream;
3126  } else {
3127  start_index = is->last_subtitle_stream;
3128  old_index = is->subtitle_stream;
3129  }
3130  stream_index = start_index;
3131 
3132  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3133  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3134  if (p) {
3135  nb_streams = p->nb_stream_indexes;
3136  for (start_index = 0; start_index < nb_streams; start_index++)
3137  if (p->stream_index[start_index] == stream_index)
3138  break;
3139  if (start_index == nb_streams)
3140  start_index = -1;
3141  stream_index = start_index;
3142  }
3143  }
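 /* when the video stream belongs to a program, cycle only through that program's
  streams via its stream_index[] map rather than over all streams in the file */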
3144 
3145  for (;;) {
3146  if (++stream_index >= nb_streams)
3147  {
3148  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3149  {
3150  stream_index = -1;
3151  is->last_subtitle_stream = -1;
3152  goto the_end;
3153  }
3154  if (start_index == -1)
3155  return;
3156  stream_index = 0;
3157  }
3158  if (stream_index == start_index)
3159  return;
3160  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3161  if (st->codec->codec_type == codec_type) {
3162  /* check that parameters are OK */
3163  switch (codec_type) {
3164  case AVMEDIA_TYPE_AUDIO:
3165  if (st->codec->sample_rate != 0 &&
3166  st->codec->channels != 0)
3167  goto the_end;
3168  break;
3169  case AVMEDIA_TYPE_VIDEO:
3170  case AVMEDIA_TYPE_SUBTITLE:
3171  goto the_end;
3172  default:
3173  break;
3174  }
3175  }
3176  }
3177  the_end:
3178  if (p && stream_index != -1)
3179  stream_index = p->stream_index[stream_index];
3180  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3181  av_get_media_type_string(codec_type),
3182  old_index,
3183  stream_index);
3184 
3185  stream_component_close(is, old_index);
3186  stream_component_open(is, stream_index);
3187 }
3188 
3189 
 3190 static void toggle_full_screen(VideoState *is)
 3191 {
3192 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
3193  /* OS X needs to reallocate the SDL overlays */
3194  int i;
3195  for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
3196  is->pictq[i].reallocate = 1;
3197 #endif
 3198  is_full_screen = !is_full_screen;
 3199  video_open(is, 1, NULL);
3200 }
3201 
 3202 static void toggle_audio_display(VideoState *is)
 3203 {
3204  int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
3205  int next = is->show_mode;
3206  do {
3207  next = (next + 1) % SHOW_MODE_NB;
3208  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3209  if (is->show_mode != next) {
 3210  fill_rectangle(screen,
 3211  is->xleft, is->ytop, is->width, is->height,
3212  bgcolor, 1);
3213  is->force_refresh = 1;
3214  is->show_mode = next;
3215  }
3216 }
3217 
3218 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3219  double remaining_time = 0.0;
3220  SDL_PumpEvents();
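 /* while no SDL event is pending, keep driving video_refresh() (unless paused without a
  forced refresh) and hide the mouse cursor after it has been idle for a while */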
3221  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_ALLEVENTS)) {
 3222  if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
 3223  SDL_ShowCursor(0);
3224  cursor_hidden = 1;
3225  }
3226  if (remaining_time > 0.0)
3227  av_usleep((int64_t)(remaining_time * 1000000.0));
3228  remaining_time = REFRESH_RATE;
3229  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3230  video_refresh(is, &remaining_time);
3231  SDL_PumpEvents();
3232  }
3233 }
3234 
3235 static void seek_chapter(VideoState *is, int incr)
3236 {
3237  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3238  int i;
3239 
3240  if (!is->ic->nb_chapters)
3241  return;
3242 
3243  /* find the current chapter */
3244  for (i = 0; i < is->ic->nb_chapters; i++) {
3245  AVChapter *ch = is->ic->chapters[i];
3246  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3247  i--;
3248  break;
3249  }
3250  }
3251 
3252  i += incr;
3253  i = FFMAX(i, 0);
3254  if (i >= is->ic->nb_chapters)
3255  return;
3256 
3257  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3258  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3259  AV_TIME_BASE_Q), 0, 0);
3260 }
3261 
3262 /* handle an event sent by the GUI */
3263 static void event_loop(VideoState *cur_stream)
3264 {
3265  SDL_Event event;
3266  double incr, pos, frac;
3267 
3268  for (;;) {
3269  double x;
3270  refresh_loop_wait_event(cur_stream, &event);
3271  switch (event.type) {
3272  case SDL_KEYDOWN:
3273  if (exit_on_keydown) {
3274  do_exit(cur_stream);
3275  break;
3276  }
3277  switch (event.key.keysym.sym) {
3278  case SDLK_ESCAPE:
3279  case SDLK_q:
3280  do_exit(cur_stream);
3281  break;
3282  case SDLK_f:
3283  toggle_full_screen(cur_stream);
3284  cur_stream->force_refresh = 1;
3285  break;
3286  case SDLK_p:
3287  case SDLK_SPACE:
3288  toggle_pause(cur_stream);
3289  break;
3290  case SDLK_s: // S: Step to next frame
3291  step_to_next_frame(cur_stream);
3292  break;
3293  case SDLK_a:
 3294  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
 3295  break;
3296  case SDLK_v:
 3297  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
 3298  break;
3299  case SDLK_c:
 3300  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
 3301  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
 3302  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
 3303  break;
3304  case SDLK_t:
 3305  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
 3306  break;
3307  case SDLK_w:
3308 #if CONFIG_AVFILTER
3309  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3310  if (++cur_stream->vfilter_idx >= nb_vfilters)
3311  cur_stream->vfilter_idx = 0;
3312  } else {
3313  cur_stream->vfilter_idx = 0;
3314  toggle_audio_display(cur_stream);
3315  }
3316 #else
3317  toggle_audio_display(cur_stream);
3318 #endif
3319  break;
3320  case SDLK_PAGEUP:
3321  if (cur_stream->ic->nb_chapters <= 1) {
3322  incr = 600.0;
3323  goto do_seek;
3324  }
3325  seek_chapter(cur_stream, 1);
3326  break;
3327  case SDLK_PAGEDOWN:
3328  if (cur_stream->ic->nb_chapters <= 1) {
3329  incr = -600.0;
3330  goto do_seek;
3331  }
3332  seek_chapter(cur_stream, -1);
3333  break;
3334  case SDLK_LEFT:
3335  incr = -10.0;
3336  goto do_seek;
3337  case SDLK_RIGHT:
3338  incr = 10.0;
3339  goto do_seek;
3340  case SDLK_UP:
3341  incr = 60.0;
3342  goto do_seek;
3343  case SDLK_DOWN:
3344  incr = -60.0;
3345  do_seek:
3346  if (seek_by_bytes) {
3347  if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
3348  pos = cur_stream->video_current_pos;
3349  } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
3350  pos = cur_stream->audio_pkt.pos;
3351  } else
3352  pos = avio_tell(cur_stream->ic->pb);
3353  if (cur_stream->ic->bit_rate)
3354  incr *= cur_stream->ic->bit_rate / 8.0;
3355  else
3356  incr *= 180000.0;
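 /* no bitrate known: assume roughly 180000 bytes per second to convert the seek
  increment from seconds into a byte offset */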
3357  pos += incr;
3358  stream_seek(cur_stream, pos, incr, 1);
3359  } else {
3360  pos = get_master_clock(cur_stream);
3361  if (isnan(pos))
3362  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3363  pos += incr;
3364  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3365  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3366  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3367  }
3368  break;
3369  default:
3370  break;
3371  }
3372  break;
3373  case SDL_VIDEOEXPOSE:
3374  cur_stream->force_refresh = 1;
3375  break;
3376  case SDL_MOUSEBUTTONDOWN:
3377  if (exit_on_mousedown) {
3378  do_exit(cur_stream);
3379  break;
3380  }
3381  case SDL_MOUSEMOTION:
3382  if (cursor_hidden) {
3383  SDL_ShowCursor(1);
3384  cursor_hidden = 0;
3385  }
 3386  cursor_last_shown = av_gettime_relative();
 3387  if (event.type == SDL_MOUSEBUTTONDOWN) {
3388  x = event.button.x;
3389  } else {
3390  if (event.motion.state != SDL_PRESSED)
3391  break;
3392  x = event.motion.x;
3393  }
3394  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3395  uint64_t size = avio_size(cur_stream->ic->pb);
3396  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3397  } else {
3398  int64_t ts;
3399  int ns, hh, mm, ss;
3400  int tns, thh, tmm, tss;
3401  tns = cur_stream->ic->duration / 1000000LL;
3402  thh = tns / 3600;
3403  tmm = (tns % 3600) / 60;
3404  tss = (tns % 60);
3405  frac = x / cur_stream->width;
3406  ns = frac * tns;
3407  hh = ns / 3600;
3408  mm = (ns % 3600) / 60;
3409  ss = (ns % 60);
3410  av_log(NULL, AV_LOG_INFO,
3411  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3412  hh, mm, ss, thh, tmm, tss);
3413  ts = frac * cur_stream->ic->duration;
3414  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3415  ts += cur_stream->ic->start_time;
3416  stream_seek(cur_stream, ts, 0, 0);
3417  }
3418  break;
3419  case SDL_VIDEORESIZE:
3420  screen = SDL_SetVideoMode(FFMIN(16383, event.resize.w), event.resize.h, 0,
3421  SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
3422  if (!screen) {
3423  av_log(NULL, AV_LOG_FATAL, "Failed to set video mode\n");
3424  do_exit(cur_stream);
3425  }
3426  screen_width = cur_stream->width = screen->w;
3427  screen_height = cur_stream->height = screen->h;
3428  cur_stream->force_refresh = 1;
3429  break;
3430  case SDL_QUIT:
3431  case FF_QUIT_EVENT:
3432  do_exit(cur_stream);
3433  break;
3434  case FF_ALLOC_EVENT:
3435  alloc_picture(event.user.data1);
3436  break;
3437  default:
3438  break;
3439  }
3440  }
3441 }
3442 
3443 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3444 {
3445  av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3446  return opt_default(NULL, "video_size", arg);
3447 }
3448 
3449 static int opt_width(void *optctx, const char *opt, const char *arg)
3450 {
3451  screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3452  return 0;
3453 }
3454 
3455 static int opt_height(void *optctx, const char *opt, const char *arg)
3456 {
3457  screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3458  return 0;
3459 }
3460 
3461 static int opt_format(void *optctx, const char *opt, const char *arg)
3462 {
3463  file_iformat = av_find_input_format(arg);
3464  if (!file_iformat) {
3465  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3466  return AVERROR(EINVAL);
3467  }
3468  return 0;
3469 }
3470 
3471 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3472 {
3473  av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3474  return opt_default(NULL, "pixel_format", arg);
3475 }
3476 
3477 static int opt_sync(void *optctx, const char *opt, const char *arg)
3478 {
3479  if (!strcmp(arg, "audio"))
 3480  av_sync_type = AV_SYNC_AUDIO_MASTER;
 3481  else if (!strcmp(arg, "video"))
 3482  av_sync_type = AV_SYNC_VIDEO_MASTER;
 3483  else if (!strcmp(arg, "ext"))
 3484  av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
 3485  else {
3486  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3487  exit(1);
3488  }
3489  return 0;
3490 }
3491 
3492 static int opt_seek(void *optctx, const char *opt, const char *arg)
3493 {
3494  start_time = parse_time_or_die(opt, arg, 1);
3495  return 0;
3496 }
3497 
3498 static int opt_duration(void *optctx, const char *opt, const char *arg)
3499 {
3500  duration = parse_time_or_die(opt, arg, 1);
3501  return 0;
3502 }
3503 
3504 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3505 {
3506  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3507  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3508  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3509  parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3510  return 0;
3511 }
3512 
3513 static void opt_input_file(void *optctx, const char *filename)
3514 {
3515  if (input_filename) {
3516  av_log(NULL, AV_LOG_FATAL,
3517  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3518  filename, input_filename);
3519  exit(1);
3520  }
3521  if (!strcmp(filename, "-"))
3522  filename = "pipe:";
3523  input_filename = filename;
3524 }
3525 
3526 static int opt_codec(void *optctx, const char *opt, const char *arg)
3527 {
3528  const char *spec = strchr(opt, ':');
3529  if (!spec) {
3530  av_log(NULL, AV_LOG_ERROR,
3531  "No media specifier was specified in '%s' in option '%s'\n",
3532  arg, opt);
3533  return AVERROR(EINVAL);
3534  }
3535  spec++;
3536  switch (spec[0]) {
3537  case 'a' : audio_codec_name = arg; break;
3538  case 's' : subtitle_codec_name = arg; break;
3539  case 'v' : video_codec_name = arg; break;
3540  default:
3541  av_log(NULL, AV_LOG_ERROR,
3542  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3543  return AVERROR(EINVAL);
3544  }
3545  return 0;
3546 }
3547 
3548 static int dummy;
3549 
3550 static const OptionDef options[] = {
3551 #include "cmdutils_common_opts.h"
3552  { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3553  { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3554  { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3555  { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3556  { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3557  { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3558  { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3559  { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
3560  { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
3561  { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
3562  { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3563  { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3564  { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3565  { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3566  { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3567  { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3568  { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3569  { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
3570  { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3571  { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3572  { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3573  { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3574  { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3575  { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3576  { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3577  { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3578  { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3579  { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3580  { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3581  { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3582 #if CONFIG_AVFILTER
3583  { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3584  { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3585 #endif
3586  { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3587  { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3588  { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3589  { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3590  { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3591  { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3592  { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3593  { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3594  { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
3595  { NULL, },
3596 };
3597 
3598 static void show_usage(void)
3599 {
3600  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3601  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3602  av_log(NULL, AV_LOG_INFO, "\n");
3603 }
3604 
3605 void show_help_default(const char *opt, const char *arg)
3606 {
 3607  av_log_set_callback(log_callback_help);
 3608  show_usage();
3609  show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3610  show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3611  printf("\n");
 3612  show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
 3613  show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
 3614 #if !CONFIG_AVFILTER
 3615  show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
 3616 #else
 3617  show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
 3618 #endif
3619  printf("\nWhile playing:\n"
3620  "q, ESC quit\n"
3621  "f toggle full screen\n"
3622  "p, SPC pause\n"
3623  "a cycle audio channel in the current program\n"
3624  "v cycle video channel\n"
3625  "t cycle subtitle channel in the current program\n"
3626  "c cycle program\n"
3627  "w cycle video filters or show modes\n"
3628  "s activate frame-step mode\n"
3629  "left/right seek backward/forward 10 seconds\n"
3630  "down/up seek backward/forward 1 minute\n"
3631  "page down/page up seek backward/forward 10 minutes\n"
3632  "mouse click seek to percentage in file corresponding to fraction of width\n"
3633  );
3634 }
3635 
3636 static int lockmgr(void **mtx, enum AVLockOp op)
3637 {
3638  switch(op) {
3639  case AV_LOCK_CREATE:
3640  *mtx = SDL_CreateMutex();
3641  if(!*mtx)
3642  return 1;
3643  return 0;
3644  case AV_LOCK_OBTAIN:
3645  return !!SDL_LockMutex(*mtx);
3646  case AV_LOCK_RELEASE:
3647  return !!SDL_UnlockMutex(*mtx);
3648  case AV_LOCK_DESTROY:
3649  SDL_DestroyMutex(*mtx);
3650  return 0;
3651  }
3652  return 1;
3653 }
3654 
3655 /* Called from the main */
3656 int main(int argc, char **argv)
3657 {
3658  int flags;
3659  VideoState *is;
3660  char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3661 
 3662  av_log_set_flags(AV_LOG_SKIP_REPEATED);
 3663  parse_loglevel(argc, argv, options);
3664 
3665  /* register all codecs, demux and protocols */
3666 #if CONFIG_AVDEVICE
 3667  avdevice_register_all();
 3668 #endif
3669 #if CONFIG_AVFILTER
 3670  avfilter_register_all();
 3671 #endif
3672  av_register_all();
 3673  avformat_network_init();
 3674 
3675  init_opts();
3676 
3677  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3678  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3679 
3680  show_banner(argc, argv, options);
3681 
3682  parse_options(NULL, argc, argv, options, opt_input_file);
3683 
3684  if (!input_filename) {
3685  show_usage();
3686  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3687  av_log(NULL, AV_LOG_FATAL,
3688  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3689  exit(1);
3690  }
3691 
3692  if (display_disable) {
3693  video_disable = 1;
3694  }
3695  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3696  if (audio_disable)
3697  flags &= ~SDL_INIT_AUDIO;
3698  if (display_disable)
3699  SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3700 #if !defined(_WIN32) && !defined(__APPLE__)
3701  flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3702 #endif
3703  if (SDL_Init (flags)) {
3704  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3705  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3706  exit(1);
3707  }
3708 
3709  if (!display_disable) {
3710  const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3711  fs_screen_width = vi->current_w;
3712  fs_screen_height = vi->current_h;
3713  }
3714 
3715  SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3716  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3717  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3718 
 3719  if (av_lockmgr_register(lockmgr)) {
 3720  av_log(NULL, AV_LOG_FATAL, "Could not initialize lock manager!\n");
3721  do_exit(NULL);
3722  }
3723 
3724  av_init_packet(&flush_pkt);
3725  flush_pkt.data = (uint8_t *)&flush_pkt;
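 /* flush_pkt is a sentinel packet: the queues and decoder threads recognize it by its
  data pointer and flush their state after a seek */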
3726 
3727  is = stream_open(input_filename, file_iformat);
3728  if (!is) {
3729  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3730  do_exit(NULL);
3731  }
3732 
3733  event_loop(is);
3734 
3735  /* never returns */
3736 
3737  return 0;
3738 }