ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include <stdint.h>
32 
33 #include "libavutil/avstring.h"
34 #include "libavutil/eval.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/dict.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/samplefmt.h"
41 #include "libavutil/avassert.h"
42 #include "libavutil/time.h"
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswscale/swscale.h"
46 #include "libavutil/opt.h"
47 #include "libavcodec/avfft.h"
48 #include "libswresample/swresample.h"
49 
50 #if CONFIG_AVFILTER
51 # include "libavfilter/avcodec.h"
52 # include "libavfilter/avfilter.h"
53 # include "libavfilter/buffersink.h"
54 # include "libavfilter/buffersrc.h"
55 #endif
56 
57 #include <SDL.h>
58 #include <SDL_thread.h>
59 
60 #include "cmdutils.h"
61 
62 #include <assert.h>
63 
64 const char program_name[] = "ffplay";
65 const int program_birth_year = 2003;
66 
67 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
68 #define MIN_FRAMES 25
69 #define EXTERNAL_CLOCK_MIN_FRAMES 2
70 #define EXTERNAL_CLOCK_MAX_FRAMES 10
71 
72 /* Minimum SDL audio buffer size, in samples. */
73 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
74 /* Calculate the actual buffer size, keeping in mind not to cause too frequent audio callbacks */
75 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
76 
77 /* no AV sync correction is done if below the minimum AV sync threshold */
78 #define AV_SYNC_THRESHOLD_MIN 0.04
79 /* AV sync correction is done if above the maximum AV sync threshold */
80 #define AV_SYNC_THRESHOLD_MAX 0.1
81 /* If a frame duration is longer than this, it will not be duplicated to compensate for AV sync */
82 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
83 /* no AV correction is done if the error is too big */
84 #define AV_NOSYNC_THRESHOLD 10.0
85 
86 /* maximum audio speed change to get correct sync */
87 #define SAMPLE_CORRECTION_PERCENT_MAX 10
88 
89 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
90 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
91 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
92 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
93 
94 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
95 #define AUDIO_DIFF_AVG_NB 20
96 
97 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
98 #define REFRESH_RATE 0.01
99 
100 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
101 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
102 #define SAMPLE_ARRAY_SIZE (8 * 65536)
103 
104 #define CURSOR_HIDE_DELAY 1000000
105 
106 static unsigned sws_flags = SWS_BICUBIC;
107 
108 typedef struct MyAVPacketList {
109  AVPacket pkt;
110  struct MyAVPacketList *next;
111  int serial;
112 } MyAVPacketList;
113 
114 typedef struct PacketQueue {
115  MyAVPacketList *first_pkt, *last_pkt;
116  int nb_packets;
117  int size;
118  int abort_request;
119  int serial;
120  SDL_mutex *mutex;
121  SDL_cond *cond;
122 } PacketQueue;
123 
124 #define VIDEO_PICTURE_QUEUE_SIZE 3
125 #define SUBPICTURE_QUEUE_SIZE 16
126 #define SAMPLE_QUEUE_SIZE 9
127 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
128 
129 typedef struct AudioParams {
130  int freq;
131  int channels;
132  int64_t channel_layout;
133  enum AVSampleFormat fmt;
134  int frame_size;
135  int bytes_per_sec;
136 } AudioParams;
137 
138 typedef struct Clock {
139  double pts; /* clock base */
140  double pts_drift; /* clock base minus time at which we updated the clock */
141  double last_updated;
142  double speed;
143  int serial; /* clock is based on a packet with this serial */
144  int paused;
145  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
146 } Clock;
147 
148 /* Common struct for handling all types of decoded data and allocated render buffers. */
149 typedef struct Frame {
150  AVFrame *frame;
151  AVSubtitle sub;
152  int serial;
153  double pts; /* presentation timestamp for the frame */
154  double duration; /* estimated duration of the frame */
155  int64_t pos; /* byte position of the frame in the input file */
156  SDL_Overlay *bmp;
157  int allocated;
158  int reallocate;
159  int width;
160  int height;
161  AVRational sar;
162 } Frame;
163 
164 typedef struct FrameQueue {
165  Frame queue[FRAME_QUEUE_SIZE];
166  int rindex;
167  int windex;
168  int size;
169  int max_size;
170  int keep_last;
171  int rindex_shown;
172  SDL_mutex *mutex;
173  SDL_cond *cond;
174  PacketQueue *pktq;
175 } FrameQueue;
176 
177 enum {
178  AV_SYNC_AUDIO_MASTER, /* default choice */
179  AV_SYNC_VIDEO_MASTER,
180  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
181 };
182 
183 typedef struct Decoder {
184  AVPacket pkt;
185  AVPacket pkt_temp;
186  PacketQueue *queue;
187  AVCodecContext *avctx;
188  int pkt_serial;
189  int finished;
190  int packet_pending;
191  SDL_cond *empty_queue_cond;
192  int64_t start_pts;
193  AVRational start_pts_tb;
194  int64_t next_pts;
195  AVRational next_pts_tb;
196  SDL_Thread *decoder_tid;
197 } Decoder;
198 
199 typedef struct VideoState {
200  SDL_Thread *read_tid;
204  int paused;
207  int seek_req;
209  int64_t seek_pos;
210  int64_t seek_rel;
213  int realtime;
214 
218 
222 
226 
229 
231 
233 
234  double audio_clock;
236  double audio_diff_cum; /* used for AV difference average computation */
246  unsigned int audio_buf_size; /* in bytes */
247  unsigned int audio_buf1_size;
248  int audio_buf_index; /* in bytes */
251 #if CONFIG_AVFILTER
252  struct AudioParams audio_filter_src;
253 #endif
258 
259  enum ShowMode {
260  SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
261  } show_mode;
268  int xpos;
270 
274 
275  double frame_timer;
281  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
282 #if !CONFIG_AVFILTER
284 #endif
287  int eof;
288 
289  char filename[1024];
291  int step;
292 
293 #if CONFIG_AVFILTER
294  int vfilter_idx;
295  AVFilterContext *in_video_filter; // the first filter in the video chain
296  AVFilterContext *out_video_filter; // the last filter in the video chain
297  AVFilterContext *in_audio_filter; // the first filter in the audio chain
298  AVFilterContext *out_audio_filter; // the last filter in the audio chain
299  AVFilterGraph *agraph; // audio filter graph
300 #endif
301 
303 
305 } VideoState;
306 
307 /* options specified by the user */
308 static AVInputFormat *file_iformat;
309 static const char *input_filename;
310 static const char *window_title;
311 static int fs_screen_width;
312 static int fs_screen_height;
313 static int default_width = 640;
314 static int default_height = 480;
315 static int screen_width = 0;
316 static int screen_height = 0;
317 static int audio_disable;
318 static int video_disable;
319 static int subtitle_disable;
320 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
321 static int seek_by_bytes = -1;
322 static int display_disable;
323 static int show_status = 1;
324 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
325 static int64_t start_time = AV_NOPTS_VALUE;
326 static int64_t duration = AV_NOPTS_VALUE;
327 static int fast = 0;
328 static int genpts = 0;
329 static int lowres = 0;
330 static int decoder_reorder_pts = -1;
331 static int autoexit;
332 static int exit_on_keydown;
333 static int exit_on_mousedown;
334 static int loop = 1;
335 static int framedrop = -1;
336 static int infinite_buffer = -1;
337 static enum ShowMode show_mode = SHOW_MODE_NONE;
338 static const char *audio_codec_name;
339 static const char *subtitle_codec_name;
340 static const char *video_codec_name;
341 double rdftspeed = 0.02;
342 static int64_t cursor_last_shown;
343 static int cursor_hidden = 0;
344 #if CONFIG_AVFILTER
345 static const char **vfilters_list = NULL;
346 static int nb_vfilters = 0;
347 static char *afilters = NULL;
348 #endif
349 static int autorotate = 1;
350 
351 /* current context */
352 static int is_full_screen;
353 static int64_t audio_callback_time;
354 
355 static AVPacket flush_pkt;
356 
357 #define FF_ALLOC_EVENT (SDL_USEREVENT)
358 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
359 
360 static SDL_Surface *screen;
361 
362 #if CONFIG_AVFILTER
363 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
364 {
365  GROW_ARRAY(vfilters_list, nb_vfilters);
366  vfilters_list[nb_vfilters - 1] = arg;
367  return 0;
368 }
369 #endif
370 
371 static inline
372 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
373  enum AVSampleFormat fmt2, int64_t channel_count2)
374 {
375  /* If channel count == 1, planar and non-planar formats are the same */
376  if (channel_count1 == 1 && channel_count2 == 1)
377  return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
378  else
379  return channel_count1 != channel_count2 || fmt1 != fmt2;
380 }
381 
382 static inline
383 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
384 {
385  if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
386  return channel_layout;
387  else
388  return 0;
389 }
390 
391 static void free_picture(Frame *vp);
392 
393 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
394 {
395  MyAVPacketList *pkt1;
396 
397  if (q->abort_request)
398  return -1;
399 
400  pkt1 = av_malloc(sizeof(MyAVPacketList));
401  if (!pkt1)
402  return -1;
403  pkt1->pkt = *pkt;
404  pkt1->next = NULL;
405  if (pkt == &flush_pkt)
406  q->serial++;
407  pkt1->serial = q->serial;
408 
409  if (!q->last_pkt)
410  q->first_pkt = pkt1;
411  else
412  q->last_pkt->next = pkt1;
413  q->last_pkt = pkt1;
414  q->nb_packets++;
415  q->size += pkt1->pkt.size + sizeof(*pkt1);
416  /* XXX: should duplicate packet data in DV case */
417  SDL_CondSignal(q->cond);
418  return 0;
419 }
420 
421 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
422 {
423  int ret;
424 
425  /* duplicate the packet */
426  if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
427  return -1;
428 
429  SDL_LockMutex(q->mutex);
430  ret = packet_queue_put_private(q, pkt);
431  SDL_UnlockMutex(q->mutex);
432 
433  if (pkt != &flush_pkt && ret < 0)
434  av_free_packet(pkt);
435 
436  return ret;
437 }
438 
439 static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
440 {
441  AVPacket pkt1, *pkt = &pkt1;
442  av_init_packet(pkt);
443  pkt->data = NULL;
444  pkt->size = 0;
445  pkt->stream_index = stream_index;
446  return packet_queue_put(q, pkt);
447 }
448 
449 /* packet queue handling */
450 static void packet_queue_init(PacketQueue *q)
451 {
452  memset(q, 0, sizeof(PacketQueue));
453  q->mutex = SDL_CreateMutex();
454  q->cond = SDL_CreateCond();
455  q->abort_request = 1;
456 }
457 
458 static void packet_queue_flush(PacketQueue *q)
459 {
460  MyAVPacketList *pkt, *pkt1;
461 
462  SDL_LockMutex(q->mutex);
463  for (pkt = q->first_pkt; pkt; pkt = pkt1) {
464  pkt1 = pkt->next;
465  av_free_packet(&pkt->pkt);
466  av_freep(&pkt);
467  }
468  q->last_pkt = NULL;
469  q->first_pkt = NULL;
470  q->nb_packets = 0;
471  q->size = 0;
472  SDL_UnlockMutex(q->mutex);
473 }
474 
475 static void packet_queue_destroy(PacketQueue *q)
476 {
477  packet_queue_flush(q);
478  SDL_DestroyMutex(q->mutex);
479  SDL_DestroyCond(q->cond);
480 }
481 
482 static void packet_queue_abort(PacketQueue *q)
483 {
484  SDL_LockMutex(q->mutex);
485 
486  q->abort_request = 1;
487 
488  SDL_CondSignal(q->cond);
489 
490  SDL_UnlockMutex(q->mutex);
491 }
492 
493 static void packet_queue_start(PacketQueue *q)
494 {
495  SDL_LockMutex(q->mutex);
496  q->abort_request = 0;
497  packet_queue_put_private(q, &flush_pkt);
498  SDL_UnlockMutex(q->mutex);
499 }
500 
501 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
502 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
503 {
504  MyAVPacketList *pkt1;
505  int ret;
506 
507  SDL_LockMutex(q->mutex);
508 
509  for (;;) {
510  if (q->abort_request) {
511  ret = -1;
512  break;
513  }
514 
515  pkt1 = q->first_pkt;
516  if (pkt1) {
517  q->first_pkt = pkt1->next;
518  if (!q->first_pkt)
519  q->last_pkt = NULL;
520  q->nb_packets--;
521  q->size -= pkt1->pkt.size + sizeof(*pkt1);
522  *pkt = pkt1->pkt;
523  if (serial)
524  *serial = pkt1->serial;
525  av_free(pkt1);
526  ret = 1;
527  break;
528  } else if (!block) {
529  ret = 0;
530  break;
531  } else {
532  SDL_CondWait(q->cond, q->mutex);
533  }
534  }
535  SDL_UnlockMutex(q->mutex);
536  return ret;
537 }
538 
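#if 0
/* Illustrative sketch, not part of ffplay.c: how the PacketQueue API above is
 * typically used from two threads. The example_* function names and the "ic"
 * and "stream_index" parameters are invented for this illustration; only the
 * packet_queue_* and libavformat calls are real. */
static void example_demuxer_side(AVFormatContext *ic, PacketQueue *q, int stream_index)
{
    AVPacket pkt;
    while (av_read_frame(ic, &pkt) >= 0) {
        if (pkt.stream_index == stream_index)
            packet_queue_put(q, &pkt);            /* the queue takes ownership */
        else
            av_free_packet(&pkt);
    }
    packet_queue_put_nullpacket(q, stream_index); /* lets the decoder drain and flush */
}

static void example_decoder_side(PacketQueue *q)
{
    AVPacket pkt;
    int serial;
    /* blocking get; the returned serial changes whenever a seek flushed the
     * queue, which is how stale packets are recognized and dropped */
    while (packet_queue_get(q, &pkt, 1, &serial) > 0) {
        /* ... decode pkt here, discarding output whose serial is out of date ... */
        av_free_packet(&pkt);
    }
}
#endif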
539 static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
540  memset(d, 0, sizeof(Decoder));
541  d->avctx = avctx;
542  d->queue = queue;
543  d->empty_queue_cond = empty_queue_cond;
544  d->start_pts = AV_NOPTS_VALUE;
545 }
546 
547 static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
548  int got_frame = 0;
549 
550  do {
551  int ret = -1;
552 
553  if (d->queue->abort_request)
554  return -1;
555 
556  if (!d->packet_pending || d->queue->serial != d->pkt_serial) {
557  AVPacket pkt;
558  do {
559  if (d->queue->nb_packets == 0)
560  SDL_CondSignal(d->empty_queue_cond);
561  if (packet_queue_get(d->queue, &pkt, 1, &d->pkt_serial) < 0)
562  return -1;
563  if (pkt.data == flush_pkt.data) {
564  avcodec_flush_buffers(d->avctx);
565  d->finished = 0;
566  d->next_pts = d->start_pts;
567  d->next_pts_tb = d->start_pts_tb;
568  }
569  } while (pkt.data == flush_pkt.data || d->queue->serial != d->pkt_serial);
570  av_free_packet(&d->pkt);
571  d->pkt_temp = d->pkt = pkt;
572  d->packet_pending = 1;
573  }
574 
575  switch (d->avctx->codec_type) {
576  case AVMEDIA_TYPE_VIDEO:
577  ret = avcodec_decode_video2(d->avctx, frame, &got_frame, &d->pkt_temp);
578  if (got_frame) {
579  if (decoder_reorder_pts == -1) {
580  frame->pts = av_frame_get_best_effort_timestamp(frame);
581  } else if (decoder_reorder_pts) {
582  frame->pts = frame->pkt_pts;
583  } else {
584  frame->pts = frame->pkt_dts;
585  }
586  }
587  break;
588  case AVMEDIA_TYPE_AUDIO:
589  ret = avcodec_decode_audio4(d->avctx, frame, &got_frame, &d->pkt_temp);
590  if (got_frame) {
591  AVRational tb = (AVRational){1, frame->sample_rate};
592  if (frame->pts != AV_NOPTS_VALUE)
593  frame->pts = av_rescale_q(frame->pts, d->avctx->time_base, tb);
594  else if (frame->pkt_pts != AV_NOPTS_VALUE)
595  frame->pts = av_rescale_q(frame->pkt_pts, av_codec_get_pkt_timebase(d->avctx), tb);
596  else if (d->next_pts != AV_NOPTS_VALUE)
597  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
598  if (frame->pts != AV_NOPTS_VALUE) {
599  d->next_pts = frame->pts + frame->nb_samples;
600  d->next_pts_tb = tb;
601  }
602  }
603  break;
604  case AVMEDIA_TYPE_SUBTITLE:
605  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &d->pkt_temp);
606  break;
607  }
608 
609  if (ret < 0) {
610  d->packet_pending = 0;
611  } else {
612  d->pkt_temp.dts =
613  d->pkt_temp.pts = AV_NOPTS_VALUE;
614  if (d->pkt_temp.data) {
615  if (d->avctx->codec_type != AVMEDIA_TYPE_AUDIO)
616  ret = d->pkt_temp.size;
617  d->pkt_temp.data += ret;
618  d->pkt_temp.size -= ret;
619  if (d->pkt_temp.size <= 0)
620  d->packet_pending = 0;
621  } else {
622  if (!got_frame) {
623  d->packet_pending = 0;
624  d->finished = d->pkt_serial;
625  }
626  }
627  }
628  } while (!got_frame && !d->finished);
629 
630  return got_frame;
631 }
632 
633 static void decoder_destroy(Decoder *d) {
634  av_free_packet(&d->pkt);
635 }
636 
637 static void frame_queue_unref_item(Frame *vp)
638 {
639  av_frame_unref(vp->frame);
640  avsubtitle_free(&vp->sub);
641 }
642 
643 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
644 {
645  int i;
646  memset(f, 0, sizeof(FrameQueue));
647  if (!(f->mutex = SDL_CreateMutex()))
648  return AVERROR(ENOMEM);
649  if (!(f->cond = SDL_CreateCond()))
650  return AVERROR(ENOMEM);
651  f->pktq = pktq;
652  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
653  f->keep_last = !!keep_last;
654  for (i = 0; i < f->max_size; i++)
655  if (!(f->queue[i].frame = av_frame_alloc()))
656  return AVERROR(ENOMEM);
657  return 0;
658 }
659 
660 static void frame_queue_destory(FrameQueue *f)
661 {
662  int i;
663  for (i = 0; i < f->max_size; i++) {
664  Frame *vp = &f->queue[i];
665  frame_queue_unref_item(vp);
666  av_frame_free(&vp->frame);
667  free_picture(vp);
668  }
669  SDL_DestroyMutex(f->mutex);
670  SDL_DestroyCond(f->cond);
671 }
672 
673 static void frame_queue_signal(FrameQueue *f)
674 {
675  SDL_LockMutex(f->mutex);
676  SDL_CondSignal(f->cond);
677  SDL_UnlockMutex(f->mutex);
678 }
679 
680 static Frame *frame_queue_peek(FrameQueue *f)
681 {
682  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
683 }
684 
685 static Frame *frame_queue_peek_next(FrameQueue *f)
686 {
687  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
688 }
689 
690 static Frame *frame_queue_peek_last(FrameQueue *f)
691 {
692  return &f->queue[f->rindex];
693 }
694 
695 static Frame *frame_queue_peek_writable(FrameQueue *f)
696 {
697  /* wait until we have space to put a new frame */
698  SDL_LockMutex(f->mutex);
699  while (f->size >= f->max_size &&
700  !f->pktq->abort_request) {
701  SDL_CondWait(f->cond, f->mutex);
702  }
703  SDL_UnlockMutex(f->mutex);
704 
705  if (f->pktq->abort_request)
706  return NULL;
707 
708  return &f->queue[f->windex];
709 }
710 
711 static Frame *frame_queue_peek_readable(FrameQueue *f)
712 {
713  /* wait until we have a readable new frame */
714  SDL_LockMutex(f->mutex);
715  while (f->size - f->rindex_shown <= 0 &&
716  !f->pktq->abort_request) {
717  SDL_CondWait(f->cond, f->mutex);
718  }
719  SDL_UnlockMutex(f->mutex);
720 
721  if (f->pktq->abort_request)
722  return NULL;
723 
724  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
725 }
726 
727 static void frame_queue_push(FrameQueue *f)
728 {
729  if (++f->windex == f->max_size)
730  f->windex = 0;
731  SDL_LockMutex(f->mutex);
732  f->size++;
733  SDL_CondSignal(f->cond);
734  SDL_UnlockMutex(f->mutex);
735 }
736 
737 static void frame_queue_next(FrameQueue *f)
738 {
739  if (f->keep_last && !f->rindex_shown) {
740  f->rindex_shown = 1;
741  return;
742  }
743  frame_queue_unref_item(&f->queue[f->rindex]);
744  if (++f->rindex == f->max_size)
745  f->rindex = 0;
746  SDL_LockMutex(f->mutex);
747  f->size--;
748  SDL_CondSignal(f->cond);
749  SDL_UnlockMutex(f->mutex);
750 }
751 
752 /* jump back to the previous frame if available by resetting rindex_shown */
753 static int frame_queue_prev(FrameQueue *f)
754 {
755  int ret = f->rindex_shown;
756  f->rindex_shown = 0;
757  return ret;
758 }
759 
760 /* return the number of undisplayed frames in the queue */
761 static int frame_queue_nb_remaining(FrameQueue *f)
762 {
763  return f->size - f->rindex_shown;
764 }
765 
766 /* return last shown position */
767 static int64_t frame_queue_last_pos(FrameQueue *f)
768 {
769  Frame *fp = &f->queue[f->rindex];
770  if (f->rindex_shown && fp->serial == f->pktq->serial)
771  return fp->pos;
772  else
773  return -1;
774 }
775 
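#if 0
/* Illustrative sketch, not part of ffplay.c: the FrameQueue above is a fixed-size
 * ring buffer shared by a decoder thread (writer) and the display loop (reader).
 * The example_* helpers are invented for this illustration; the frame_queue_*
 * calls are the real API defined above. */
static int example_frame_writer(FrameQueue *fq, AVFrame *decoded)
{
    Frame *vp = frame_queue_peek_writable(fq);   /* blocks while the queue is full */
    if (!vp)
        return -1;                               /* the queue was aborted */
    av_frame_move_ref(vp->frame, decoded);
    frame_queue_push(fq);                        /* makes the frame visible to the reader */
    return 0;
}

static void example_frame_reader(FrameQueue *fq)
{
    if (frame_queue_nb_remaining(fq) > 0) {
        Frame *vp = frame_queue_peek(fq);        /* frame that should be shown next */
        /* ... render vp ... */
        frame_queue_next(fq);                    /* unref it and advance rindex */
    }
}
#endif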
776 static void decoder_abort(Decoder *d, FrameQueue *fq)
777 {
778  packet_queue_abort(d->queue);
779  frame_queue_signal(fq);
780  SDL_WaitThread(d->decoder_tid, NULL);
781  d->decoder_tid = NULL;
782  packet_queue_flush(d->queue);
783 }
784 
785 static inline void fill_rectangle(SDL_Surface *screen,
786  int x, int y, int w, int h, int color, int update)
787 {
788  SDL_Rect rect;
789  rect.x = x;
790  rect.y = y;
791  rect.w = w;
792  rect.h = h;
793  SDL_FillRect(screen, &rect, color);
794  if (update && w > 0 && h > 0)
795  SDL_UpdateRect(screen, x, y, w, h);
796 }
797 
798 /* draw only the border of a rectangle */
799 static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
800 {
801  int w1, w2, h1, h2;
802 
803  /* fill the background */
804  w1 = x;
805  if (w1 < 0)
806  w1 = 0;
807  w2 = width - (x + w);
808  if (w2 < 0)
809  w2 = 0;
810  h1 = y;
811  if (h1 < 0)
812  h1 = 0;
813  h2 = height - (y + h);
814  if (h2 < 0)
815  h2 = 0;
816  fill_rectangle(screen,
817  xleft, ytop,
818  w1, height,
819  color, update);
820  fill_rectangle(screen,
821  xleft + width - w2, ytop,
822  w2, height,
823  color, update);
824  fill_rectangle(screen,
825  xleft + w1, ytop,
826  width - w1 - w2, h1,
827  color, update);
828  fill_rectangle(screen,
829  xleft + w1, ytop + height - h2,
830  width - w1 - w2, h2,
831  color, update);
832 }
833 
834 #define ALPHA_BLEND(a, oldp, newp, s)\
835 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
836 
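/*
 * ALPHA_BLEND mixes an existing pixel with a subtitle pixel using an 8-bit
 * alpha value; with s = 0 it reduces to (oldp*(255-a) + newp*a) / 255.
 * Spot checks (values chosen for illustration): a = 0 yields oldp (transparent
 * overlay), a = 255 yields newp (opaque overlay), and a = 128 with oldp = 0,
 * newp = 255 yields (0*127 + 255*128) / 255 = 128, i.e. an even blend.
 */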
837 
838 
839 #define BPP 1
840 
841 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
842 {
843  int x, y, Y, U, V, A;
844  uint8_t *lum, *cb, *cr;
845  int dstx, dsty, dstw, dsth;
846  const AVPicture *src = &rect->pict;
847 
848  dstw = av_clip(rect->w, 0, imgw);
849  dsth = av_clip(rect->h, 0, imgh);
850  dstx = av_clip(rect->x, 0, imgw - dstw);
851  dsty = av_clip(rect->y, 0, imgh - dsth);
852  lum = dst->data[0] + dstx + dsty * dst->linesize[0];
853  cb = dst->data[1] + dstx/2 + (dsty >> 1) * dst->linesize[1];
854  cr = dst->data[2] + dstx/2 + (dsty >> 1) * dst->linesize[2];
855 
856  for (y = 0; y<dsth; y++) {
857  for (x = 0; x<dstw; x++) {
858  Y = src->data[0][x + y*src->linesize[0]];
859  A = src->data[3][x + y*src->linesize[3]];
860  lum[0] = ALPHA_BLEND(A, lum[0], Y, 0);
861  lum++;
862  }
863  lum += dst->linesize[0] - dstw;
864  }
865 
866  for (y = 0; y<dsth/2; y++) {
867  for (x = 0; x<dstw/2; x++) {
868  U = src->data[1][x + y*src->linesize[1]];
869  V = src->data[2][x + y*src->linesize[2]];
870  A = src->data[3][2*x + 2*y *src->linesize[3]]
871  + src->data[3][2*x + 1 + 2*y *src->linesize[3]]
872  + src->data[3][2*x + 1 + (2*y+1)*src->linesize[3]]
873  + src->data[3][2*x + (2*y+1)*src->linesize[3]];
874  cb[0] = ALPHA_BLEND(A>>2, cb[0], U, 0);
875  cr[0] = ALPHA_BLEND(A>>2, cr[0], V, 0);
876  cb++;
877  cr++;
878  }
879  cb += dst->linesize[1] - dstw/2;
880  cr += dst->linesize[2] - dstw/2;
881  }
882 }
883 
884 static void free_picture(Frame *vp)
885 {
886  if (vp->bmp) {
887  SDL_FreeYUVOverlay(vp->bmp);
888  vp->bmp = NULL;
889  }
890 }
891 
892 static void calculate_display_rect(SDL_Rect *rect,
893  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
894  int pic_width, int pic_height, AVRational pic_sar)
895 {
896  float aspect_ratio;
897  int width, height, x, y;
898 
899  if (pic_sar.num == 0)
900  aspect_ratio = 0;
901  else
902  aspect_ratio = av_q2d(pic_sar);
903 
904  if (aspect_ratio <= 0.0)
905  aspect_ratio = 1.0;
906  aspect_ratio *= (float)pic_width / (float)pic_height;
907 
908  /* XXX: we suppose the screen has a 1.0 pixel ratio */
909  height = scr_height;
910  width = ((int)rint(height * aspect_ratio)) & ~1;
911  if (width > scr_width) {
912  width = scr_width;
913  height = ((int)rint(width / aspect_ratio)) & ~1;
914  }
915  x = (scr_width - width) / 2;
916  y = (scr_height - height) / 2;
917  rect->x = scr_xleft + x;
918  rect->y = scr_ytop + y;
919  rect->w = FFMAX(width, 1);
920  rect->h = FFMAX(height, 1);
921 }
922 
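/*
 * Worked example for calculate_display_rect() (numbers chosen for illustration):
 * a 640x480 picture with 1:1 SAR shown in a 1000x480 area gives
 * aspect_ratio = 640/480 = 4/3, height = 480 and width = rint(480*4/3) = 640,
 * which fits, so the rect is 640x480 centered at x = (1000-640)/2 = 180, y = 0.
 * A picture wider than the area is instead clamped to scr_width and letterboxed
 * vertically; the final "& ~1" keeps both dimensions even for the YUV overlay.
 */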
923 static void video_image_display(VideoState *is)
924 {
925  Frame *vp;
926  Frame *sp;
927  AVPicture pict;
928  SDL_Rect rect;
929  int i;
930 
931  vp = frame_queue_peek(&is->pictq);
932  if (vp->bmp) {
933  if (is->subtitle_st) {
934  if (frame_queue_nb_remaining(&is->subpq) > 0) {
935  sp = frame_queue_peek(&is->subpq);
936 
937  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
938  SDL_LockYUVOverlay (vp->bmp);
939 
940  pict.data[0] = vp->bmp->pixels[0];
941  pict.data[1] = vp->bmp->pixels[2];
942  pict.data[2] = vp->bmp->pixels[1];
943 
944  pict.linesize[0] = vp->bmp->pitches[0];
945  pict.linesize[1] = vp->bmp->pitches[2];
946  pict.linesize[2] = vp->bmp->pitches[1];
947 
948  for (i = 0; i < sp->sub.num_rects; i++)
949  blend_subrect(&pict, sp->sub.rects[i],
950  vp->bmp->w, vp->bmp->h);
951 
952  SDL_UnlockYUVOverlay (vp->bmp);
953  }
954  }
955  }
956 
957  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
958 
959  SDL_DisplayYUVOverlay(vp->bmp, &rect);
960 
961  if (rect.x != is->last_display_rect.x || rect.y != is->last_display_rect.y || rect.w != is->last_display_rect.w || rect.h != is->last_display_rect.h || is->force_refresh) {
962  int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
963  fill_border(is->xleft, is->ytop, is->width, is->height, rect.x, rect.y, rect.w, rect.h, bgcolor, 1);
964  is->last_display_rect = rect;
965  }
966  }
967 }
968 
969 static inline int compute_mod(int a, int b)
970 {
971  return a < 0 ? a%b + b : a%b;
972 }
973 
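/*
 * compute_mod() is a mathematical modulo that never goes negative, unlike C's
 * "%" operator: compute_mod(-3, 10) == -3 % 10 + 10 == 7, while
 * compute_mod(13, 10) == 3. This keeps sample_array indices valid when
 * stepping backwards from the current write position.
 */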
974 static void video_audio_display(VideoState *s)
975 {
976  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
977  int ch, channels, h, h2, bgcolor, fgcolor;
978  int64_t time_diff;
979  int rdft_bits, nb_freq;
980 
981  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
982  ;
983  nb_freq = 1 << (rdft_bits - 1);
984 
985  /* compute display index : center on currently output samples */
986  channels = s->audio_tgt.channels;
987  nb_display_channels = channels;
988  if (!s->paused) {
989  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
990  n = 2 * channels;
991  delay = s->audio_write_buf_size;
992  delay /= n;
993 
994  /* to be more precise, we take into account the time spent since
995  the last buffer computation */
996  if (audio_callback_time) {
997  time_diff = av_gettime_relative() - audio_callback_time;
998  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
999  }
1000 
1001  delay += 2 * data_used;
1002  if (delay < data_used)
1003  delay = data_used;
1004 
1005  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1006  if (s->show_mode == SHOW_MODE_WAVES) {
1007  h = INT_MIN;
1008  for (i = 0; i < 1000; i += channels) {
1009  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1010  int a = s->sample_array[idx];
1011  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1012  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1013  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1014  int score = a - d;
1015  if (h < score && (b ^ c) < 0) {
1016  h = score;
1017  i_start = idx;
1018  }
1019  }
1020  }
1021 
1022  s->last_i_start = i_start;
1023  } else {
1024  i_start = s->last_i_start;
1025  }
1026 
1027  bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
1028  if (s->show_mode == SHOW_MODE_WAVES) {
1029  fill_rectangle(screen,
1030  s->xleft, s->ytop, s->width, s->height,
1031  bgcolor, 0);
1032 
1033  fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
1034 
1035  /* total height for one channel */
1036  h = s->height / nb_display_channels;
1037  /* graph height / 2 */
1038  h2 = (h * 9) / 20;
1039  for (ch = 0; ch < nb_display_channels; ch++) {
1040  i = i_start + ch;
1041  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1042  for (x = 0; x < s->width; x++) {
1043  y = (s->sample_array[i] * h2) >> 15;
1044  if (y < 0) {
1045  y = -y;
1046  ys = y1 - y;
1047  } else {
1048  ys = y1;
1049  }
1050  fill_rectangle(screen,
1051  s->xleft + x, ys, 1, y,
1052  fgcolor, 0);
1053  i += channels;
1054  if (i >= SAMPLE_ARRAY_SIZE)
1055  i -= SAMPLE_ARRAY_SIZE;
1056  }
1057  }
1058 
1059  fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
1060 
1061  for (ch = 1; ch < nb_display_channels; ch++) {
1062  y = s->ytop + ch * h;
1063  fill_rectangle(screen,
1064  s->xleft, y, s->width, 1,
1065  fgcolor, 0);
1066  }
1067  SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
1068  } else {
1069  nb_display_channels= FFMIN(nb_display_channels, 2);
1070  if (rdft_bits != s->rdft_bits) {
1071  av_rdft_end(s->rdft);
1072  av_free(s->rdft_data);
1073  s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
1074  s->rdft_bits = rdft_bits;
1075  s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data));
1076  }
1077  if (!s->rdft || !s->rdft_data){
1078  av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1079  s->show_mode = SHOW_MODE_WAVES;
1080  } else {
1081  FFTSample *data[2];
1082  for (ch = 0; ch < nb_display_channels; ch++) {
1083  data[ch] = s->rdft_data + 2 * nb_freq * ch;
1084  i = i_start + ch;
1085  for (x = 0; x < 2 * nb_freq; x++) {
1086  double w = (x-nb_freq) * (1.0 / nb_freq);
1087  data[ch][x] = s->sample_array[i] * (1.0 - w * w);
1088  i += channels;
1089  if (i >= SAMPLE_ARRAY_SIZE)
1090  i -= SAMPLE_ARRAY_SIZE;
1091  }
1092  av_rdft_calc(s->rdft, data[ch]);
1093  }
1094  /* Least efficient way to do this, we should of course
1095  * directly access it but it is more than fast enough. */
1096  for (y = 0; y < s->height; y++) {
1097  double w = 1 / sqrt(nb_freq);
1098  int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
1099  int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
1100  + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
1101  a = FFMIN(a, 255);
1102  b = FFMIN(b, 255);
1103  fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
1104 
1105  fill_rectangle(screen,
1106  s->xpos, s->height-y, 1, 1,
1107  fgcolor, 0);
1108  }
1109  }
1110  SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
1111  if (!s->paused)
1112  s->xpos++;
1113  if (s->xpos >= s->width)
1114  s->xpos= s->xleft;
1115  }
1116 }
1117 
1118 static void stream_close(VideoState *is)
1119 {
1120  /* XXX: use a special url_shutdown call to abort parse cleanly */
1121  is->abort_request = 1;
1122  SDL_WaitThread(is->read_tid, NULL);
1123  packet_queue_destroy(&is->videoq);
1124  packet_queue_destroy(&is->audioq);
1125  packet_queue_destroy(&is->subtitleq);
1126 
1127  /* free all pictures */
1128  frame_queue_destory(&is->pictq);
1129  frame_queue_destory(&is->sampq);
1130  frame_queue_destory(&is->subpq);
1131  SDL_DestroyCond(is->continue_read_thread);
1132 #if !CONFIG_AVFILTER
1133  sws_freeContext(is->img_convert_ctx);
1134 #endif
1136  av_free(is);
1137 }
1138 
1139 static void do_exit(VideoState *is)
1140 {
1141  if (is) {
1142  stream_close(is);
1143  }
1144  av_lockmgr_register(NULL);
1145  uninit_opts();
1146 #if CONFIG_AVFILTER
1147  av_freep(&vfilters_list);
1148 #endif
1149  avformat_network_deinit();
1150  if (show_status)
1151  printf("\n");
1152  SDL_Quit();
1153  av_log(NULL, AV_LOG_QUIET, "%s", "");
1154  exit(0);
1155 }
1156 
1157 static void sigterm_handler(int sig)
1158 {
1159  exit(123);
1160 }
1161 
1162 static void set_default_window_size(int width, int height, AVRational sar)
1163 {
1164  SDL_Rect rect;
1165  calculate_display_rect(&rect, 0, 0, INT_MAX, height, width, height, sar);
1166  default_width = rect.w;
1167  default_height = rect.h;
1168 }
1169 
1170 static int video_open(VideoState *is, int force_set_video_mode, Frame *vp)
1171 {
1172  int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
1173  int w,h;
1174 
1175  if (is_full_screen) flags |= SDL_FULLSCREEN;
1176  else flags |= SDL_RESIZABLE;
1177 
1178  if (vp && vp->width)
1179  set_default_window_size(vp->width, vp->height, vp->sar);
1180 
1181  if (is_full_screen && fs_screen_width) {
1182  w = fs_screen_width;
1183  h = fs_screen_height;
1184  } else if (!is_full_screen && screen_width) {
1185  w = screen_width;
1186  h = screen_height;
1187  } else {
1188  w = default_width;
1189  h = default_height;
1190  }
1191  w = FFMIN(16383, w);
1192  if (screen && is->width == screen->w && screen->w == w
1193  && is->height== screen->h && screen->h == h && !force_set_video_mode)
1194  return 0;
1195  screen = SDL_SetVideoMode(w, h, 0, flags);
1196  if (!screen) {
1197  av_log(NULL, AV_LOG_FATAL, "SDL: could not set video mode - exiting\n");
1198  do_exit(is);
1199  }
1200  if (!window_title)
1201  window_title = input_filename;
1202  SDL_WM_SetCaption(window_title, window_title);
1203 
1204  is->width = screen->w;
1205  is->height = screen->h;
1206 
1207  return 0;
1208 }
1209 
1210 /* display the current picture, if any */
1211 static void video_display(VideoState *is)
1212 {
1213  if (!screen)
1214  video_open(is, 0, NULL);
1215  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1216  video_audio_display(is);
1217  else if (is->video_st)
1218  video_image_display(is);
1219 }
1220 
1221 static double get_clock(Clock *c)
1222 {
1223  if (*c->queue_serial != c->serial)
1224  return NAN;
1225  if (c->paused) {
1226  return c->pts;
1227  } else {
1228  double time = av_gettime_relative() / 1000000.0;
1229  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1230  }
1231 }
1232 
1233 static void set_clock_at(Clock *c, double pts, int serial, double time)
1234 {
1235  c->pts = pts;
1236  c->last_updated = time;
1237  c->pts_drift = c->pts - time;
1238  c->serial = serial;
1239 }
1240 
1241 static void set_clock(Clock *c, double pts, int serial)
1242 {
1243  double time = av_gettime_relative() / 1000000.0;
1244  set_clock_at(c, pts, serial, time);
1245 }
1246 
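/*
 * Worked example of the Clock bookkeeping above (times chosen for illustration):
 * set_clock(c, 10.0, serial) at wall-clock time 100.0 stores pts_drift = -90.0.
 * Half a second later, with speed == 1.0, get_clock() returns -90.0 + 100.5 = 10.5,
 * i.e. the stored pts advanced by the elapsed real time. With speed != 1.0 the
 * (time - last_updated) * (1.0 - speed) term scales that advance, which is how
 * the external clock is sped up or slowed down.
 */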
1247 static void set_clock_speed(Clock *c, double speed)
1248 {
1249  set_clock(c, get_clock(c), c->serial);
1250  c->speed = speed;
1251 }
1252 
1253 static void init_clock(Clock *c, int *queue_serial)
1254 {
1255  c->speed = 1.0;
1256  c->paused = 0;
1257  c->queue_serial = queue_serial;
1258  set_clock(c, NAN, -1);
1259 }
1260 
1261 static void sync_clock_to_slave(Clock *c, Clock *slave)
1262 {
1263  double clock = get_clock(c);
1264  double slave_clock = get_clock(slave);
1265  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1266  set_clock(c, slave_clock, slave->serial);
1267 }
1268 
1269 static int get_master_sync_type(VideoState *is) {
1270  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1271  if (is->video_st)
1272  return AV_SYNC_VIDEO_MASTER;
1273  else
1274  return AV_SYNC_AUDIO_MASTER;
1275  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1276  if (is->audio_st)
1277  return AV_SYNC_AUDIO_MASTER;
1278  else
1279  return AV_SYNC_EXTERNAL_CLOCK;
1280  } else {
1281  return AV_SYNC_EXTERNAL_CLOCK;
1282  }
1283 }
1284 
1285 /* get the current master clock value */
1286 static double get_master_clock(VideoState *is)
1287 {
1288  double val;
1289 
1290  switch (get_master_sync_type(is)) {
1291  case AV_SYNC_VIDEO_MASTER:
1292  val = get_clock(&is->vidclk);
1293  break;
1294  case AV_SYNC_AUDIO_MASTER:
1295  val = get_clock(&is->audclk);
1296  break;
1297  default:
1298  val = get_clock(&is->extclk);
1299  break;
1300  }
1301  return val;
1302 }
1303 
1304 static void check_external_clock_speed(VideoState *is) {
1305  if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES ||
1306  is->audio_stream >= 0 && is->audioq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES) {
1307  set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
1308  } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) &&
1309  (is->audio_stream < 0 || is->audioq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES)) {
1310  set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
1311  } else {
1312  double speed = is->extclk.speed;
1313  if (speed != 1.0)
1314  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1315  }
1316 }
1317 
1318 /* seek in the stream */
1319 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1320 {
1321  if (!is->seek_req) {
1322  is->seek_pos = pos;
1323  is->seek_rel = rel;
1324  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1325  if (seek_by_bytes)
1326  is->seek_flags |= AVSEEK_FLAG_BYTE;
1327  is->seek_req = 1;
1328  SDL_CondSignal(is->continue_read_thread);
1329  }
1330 }
1331 
1332 /* pause or resume the video */
1333 static void stream_toggle_pause(VideoState *is)
1334 {
1335  if (is->paused) {
1336  is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1337  if (is->read_pause_return != AVERROR(ENOSYS)) {
1338  is->vidclk.paused = 0;
1339  }
1340  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1341  }
1342  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1343  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1344 }
1345 
1346 static void toggle_pause(VideoState *is)
1347 {
1348  stream_toggle_pause(is);
1349  is->step = 0;
1350 }
1351 
1352 static void step_to_next_frame(VideoState *is)
1353 {
1354  /* if the stream is paused unpause it, then step */
1355  if (is->paused)
1356  stream_toggle_pause(is);
1357  is->step = 1;
1358 }
1359 
1360 static double compute_target_delay(double delay, VideoState *is)
1361 {
1362  double sync_threshold, diff = 0;
1363 
1364  /* update delay to follow master synchronisation source */
1365  if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1366  /* if video is slave, we try to correct big delays by
1367  duplicating or deleting a frame */
1368  diff = get_clock(&is->vidclk) - get_master_clock(is);
1369 
1370  /* skip or repeat frame. We take into account the
1371  delay to compute the threshold. I still don't know
1372  if it is the best guess */
1373  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1374  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1375  if (diff <= -sync_threshold)
1376  delay = FFMAX(0, delay + diff);
1377  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1378  delay = delay + diff;
1379  else if (diff >= sync_threshold)
1380  delay = 2 * delay;
1381  }
1382  }
1383 
1384  av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
1385  delay, -diff);
1386 
1387  return delay;
1388 }
1389 
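/*
 * Worked example for compute_target_delay() (numbers chosen for illustration):
 * for 25 fps material delay = 0.040 s, so sync_threshold = 0.040. If the video
 * clock is 0.08 s behind the master (diff = -0.08), the delay becomes
 * FFMAX(0, 0.040 - 0.08) = 0 and the next frame is shown immediately. If the
 * video is 0.08 s ahead (diff = +0.08) and delay <= AV_SYNC_FRAMEDUP_THRESHOLD,
 * the delay is doubled to 0.08 s, holding the current frame for one extra interval.
 */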
1390 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1391  if (vp->serial == nextvp->serial) {
1392  double duration = nextvp->pts - vp->pts;
1393  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1394  return vp->duration;
1395  else
1396  return duration;
1397  } else {
1398  return 0.0;
1399  }
1400 }
1401 
1402 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1403  /* update current video pts */
1404  set_clock(&is->vidclk, pts, serial);
1405  sync_clock_to_slave(&is->extclk, &is->vidclk);
1406 }
1407 
1408 /* called to display each frame */
1409 static void video_refresh(void *opaque, double *remaining_time)
1410 {
1411  VideoState *is = opaque;
1412  double time;
1413 
1414  Frame *sp, *sp2;
1415 
1416  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1417  check_external_clock_speed(is);
1418 
1419  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1420  time = av_gettime_relative() / 1000000.0;
1421  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1422  video_display(is);
1423  is->last_vis_time = time;
1424  }
1425  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1426  }
1427 
1428  if (is->video_st) {
1429  int redisplay = 0;
1430  if (is->force_refresh)
1431  redisplay = frame_queue_prev(&is->pictq);
1432 retry:
1433  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1434  // nothing to do, no picture to display in the queue
1435  } else {
1436  double last_duration, duration, delay;
1437  Frame *vp, *lastvp;
1438 
1439  /* dequeue the picture */
1440  lastvp = frame_queue_peek_last(&is->pictq);
1441  vp = frame_queue_peek(&is->pictq);
1442 
1443  if (vp->serial != is->videoq.serial) {
1444  frame_queue_next(&is->pictq);
1445  redisplay = 0;
1446  goto retry;
1447  }
1448 
1449  if (lastvp->serial != vp->serial && !redisplay)
1450  is->frame_timer = av_gettime_relative() / 1000000.0;
1451 
1452  if (is->paused)
1453  goto display;
1454 
1455  /* compute nominal last_duration */
1456  last_duration = vp_duration(is, lastvp, vp);
1457  if (redisplay)
1458  delay = 0.0;
1459  else
1460  delay = compute_target_delay(last_duration, is);
1461 
1462  time= av_gettime_relative()/1000000.0;
1463  if (time < is->frame_timer + delay && !redisplay) {
1464  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1465  return;
1466  }
1467 
1468  is->frame_timer += delay;
1469  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1470  is->frame_timer = time;
1471 
1472  SDL_LockMutex(is->pictq.mutex);
1473  if (!redisplay && !isnan(vp->pts))
1474  update_video_pts(is, vp->pts, vp->pos, vp->serial);
1475  SDL_UnlockMutex(is->pictq.mutex);
1476 
1477  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1478  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1479  duration = vp_duration(is, vp, nextvp);
1480  if(!is->step && (redisplay || framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1481  if (!redisplay)
1482  is->frame_drops_late++;
1483  frame_queue_next(&is->pictq);
1484  redisplay = 0;
1485  goto retry;
1486  }
1487  }
1488 
1489  if (is->subtitle_st) {
1490  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1491  sp = frame_queue_peek(&is->subpq);
1492 
1493  if (frame_queue_nb_remaining(&is->subpq) > 1)
1494  sp2 = frame_queue_peek_next(&is->subpq);
1495  else
1496  sp2 = NULL;
1497 
1498  if (sp->serial != is->subtitleq.serial
1499  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1500  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1501  {
1502  frame_queue_next(&is->subpq);
1503  } else {
1504  break;
1505  }
1506  }
1507  }
1508 
1509 display:
1510  /* display picture */
1511  if (!display_disable && is->show_mode == SHOW_MODE_VIDEO)
1512  video_display(is);
1513 
1514  frame_queue_next(&is->pictq);
1515 
1516  if (is->step && !is->paused)
1517  stream_toggle_pause(is);
1518  }
1519  }
1520  is->force_refresh = 0;
1521  if (show_status) {
1522  static int64_t last_time;
1523  int64_t cur_time;
1524  int aqsize, vqsize, sqsize;
1525  double av_diff;
1526 
1527  cur_time = av_gettime_relative();
1528  if (!last_time || (cur_time - last_time) >= 30000) {
1529  aqsize = 0;
1530  vqsize = 0;
1531  sqsize = 0;
1532  if (is->audio_st)
1533  aqsize = is->audioq.size;
1534  if (is->video_st)
1535  vqsize = is->videoq.size;
1536  if (is->subtitle_st)
1537  sqsize = is->subtitleq.size;
1538  av_diff = 0;
1539  if (is->audio_st && is->video_st)
1540  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1541  else if (is->video_st)
1542  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1543  else if (is->audio_st)
1544  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1545  av_log(NULL, AV_LOG_INFO,
1546  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1547  get_master_clock(is),
1548  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1549  av_diff,
1550  is->frame_drops_early + is->frame_drops_late,
1551  aqsize / 1024,
1552  vqsize / 1024,
1553  sqsize,
1554  is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1555  is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1556  fflush(stdout);
1557  last_time = cur_time;
1558  }
1559  }
1560 }
1561 
1562 /* allocate a picture (needs to be done in the main thread to avoid
1563  potential locking problems) */
1564 static void alloc_picture(VideoState *is)
1565 {
1566  Frame *vp;
1567  int64_t bufferdiff;
1568 
1569  vp = &is->pictq.queue[is->pictq.windex];
1570 
1571  free_picture(vp);
1572 
1573  video_open(is, 0, vp);
1574 
1575  vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1576  SDL_YV12_OVERLAY,
1577  screen);
1578  bufferdiff = vp->bmp ? FFMAX(vp->bmp->pixels[0], vp->bmp->pixels[1]) - FFMIN(vp->bmp->pixels[0], vp->bmp->pixels[1]) : 0;
1579  if (!vp->bmp || vp->bmp->pitches[0] < vp->width || bufferdiff < (int64_t)vp->height * vp->bmp->pitches[0]) {
1580  /* SDL allocates a buffer smaller than requested if the video
1581  * overlay hardware is unable to support the requested size. */
1582  av_log(NULL, AV_LOG_FATAL,
1583  "Error: the video system does not support an image\n"
1584  "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1585  "to reduce the image size.\n", vp->width, vp->height );
1586  do_exit(is);
1587  }
1588 
1589  SDL_LockMutex(is->pictq.mutex);
1590  vp->allocated = 1;
1591  SDL_CondSignal(is->pictq.cond);
1592  SDL_UnlockMutex(is->pictq.mutex);
1593 }
1594 
1595 static void duplicate_right_border_pixels(SDL_Overlay *bmp) {
1596  int i, width, height;
1597  Uint8 *p, *maxp;
1598  for (i = 0; i < 3; i++) {
1599  width = bmp->w;
1600  height = bmp->h;
1601  if (i > 0) {
1602  width >>= 1;
1603  height >>= 1;
1604  }
1605  if (bmp->pitches[i] > width) {
1606  maxp = bmp->pixels[i] + bmp->pitches[i] * height - 1;
1607  for (p = bmp->pixels[i] + width - 1; p < maxp; p += bmp->pitches[i])
1608  *(p+1) = *p;
1609  }
1610  }
1611 }
1612 
1613 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1614 {
1615  Frame *vp;
1616 
1617 #if defined(DEBUG_SYNC) && 0
1618  printf("frame_type=%c pts=%0.3f\n",
1619  av_get_picture_type_char(src_frame->pict_type), pts);
1620 #endif
1621 
1622  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1623  return -1;
1624 
1625  vp->sar = src_frame->sample_aspect_ratio;
1626 
1627  /* alloc or resize hardware picture buffer */
1628  if (!vp->bmp || vp->reallocate || !vp->allocated ||
1629  vp->width != src_frame->width ||
1630  vp->height != src_frame->height) {
1631  SDL_Event event;
1632 
1633  vp->allocated = 0;
1634  vp->reallocate = 0;
1635  vp->width = src_frame->width;
1636  vp->height = src_frame->height;
1637 
1638  /* the allocation must be done in the main thread to avoid
1639  locking problems. */
1640  event.type = FF_ALLOC_EVENT;
1641  event.user.data1 = is;
1642  SDL_PushEvent(&event);
1643 
1644  /* wait until the picture is allocated */
1645  SDL_LockMutex(is->pictq.mutex);
1646  while (!vp->allocated && !is->videoq.abort_request) {
1647  SDL_CondWait(is->pictq.cond, is->pictq.mutex);
1648  }
1649  /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1650  if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1651  while (!vp->allocated && !is->abort_request) {
1652  SDL_CondWait(is->pictq.cond, is->pictq.mutex);
1653  }
1654  }
1655  SDL_UnlockMutex(is->pictq.mutex);
1656 
1657  if (is->videoq.abort_request)
1658  return -1;
1659  }
1660 
1661  /* if the frame is not skipped, then display it */
1662  if (vp->bmp) {
1663  AVPicture pict = { { 0 } };
1664 
1665  /* get a pointer on the bitmap */
1666  SDL_LockYUVOverlay (vp->bmp);
1667 
1668  pict.data[0] = vp->bmp->pixels[0];
1669  pict.data[1] = vp->bmp->pixels[2];
1670  pict.data[2] = vp->bmp->pixels[1];
1671 
1672  pict.linesize[0] = vp->bmp->pitches[0];
1673  pict.linesize[1] = vp->bmp->pitches[2];
1674  pict.linesize[2] = vp->bmp->pitches[1];
1675 
1676 #if CONFIG_AVFILTER
1677  // FIXME use direct rendering
1678  av_picture_copy(&pict, (AVPicture *)src_frame,
1679  src_frame->format, vp->width, vp->height);
1680 #else
1681  {
1682  AVDictionaryEntry *e = av_dict_get(sws_dict, "sws_flags", NULL, 0);
1683  if (e) {
1684  const AVClass *class = sws_get_class();
1685  const AVOption *o = av_opt_find(&class, "sws_flags", NULL, 0,
1686  AV_OPT_SEARCH_FAKE_OBJ);
1687  int ret = av_opt_eval_flags(&class, o, e->value, &sws_flags);
1688  if (ret < 0)
1689  exit(1);
1690  }
1691  }
1692 
1693  is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1694  vp->width, vp->height, src_frame->format, vp->width, vp->height,
1695  AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1696  if (!is->img_convert_ctx) {
1697  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1698  exit(1);
1699  }
1700  sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1701  0, vp->height, pict.data, pict.linesize);
1702 #endif
1703  /* workaround SDL PITCH_WORKAROUND */
1704  duplicate_right_border_pixels(vp->bmp);
1705  /* update the bitmap content */
1706  SDL_UnlockYUVOverlay(vp->bmp);
1707 
1708  vp->pts = pts;
1709  vp->duration = duration;
1710  vp->pos = pos;
1711  vp->serial = serial;
1712 
1713  /* now we can update the picture count */
1714  frame_queue_push(&is->pictq);
1715  }
1716  return 0;
1717 }
1718 
1719 static int get_video_frame(VideoState *is, AVFrame *frame)
1720 {
1721  int got_picture;
1722 
1723  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1724  return -1;
1725 
1726  if (got_picture) {
1727  double dpts = NAN;
1728 
1729  if (frame->pts != AV_NOPTS_VALUE)
1730  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1731 
1732  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1733 
1734  is->viddec_width = frame->width;
1735  is->viddec_height = frame->height;
1736 
1737  if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1738  if (frame->pts != AV_NOPTS_VALUE) {
1739  double diff = dpts - get_master_clock(is);
1740  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1741  diff - is->frame_last_filter_delay < 0 &&
1742  is->viddec.pkt_serial == is->vidclk.serial &&
1743  is->videoq.nb_packets) {
1744  is->frame_drops_early++;
1745  av_frame_unref(frame);
1746  got_picture = 0;
1747  }
1748  }
1749  }
1750  }
1751 
1752  return got_picture;
1753 }
1754 
1755 #if CONFIG_AVFILTER
1756 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1757  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1758 {
1759  int ret, i;
1760  int nb_filters = graph->nb_filters;
1761  AVFilterInOut *outputs = NULL, *inputs = NULL;
1762 
1763  if (filtergraph) {
1764  outputs = avfilter_inout_alloc();
1765  inputs = avfilter_inout_alloc();
1766  if (!outputs || !inputs) {
1767  ret = AVERROR(ENOMEM);
1768  goto fail;
1769  }
1770 
1771  outputs->name = av_strdup("in");
1772  outputs->filter_ctx = source_ctx;
1773  outputs->pad_idx = 0;
1774  outputs->next = NULL;
1775 
1776  inputs->name = av_strdup("out");
1777  inputs->filter_ctx = sink_ctx;
1778  inputs->pad_idx = 0;
1779  inputs->next = NULL;
1780 
1781  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1782  goto fail;
1783  } else {
1784  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1785  goto fail;
1786  }
1787 
1788  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1789  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1790  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1791 
1792  ret = avfilter_graph_config(graph, NULL);
1793 fail:
1794  avfilter_inout_free(&outputs);
1795  avfilter_inout_free(&inputs);
1796  return ret;
1797 }
1798 
1799 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1800 {
1801  static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
1802  char sws_flags_str[512] = "";
1803  char buffersrc_args[256];
1804  int ret;
1805  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1806  AVCodecContext *codec = is->video_st->codec;
1807  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1808  AVDictionaryEntry *e = NULL;
1809 
1810  while ((e = av_dict_get(sws_dict, "", e, AV_DICT_IGNORE_SUFFIX))) {
1811  if (!strcmp(e->key, "sws_flags")) {
1812  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1813  } else
1814  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value);
1815  }
1816  if (strlen(sws_flags_str))
1817  sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1818 
1819  graph->scale_sws_opts = av_strdup(sws_flags_str);
1820 
1821  snprintf(buffersrc_args, sizeof(buffersrc_args),
1822  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1823  frame->width, frame->height, frame->format,
1824  is->video_st->time_base.num, is->video_st->time_base.den,
1825  codec->sample_aspect_ratio.num, FFMAX(codec->sample_aspect_ratio.den, 1));
1826  if (fr.num && fr.den)
1827  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1828 
1829  if ((ret = avfilter_graph_create_filter(&filt_src,
1830  avfilter_get_by_name("buffer"),
1831  "ffplay_buffer", buffersrc_args, NULL,
1832  graph)) < 0)
1833  goto fail;
1834 
1835  ret = avfilter_graph_create_filter(&filt_out,
1836  avfilter_get_by_name("buffersink"),
1837  "ffplay_buffersink", NULL, NULL, graph);
1838  if (ret < 0)
1839  goto fail;
1840 
1841  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1842  goto fail;
1843 
1844  last_filter = filt_out;
1845 
1846 /* Note: this macro adds a filter before the last added filter, so the
1847  * processing order of the filters is reversed */
1848 #define INSERT_FILT(name, arg) do { \
1849  AVFilterContext *filt_ctx; \
1850  \
1851  ret = avfilter_graph_create_filter(&filt_ctx, \
1852  avfilter_get_by_name(name), \
1853  "ffplay_" name, arg, NULL, graph); \
1854  if (ret < 0) \
1855  goto fail; \
1856  \
1857  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1858  if (ret < 0) \
1859  goto fail; \
1860  \
1861  last_filter = filt_ctx; \
1862 } while (0)
1863 
1864  /* SDL YUV code is not handling odd width/height for some driver
1865  * combinations, therefore we crop the picture to an even width/height. */
1866  INSERT_FILT("crop", "floor(in_w/2)*2:floor(in_h/2)*2");
1867 
1868  if (autorotate) {
1869  double theta = get_rotation(is->video_st);
1870 
1871  if (fabs(theta - 90) < 1.0) {
1872  INSERT_FILT("transpose", "clock");
1873  } else if (fabs(theta - 180) < 1.0) {
1874  INSERT_FILT("hflip", NULL);
1875  INSERT_FILT("vflip", NULL);
1876  } else if (fabs(theta - 270) < 1.0) {
1877  INSERT_FILT("transpose", "cclock");
1878  } else if (fabs(theta) > 1.0) {
1879  char rotate_buf[64];
1880  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1881  INSERT_FILT("rotate", rotate_buf);
1882  }
1883  }
1884 
1885  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
1886  goto fail;
1887 
1888  is->in_video_filter = filt_src;
1889  is->out_video_filter = filt_out;
1890 
1891 fail:
1892  return ret;
1893 }
1894 
1895 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1896 {
1897  static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
1898  int sample_rates[2] = { 0, -1 };
1899  int64_t channel_layouts[2] = { 0, -1 };
1900  int channels[2] = { 0, -1 };
1901  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1902  char aresample_swr_opts[512] = "";
1903  AVDictionaryEntry *e = NULL;
1904  char asrc_args[256];
1905  int ret;
1906 
1907  avfilter_graph_free(&is->agraph);
1908  if (!(is->agraph = avfilter_graph_alloc()))
1909  return AVERROR(ENOMEM);
1910 
1911  while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
1912  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
1913  if (strlen(aresample_swr_opts))
1914  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
1915  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
1916 
1917  ret = snprintf(asrc_args, sizeof(asrc_args),
1918  "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
1919  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
1920  is->audio_filter_src.channels,
1921  1, is->audio_filter_src.freq);
1922  if (is->audio_filter_src.channel_layout)
1923  snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
1924  ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
1925 
1926  ret = avfilter_graph_create_filter(&filt_asrc,
1927  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1928  asrc_args, NULL, is->agraph);
1929  if (ret < 0)
1930  goto end;
1931 
1932 
1933  ret = avfilter_graph_create_filter(&filt_asink,
1934  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
1935  NULL, NULL, is->agraph);
1936  if (ret < 0)
1937  goto end;
1938 
1939  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1940  goto end;
1941  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1942  goto end;
1943 
1944  if (force_output_format) {
1945  channel_layouts[0] = is->audio_tgt.channel_layout;
1946  channels [0] = is->audio_tgt.channels;
1947  sample_rates [0] = is->audio_tgt.freq;
1948  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
1949  goto end;
1950  if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1951  goto end;
1952  if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1953  goto end;
1954  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1955  goto end;
1956  }
1957 
1958 
1959  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
1960  goto end;
1961 
1962  is->in_audio_filter = filt_asrc;
1963  is->out_audio_filter = filt_asink;
1964 
1965 end:
1966  if (ret < 0)
1967  avfilter_graph_free(&is->agraph);
1968  return ret;
1969 }
1970 #endif /* CONFIG_AVFILTER */
1971 
1972 static int audio_thread(void *arg)
1973 {
1974  VideoState *is = arg;
1975  AVFrame *frame = av_frame_alloc();
1976  Frame *af;
1977 #if CONFIG_AVFILTER
1978  int last_serial = -1;
1979  int64_t dec_channel_layout;
1980  int reconfigure;
1981 #endif
1982  int got_frame = 0;
1983  AVRational tb;
1984  int ret = 0;
1985 
1986  if (!frame)
1987  return AVERROR(ENOMEM);
1988 
1989  do {
1990  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
1991  goto the_end;
1992 
1993  if (got_frame) {
1994  tb = (AVRational){1, frame->sample_rate};
1995 
1996 #if CONFIG_AVFILTER
1997  dec_channel_layout = get_valid_channel_layout(frame->channel_layout, av_frame_get_channels(frame));
1998 
1999  reconfigure =
2000  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2001  frame->format, av_frame_get_channels(frame)) ||
2002  is->audio_filter_src.channel_layout != dec_channel_layout ||
2003  is->audio_filter_src.freq != frame->sample_rate ||
2004  is->auddec.pkt_serial != last_serial;
2005 
2006  if (reconfigure) {
2007  char buf1[1024], buf2[1024];
2008  av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2009  av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2010  av_log(NULL, AV_LOG_DEBUG,
2011  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2012  is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2013  frame->sample_rate, av_frame_get_channels(frame), av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2014 
2015  is->audio_filter_src.fmt = frame->format;
2016  is->audio_filter_src.channels = av_frame_get_channels(frame);
2017  is->audio_filter_src.channel_layout = dec_channel_layout;
2018  is->audio_filter_src.freq = frame->sample_rate;
2019  last_serial = is->auddec.pkt_serial;
2020 
2021  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2022  goto the_end;
2023  }
2024 
2025  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2026  goto the_end;
2027 
2028  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2029  tb = is->out_audio_filter->inputs[0]->time_base;
2030 #endif
2031  if (!(af = frame_queue_peek_writable(&is->sampq)))
2032  goto the_end;
2033 
2034  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2035  af->pos = av_frame_get_pkt_pos(frame);
2036  af->serial = is->auddec.pkt_serial;
2037  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2038 
2039  av_frame_move_ref(af->frame, frame);
2040  frame_queue_push(&is->sampq);
2041 
2042 #if CONFIG_AVFILTER
2043  if (is->audioq.serial != is->auddec.pkt_serial)
2044  break;
2045  }
2046  if (ret == AVERROR_EOF)
2047  is->auddec.finished = is->auddec.pkt_serial;
2048 #endif
2049  }
2050  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2051  the_end:
2052 #if CONFIG_AVFILTER
2053  avfilter_graph_free(&is->agraph);
2054 #endif
2055  av_frame_free(&frame);
2056  return ret;
2057 }
2058 
2059 static void decoder_start(Decoder *d, int (*fn)(void *), void *arg)
2060 {
2061  packet_queue_start(d->queue);
2062  d->decoder_tid = SDL_CreateThread(fn, arg);
2063 }
2064 
2065 static int video_thread(void *arg)
2066 {
2067  VideoState *is = arg;
2068  AVFrame *frame = av_frame_alloc();
2069  double pts;
2070  double duration;
2071  int ret;
2072  AVRational tb = is->video_st->time_base;
2073  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2074 
2075 #if CONFIG_AVFILTER
2076  AVFilterGraph *graph = avfilter_graph_alloc();
2077  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2078  int last_w = 0;
2079  int last_h = 0;
2080  enum AVPixelFormat last_format = -2;
2081  int last_serial = -1;
2082  int last_vfilter_idx = 0;
2083  if (!graph) {
2084  av_frame_free(&frame);
2085  return AVERROR(ENOMEM);
2086  }
2087 
2088 #endif
2089 
2090  if (!frame) {
2091 #if CONFIG_AVFILTER
2092  avfilter_graph_free(&graph);
2093 #endif
2094  return AVERROR(ENOMEM);
2095  }
2096 
2097  for (;;) {
2098  ret = get_video_frame(is, frame);
2099  if (ret < 0)
2100  goto the_end;
2101  if (!ret)
2102  continue;
2103 
2104 #if CONFIG_AVFILTER
2105  if ( last_w != frame->width
2106  || last_h != frame->height
2107  || last_format != frame->format
2108  || last_serial != is->viddec.pkt_serial
2109  || last_vfilter_idx != is->vfilter_idx) {
2111  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2112  last_w, last_h,
2113  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2114  frame->width, frame->height,
2115  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2116  avfilter_graph_free(&graph);
2117  graph = avfilter_graph_alloc();
2118  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2119  SDL_Event event;
2120  event.type = FF_QUIT_EVENT;
2121  event.user.data1 = is;
2122  SDL_PushEvent(&event);
2123  goto the_end;
2124  }
2125  filt_in = is->in_video_filter;
2126  filt_out = is->out_video_filter;
2127  last_w = frame->width;
2128  last_h = frame->height;
2129  last_format = frame->format;
2130  last_serial = is->viddec.pkt_serial;
2131  last_vfilter_idx = is->vfilter_idx;
2132  frame_rate = filt_out->inputs[0]->frame_rate;
2133  }
2134 
2135  ret = av_buffersrc_add_frame(filt_in, frame);
2136  if (ret < 0)
2137  goto the_end;
2138 
2139  while (ret >= 0) {
2140  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2141 
2142  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2143  if (ret < 0) {
2144  if (ret == AVERROR_EOF)
2145  is->viddec.finished = is->viddec.pkt_serial;
2146  ret = 0;
2147  break;
2148  }
2149 
2150  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2151  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2152  is->frame_last_filter_delay = 0;
2153  tb = filt_out->inputs[0]->time_base;
2154 #endif
2155  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2156  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2157  ret = queue_picture(is, frame, pts, duration, av_frame_get_pkt_pos(frame), is->viddec.pkt_serial);
2158  av_frame_unref(frame);
2159 #if CONFIG_AVFILTER
2160  }
2161 #endif
2162 
2163  if (ret < 0)
2164  goto the_end;
2165  }
2166  the_end:
2167 #if CONFIG_AVFILTER
2168  avfilter_graph_free(&graph);
2169 #endif
2170  av_frame_free(&frame);
2171  return 0;
2172 }
2173 
2174 static int subtitle_thread(void *arg)
2175 {
2176  VideoState *is = arg;
2177  Frame *sp;
2178  int got_subtitle;
2179  double pts;
2180  int i;
2181 
2182  for (;;) {
2183  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2184  return 0;
2185 
2186  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2187  break;
2188 
2189  pts = 0;
2190 
2191  if (got_subtitle && sp->sub.format == 0) {
2192  if (sp->sub.pts != AV_NOPTS_VALUE)
2193  pts = sp->sub.pts / (double)AV_TIME_BASE;
2194  sp->pts = pts;
2195  sp->serial = is->subdec.pkt_serial;
2196 
2197  for (i = 0; i < sp->sub.num_rects; i++)
2198  {
2199  int in_w = sp->sub.rects[i]->w;
2200  int in_h = sp->sub.rects[i]->h;
2201  int subw = is->subdec.avctx->width ? is->subdec.avctx->width : is->viddec_width;
2202  int subh = is->subdec.avctx->height ? is->subdec.avctx->height : is->viddec_height;
2203  int out_w = is->viddec_width ? in_w * is->viddec_width / subw : in_w;
2204  int out_h = is->viddec_height ? in_h * is->viddec_height / subh : in_h;
2205  AVPicture newpic;
2206 
2207  // cannot use avpicture_alloc as it is not compatible with avsubtitle_free()
2208  av_image_fill_linesizes(newpic.linesize, AV_PIX_FMT_YUVA420P, out_w);
2209  newpic.data[0] = av_malloc(newpic.linesize[0] * out_h);
2210  newpic.data[3] = av_malloc(newpic.linesize[3] * out_h);
2211  newpic.data[1] = av_malloc(newpic.linesize[1] * ((out_h+1)/2));
2212  newpic.data[2] = av_malloc(newpic.linesize[2] * ((out_h+1)/2));
2213 
2214  is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
2215  in_w, in_h, AV_PIX_FMT_PAL8, out_w, out_h,
2216  AV_PIX_FMT_YUVA420P, sws_flags, NULL, NULL, NULL);
2217  if (!is->sub_convert_ctx || !newpic.data[0] || !newpic.data[3] ||
2218  !newpic.data[1] || !newpic.data[2]
2219  ) {
2220  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the sub conversion context\n");
2221  exit(1);
2222  }
2223  sws_scale(is->sub_convert_ctx,
2224  (void*)sp->sub.rects[i]->pict.data, sp->sub.rects[i]->pict.linesize,
2225  0, in_h, newpic.data, newpic.linesize);
2226 
2227  av_free(sp->sub.rects[i]->pict.data[0]);
2228  av_free(sp->sub.rects[i]->pict.data[1]);
2229  sp->sub.rects[i]->pict = newpic;
2230  sp->sub.rects[i]->w = out_w;
2231  sp->sub.rects[i]->h = out_h;
2232  sp->sub.rects[i]->x = sp->sub.rects[i]->x * out_w / in_w;
2233  sp->sub.rects[i]->y = sp->sub.rects[i]->y * out_h / in_h;
2234  }
2235 
2236  /* now we can update the picture count */
2237  frame_queue_push(&is->subpq);
2238  } else if (got_subtitle) {
2239  avsubtitle_free(&sp->sub);
2240  }
2241  }
2242  return 0;
2243 }
2244 
2245 /* copy samples for viewing in editor window */
2246 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2247 {
2248  int size, len;
2249 
2250  size = samples_size / sizeof(short);
2251  while (size > 0) {
2252  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2253  if (len > size)
2254  len = size;
2255  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2256  samples += len;
2257  is->sample_array_index += len;
2258  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2259  is->sample_array_index = 0;
2260  size -= len;
2261  }
2262 }
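
update_sample_display() above is just a wrap-around copy into the fixed sample_array ring used by the waveform/RDFT display. The standalone C sketch below is an editorial illustration, not part of ffplay.c; RING_SIZE stands in for SAMPLE_ARRAY_SIZE and the test data are made up.

#include <stdio.h>
#include <string.h>

#define RING_SIZE 16                        /* stand-in for SAMPLE_ARRAY_SIZE */

static short ring[RING_SIZE];
static int   ring_index;

static void ring_write(const short *samples, int count)
{
    while (count > 0) {
        int len = RING_SIZE - ring_index;   /* room left before the wrap point */
        if (len > count)
            len = count;
        memcpy(ring + ring_index, samples, len * sizeof(*ring));
        samples    += len;
        ring_index += len;
        if (ring_index >= RING_SIZE)
            ring_index = 0;                 /* wrap back to the start */
        count -= len;
    }
}

int main(void)
{
    short burst[24];
    for (int i = 0; i < 24; i++)
        burst[i] = (short)i;
    ring_write(burst, 24);                  /* larger than the ring, so it wraps once */
    for (int i = 0; i < RING_SIZE; i++)
        printf("%d ", ring[i]);
    printf("\n");
    return 0;
}

After the call, the tail of the burst has overwritten the oldest samples at the start of the ring, exactly as newer audio overwrites older audio in the display buffer.
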
2263 
2264 /* return the wanted number of samples to get better sync if sync_type is video
2265  * or external master clock */
2266 static int synchronize_audio(VideoState *is, int nb_samples)
2267 {
2268  int wanted_nb_samples = nb_samples;
2269 
2270  /* if not master, then we try to remove or add samples to correct the clock */
2271  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2272  double diff, avg_diff;
2273  int min_nb_samples, max_nb_samples;
2274 
2275  diff = get_clock(&is->audclk) - get_master_clock(is);
2276 
2277  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2278  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2279  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2280  /* not enough measures to have a correct estimate */
2281  is->audio_diff_avg_count++;
2282  } else {
2283  /* estimate the A-V difference */
2284  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2285 
2286  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2287  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2288  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2289  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2290  wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2291  }
2292  av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2293  diff, avg_diff, wanted_nb_samples - nb_samples,
2294  is->audio_clock, is->audio_diff_threshold);
2295  }
2296  } else {
2297  /* too big a difference: probably initial PTS errors, so
2298  reset the A-V filter */
2299  is->audio_diff_avg_count = 0;
2300  is->audio_diff_cum = 0;
2301  }
2302  }
2303 
2304  return wanted_nb_samples;
2305 }
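
When audio is not the master clock, the correction above reduces to a few integer operations: scale the clock difference into samples and clip the result to within SAMPLE_CORRECTION_PERCENT_MAX of the frame size. The following is a minimal standalone sketch, not part of ffplay.c, using invented numbers (a 1024-sample frame at 48 kHz with the audio clock 15 ms ahead of the master clock).

#include <stdio.h>

#define CORRECTION_PERCENT_MAX 10           /* mirrors SAMPLE_CORRECTION_PERCENT_MAX */

static int clip_int(int v, int lo, int hi)  /* stand-in for av_clip() */
{
    return v < lo ? lo : v > hi ? hi : v;
}

int main(void)
{
    int    nb_samples = 1024;               /* samples in the decoded frame */
    int    freq       = 48000;              /* source sample rate           */
    double diff       = 0.015;              /* audio clock - master clock, in seconds */

    int wanted = nb_samples + (int)(diff * freq);
    int min_nb = nb_samples * (100 - CORRECTION_PERCENT_MAX) / 100;
    int max_nb = nb_samples * (100 + CORRECTION_PERCENT_MAX) / 100;

    printf("uncorrected=%d wanted=%d clipped=%d\n",
           nb_samples, wanted, clip_int(wanted, min_nb, max_nb));
    return 0;
}

Here the raw request of 1744 samples is clipped to 1126, i.e. the per-frame stretch is capped at roughly 10%, so large A-V offsets are corrected gradually rather than in one audible jump.
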
2306 
2307 /**
2308  * Decode one audio frame and return its uncompressed size.
2309  *
2310  * The processed audio frame is decoded, converted if required, and
2311  * stored in is->audio_buf, with size in bytes given by the return
2312  * value.
2313  */
2314 static int audio_decode_frame(VideoState *is)
2315 {
2316  int data_size, resampled_data_size;
2317  int64_t dec_channel_layout;
2318  av_unused double audio_clock0;
2319  int wanted_nb_samples;
2320  Frame *af;
2321 
2322  if (is->paused)
2323  return -1;
2324 
2325  do {
2326 #if defined(_WIN32)
2327  while (frame_queue_nb_remaining(&is->sampq) == 0) {
2328  if ((av_gettime_relative() - audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
2329  return -1;
2330  av_usleep (1000);
2331  }
2332 #endif
2333  if (!(af = frame_queue_peek_readable(&is->sampq)))
2334  return -1;
2335  frame_queue_next(&is->sampq);
2336  } while (af->serial != is->audioq.serial);
2337 
2338  data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(af->frame),
2339  af->frame->nb_samples,
2340  af->frame->format, 1);
2341 
2342  dec_channel_layout =
2343  (af->frame->channel_layout && av_frame_get_channels(af->frame) == av_get_channel_layout_nb_channels(af->frame->channel_layout)) ?
2344  af->frame->channel_layout : av_get_default_channel_layout(av_frame_get_channels(af->frame));
2345  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2346 
2347  if (af->frame->format != is->audio_src.fmt ||
2348  dec_channel_layout != is->audio_src.channel_layout ||
2349  af->frame->sample_rate != is->audio_src.freq ||
2350  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2351  swr_free(&is->swr_ctx);
2352  is->swr_ctx = swr_alloc_set_opts(NULL,
2353  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2354  dec_channel_layout, af->frame->format, af->frame->sample_rate,
2355  0, NULL);
2356  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2358  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2361  swr_free(&is->swr_ctx);
2362  return -1;
2363  }
2364  is->audio_src.channel_layout = dec_channel_layout;
2365  is->audio_src.channels = av_frame_get_channels(af->frame);
2366  is->audio_src.freq = af->frame->sample_rate;
2367  is->audio_src.fmt = af->frame->format;
2368  }
2369 
2370  if (is->swr_ctx) {
2371  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2372  uint8_t **out = &is->audio_buf1;
2373  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2374  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2375  int len2;
2376  if (out_size < 0) {
2377  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2378  return -1;
2379  }
2380  if (wanted_nb_samples != af->frame->nb_samples) {
2381  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2382  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2383  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2384  return -1;
2385  }
2386  }
2387  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2388  if (!is->audio_buf1)
2389  return AVERROR(ENOMEM);
2390  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2391  if (len2 < 0) {
2392  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2393  return -1;
2394  }
2395  if (len2 == out_count) {
2396  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2397  if (swr_init(is->swr_ctx) < 0)
2398  swr_free(&is->swr_ctx);
2399  }
2400  is->audio_buf = is->audio_buf1;
2401  resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2402  } else {
2403  is->audio_buf = af->frame->data[0];
2404  resampled_data_size = data_size;
2405  }
2406 
2407  audio_clock0 = is->audio_clock;
2408  /* update the audio clock with the pts */
2409  if (!isnan(af->pts))
2410  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2411  else
2412  is->audio_clock = NAN;
2413  is->audio_clock_serial = af->serial;
2414 #ifdef DEBUG
2415  {
2416  static double last_clock;
2417  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2418  is->audio_clock - last_clock,
2419  is->audio_clock, audio_clock0);
2420  last_clock = is->audio_clock;
2421  }
2422 #endif
2423  return resampled_data_size;
2424 }
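
The swr_ctx branch above sizes its output buffer for the worst case and, when synchronize_audio() asked for a different sample count, asks the resampler to hide the difference via swr_set_compensation(). The arithmetic alone is sketched below with made-up values (44.1 kHz material played on a 48 kHz device); it is an illustration of the sizing math, not the actual resampling path.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    int src_rate   = 44100;                 /* af->frame->sample_rate                   */
    int dst_rate   = 48000;                 /* is->audio_tgt.freq                       */
    int nb_samples = 1024;                  /* samples actually decoded                 */
    int wanted     = 1040;                  /* samples requested by synchronize_audio() */

    /* worst-case output size, padded by 256 samples as in the code above */
    int out_count = (int)((int64_t)wanted * dst_rate / src_rate + 256);

    /* compensation: spread (wanted - decoded) over the converted block */
    int sample_delta = (wanted - nb_samples) * dst_rate / src_rate;
    int comp_dist    = wanted * dst_rate / src_rate;

    printf("out_count=%d, compensate by %d samples over %d output samples\n",
           out_count, sample_delta, comp_dist);
    return 0;
}

The extra 256 samples of headroom keep swr_convert() from having to split its output; if it ever fills the whole buffer anyway, the code above logs a warning and reinitializes the context.
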
2425 
2426 /* prepare a new audio buffer */
2427 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2428 {
2429  VideoState *is = opaque;
2430  int audio_size, len1;
2431 
2432  audio_callback_time = av_gettime_relative();
2433 
2434  while (len > 0) {
2435  if (is->audio_buf_index >= is->audio_buf_size) {
2436  audio_size = audio_decode_frame(is);
2437  if (audio_size < 0) {
2438  /* if error, just output silence */
2439  is->audio_buf = is->silence_buf;
2440  is->audio_buf_size = sizeof(is->silence_buf) / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2441  } else {
2442  if (is->show_mode != SHOW_MODE_VIDEO)
2443  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2444  is->audio_buf_size = audio_size;
2445  }
2446  is->audio_buf_index = 0;
2447  }
2448  len1 = is->audio_buf_size - is->audio_buf_index;
2449  if (len1 > len)
2450  len1 = len;
2451  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2452  len -= len1;
2453  stream += len1;
2454  is->audio_buf_index += len1;
2455  }
2456  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2457  /* Let's assume the audio driver that is used by SDL has two periods. */
2458  if (!isnan(is->audio_clock)) {
2459  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2460  sync_clock_to_slave(&is->extclk, &is->audclk);
2461  }
2462 }
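
The clock update at the end of the callback assumes two hardware periods of latency: audio_clock holds the pts at the end of the decoded buffer, so the current playback position is that pts minus the audio still sitting in the SDL buffer and in our own write buffer. A back-of-the-envelope version of that estimate, with invented sizes for a 48 kHz stereo S16 device (not taken from ffplay.c):

#include <stdio.h>

int main(void)
{
    int    hw_buf_size    = 8192;           /* bytes per SDL period (hypothetical)   */
    int    write_buf_size = 3000;           /* bytes decoded but not yet handed over */
    int    bytes_per_sec  = 48000 * 2 * 2;  /* rate * channels * bytes per sample    */
    double audio_clock    = 12.500;         /* pts at the end of the decoded buffer  */

    double latency  = (double)(2 * hw_buf_size + write_buf_size) / bytes_per_sec;
    double position = audio_clock - latency;

    printf("estimated playback position: %.3f s (buffered: %.3f s)\n", position, latency);
    return 0;
}

With these numbers roughly 100 ms of audio is still queued, so the audio clock is set about 0.1 s behind the last decoded pts.
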
2463 
2464 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2465 {
2466  SDL_AudioSpec wanted_spec, spec;
2467  const char *env;
2468  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2469  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2470  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2471 
2472  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2473  if (env) {
2474  wanted_nb_channels = atoi(env);
2475  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2476  }
2477  if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2478  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2479  wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2480  }
2481  wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2482  wanted_spec.channels = wanted_nb_channels;
2483  wanted_spec.freq = wanted_sample_rate;
2484  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2485  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2486  return -1;
2487  }
2488  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2489  next_sample_rate_idx--;
2490  wanted_spec.format = AUDIO_S16SYS;
2491  wanted_spec.silence = 0;
2492  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
2493  wanted_spec.callback = sdl_audio_callback;
2494  wanted_spec.userdata = opaque;
2495  while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2496  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2497  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2498  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2499  if (!wanted_spec.channels) {
2500  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2501  wanted_spec.channels = wanted_nb_channels;
2502  if (!wanted_spec.freq) {
2504  "No more combinations to try, audio open failed\n");
2505  return -1;
2506  }
2507  }
2508  wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2509  }
2510  if (spec.format != AUDIO_S16SYS) {
2512  "SDL advised audio format %d is not supported!\n", spec.format);
2513  return -1;
2514  }
2515  if (spec.channels != wanted_spec.channels) {
2516  wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2517  if (!wanted_channel_layout) {
2519  "SDL advised channel count %d is not supported!\n", spec.channels);
2520  return -1;
2521  }
2522  }
2523 
2524  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2525  audio_hw_params->freq = spec.freq;
2526  audio_hw_params->channel_layout = wanted_channel_layout;
2527  audio_hw_params->channels = spec.channels;
2528  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
2529  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2530  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2531  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2532  return -1;
2533  }
2534  return spec.size;
2535 }
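
The wanted_spec.samples computation above keeps the SDL callback rate below SDL_AUDIO_MAX_CALLBACKS_PER_SEC by rounding freq / 30 up to a power of two, and never goes below SDL_AUDIO_MIN_BUFFER_SIZE. A self-contained sketch of that rule follows; log2_floor stands in for av_log2() and the sample rates are arbitrary examples, so treat it as an illustration rather than the library code.

#include <stdio.h>

static int log2_floor(unsigned v)           /* stand-in for av_log2() */
{
    int n = -1;
    while (v) {
        v >>= 1;
        n++;
    }
    return n;
}

int main(void)
{
    const int min_buffer_size   = 512;      /* SDL_AUDIO_MIN_BUFFER_SIZE       */
    const int max_callbacks_sec = 30;       /* SDL_AUDIO_MAX_CALLBACKS_PER_SEC */
    const int freqs[]           = { 22050, 44100, 48000, 96000 };

    for (int i = 0; i < 4; i++) {
        int samples = 2 << log2_floor(freqs[i] / max_callbacks_sec);
        if (samples < min_buffer_size)
            samples = min_buffer_size;      /* FFMAX(min, ...) in the code above */
        printf("%6d Hz -> %4d samples (about %d callbacks/s)\n",
               freqs[i], samples, freqs[i] / samples);
    }
    return 0;
}

At 48 kHz this picks 2048-sample periods, i.e. roughly 23 callbacks per second, which keeps callback overhead low without adding excessive latency.
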
2536 
2537 /* open a given stream. Return 0 if OK */
2538 static int stream_component_open(VideoState *is, int stream_index)
2539 {
2540  AVFormatContext *ic = is->ic;
2541  AVCodecContext *avctx;
2542  AVCodec *codec;
2543  const char *forced_codec_name = NULL;
2544  AVDictionary *opts;
2545  AVDictionaryEntry *t = NULL;
2546  int sample_rate, nb_channels;
2547  int64_t channel_layout;
2548  int ret = 0;
2549  int stream_lowres = lowres;
2550 
2551  if (stream_index < 0 || stream_index >= ic->nb_streams)
2552  return -1;
2553  avctx = ic->streams[stream_index]->codec;
2554 
2555  codec = avcodec_find_decoder(avctx->codec_id);
2556 
2557  switch(avctx->codec_type){
2558  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2559  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2560  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2561  }
2562  if (forced_codec_name)
2563  codec = avcodec_find_decoder_by_name(forced_codec_name);
2564  if (!codec) {
2565  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2566  "No codec could be found with name '%s'\n", forced_codec_name);
2567  else av_log(NULL, AV_LOG_WARNING,
2568  "No codec could be found with id %d\n", avctx->codec_id);
2569  return -1;
2570  }
2571 
2572  avctx->codec_id = codec->id;
2573  if(stream_lowres > av_codec_get_max_lowres(codec)){
2574  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2575  av_codec_get_max_lowres(codec));
2576  stream_lowres = av_codec_get_max_lowres(codec);
2577  }
2578  av_codec_set_lowres(avctx, stream_lowres);
2579 
2580 #if FF_API_EMU_EDGE
2581  if(stream_lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2582 #endif
2583  if (fast)
2584  avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2585 #if FF_API_EMU_EDGE
2586  if(codec->capabilities & AV_CODEC_CAP_DR1)
2587  avctx->flags |= CODEC_FLAG_EMU_EDGE;
2588 #endif
2589 
2590  opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2591  if (!av_dict_get(opts, "threads", NULL, 0))
2592  av_dict_set(&opts, "threads", "auto", 0);
2593  if (stream_lowres)
2594  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2595  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2596  av_dict_set(&opts, "refcounted_frames", "1", 0);
2597  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2598  goto fail;
2599  }
2600  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2601  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2602  ret = AVERROR_OPTION_NOT_FOUND;
2603  goto fail;
2604  }
2605 
2606  is->eof = 0;
2607  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2608  switch (avctx->codec_type) {
2609  case AVMEDIA_TYPE_AUDIO:
2610 #if CONFIG_AVFILTER
2611  {
2612  AVFilterLink *link;
2613 
2614  is->audio_filter_src.freq = avctx->sample_rate;
2615  is->audio_filter_src.channels = avctx->channels;
2616  is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2617  is->audio_filter_src.fmt = avctx->sample_fmt;
2618  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2619  goto fail;
2620  link = is->out_audio_filter->inputs[0];
2621  sample_rate = link->sample_rate;
2622  nb_channels = link->channels;
2623  channel_layout = link->channel_layout;
2624  }
2625 #else
2626  sample_rate = avctx->sample_rate;
2627  nb_channels = avctx->channels;
2628  channel_layout = avctx->channel_layout;
2629 #endif
2630 
2631  /* prepare audio output */
2632  if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2633  goto fail;
2634  is->audio_hw_buf_size = ret;
2635  is->audio_src = is->audio_tgt;
2636  is->audio_buf_size = 0;
2637  is->audio_buf_index = 0;
2638 
2639  /* init averaging filter */
2640  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2641  is->audio_diff_avg_count = 0;
2642  /* since we do not have a precise enough audio FIFO fullness,
2643  we correct audio sync only if larger than this threshold */
2644  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2645 
2646  is->audio_stream = stream_index;
2647  is->audio_st = ic->streams[stream_index];
2648 
2649  decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
2650  if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
2651  is->auddec.start_pts = is->audio_st->start_time;
2652  is->auddec.start_pts_tb = is->audio_st->time_base;
2653  }
2654  decoder_start(&is->auddec, audio_thread, is);
2655  SDL_PauseAudio(0);
2656  break;
2657  case AVMEDIA_TYPE_VIDEO:
2658  is->video_stream = stream_index;
2659  is->video_st = ic->streams[stream_index];
2660 
2661  is->viddec_width = avctx->width;
2662  is->viddec_height = avctx->height;
2663 
2664  decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
2665  decoder_start(&is->viddec, video_thread, is);
2666  is->queue_attachments_req = 1;
2667  break;
2668  case AVMEDIA_TYPE_SUBTITLE:
2669  is->subtitle_stream = stream_index;
2670  is->subtitle_st = ic->streams[stream_index];
2671 
2672  decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread);
2673  decoder_start(&is->subdec, subtitle_thread, is);
2674  break;
2675  default:
2676  break;
2677  }
2678 
2679 fail:
2680  av_dict_free(&opts);
2681 
2682  return ret;
2683 }
2684 
2685 static void stream_component_close(VideoState *is, int stream_index)
2686 {
2687  AVFormatContext *ic = is->ic;
2688  AVCodecContext *avctx;
2689 
2690  if (stream_index < 0 || stream_index >= ic->nb_streams)
2691  return;
2692  avctx = ic->streams[stream_index]->codec;
2693 
2694  switch (avctx->codec_type) {
2695  case AVMEDIA_TYPE_AUDIO:
2696  decoder_abort(&is->auddec, &is->sampq);
2697  SDL_CloseAudio();
2698  decoder_destroy(&is->auddec);
2699  swr_free(&is->swr_ctx);
2700  av_freep(&is->audio_buf1);
2701  is->audio_buf1_size = 0;
2702  is->audio_buf = NULL;
2703 
2704  if (is->rdft) {
2705  av_rdft_end(is->rdft);
2706  av_freep(&is->rdft_data);
2707  is->rdft = NULL;
2708  is->rdft_bits = 0;
2709  }
2710  break;
2711  case AVMEDIA_TYPE_VIDEO:
2712  decoder_abort(&is->viddec, &is->pictq);
2713  decoder_destroy(&is->viddec);
2714  break;
2715  case AVMEDIA_TYPE_SUBTITLE:
2716  decoder_abort(&is->subdec, &is->subpq);
2717  decoder_destroy(&is->subdec);
2718  break;
2719  default:
2720  break;
2721  }
2722 
2723  ic->streams[stream_index]->discard = AVDISCARD_ALL;
2724  avcodec_close(avctx);
2725  switch (avctx->codec_type) {
2726  case AVMEDIA_TYPE_AUDIO:
2727  is->audio_st = NULL;
2728  is->audio_stream = -1;
2729  break;
2730  case AVMEDIA_TYPE_VIDEO:
2731  is->video_st = NULL;
2732  is->video_stream = -1;
2733  break;
2734  case AVMEDIA_TYPE_SUBTITLE:
2735  is->subtitle_st = NULL;
2736  is->subtitle_stream = -1;
2737  break;
2738  default:
2739  break;
2740  }
2741 }
2742 
2743 static int decode_interrupt_cb(void *ctx)
2744 {
2745  VideoState *is = ctx;
2746  return is->abort_request;
2747 }
2748 
2749 static int is_realtime(AVFormatContext *s)
2750 {
2751  if( !strcmp(s->iformat->name, "rtp")
2752  || !strcmp(s->iformat->name, "rtsp")
2753  || !strcmp(s->iformat->name, "sdp")
2754  )
2755  return 1;
2756 
2757  if(s->pb && ( !strncmp(s->filename, "rtp:", 4)
2758  || !strncmp(s->filename, "udp:", 4)
2759  )
2760  )
2761  return 1;
2762  return 0;
2763 }
2764 
2765 /* this thread gets the stream from the disk or the network */
2766 static int read_thread(void *arg)
2767 {
2768  VideoState *is = arg;
2769  AVFormatContext *ic = NULL;
2770  int err, i, ret;
2771  int st_index[AVMEDIA_TYPE_NB];
2772  AVPacket pkt1, *pkt = &pkt1;
2773  int64_t stream_start_time;
2774  int pkt_in_play_range = 0;
2775  AVDictionaryEntry *t;
2776  AVDictionary **opts;
2777  int orig_nb_streams;
2778  SDL_mutex *wait_mutex = SDL_CreateMutex();
2779  int scan_all_pmts_set = 0;
2780  int64_t pkt_ts;
2781 
2782  memset(st_index, -1, sizeof(st_index));
2783  is->last_video_stream = is->video_stream = -1;
2784  is->last_audio_stream = is->audio_stream = -1;
2785  is->last_subtitle_stream = is->subtitle_stream = -1;
2786  is->eof = 0;
2787 
2788  ic = avformat_alloc_context();
2789  if (!ic) {
2790  av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2791  ret = AVERROR(ENOMEM);
2792  goto fail;
2793  }
2794  ic->interrupt_callback.callback = decode_interrupt_cb;
2795  ic->interrupt_callback.opaque = is;
2796  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2797  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2798  scan_all_pmts_set = 1;
2799  }
2800  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2801  if (err < 0) {
2802  print_error(is->filename, err);
2803  ret = -1;
2804  goto fail;
2805  }
2806  if (scan_all_pmts_set)
2807  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2808 
2809  if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2810  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2811  ret = AVERROR_OPTION_NOT_FOUND;
2812  goto fail;
2813  }
2814  is->ic = ic;
2815 
2816  if (genpts)
2817  ic->flags |= AVFMT_FLAG_GENPTS;
2818 
2819  av_format_inject_global_side_data(ic);
2820 
2821  opts = setup_find_stream_info_opts(ic, codec_opts);
2822  orig_nb_streams = ic->nb_streams;
2823 
2824  err = avformat_find_stream_info(ic, opts);
2825 
2826  for (i = 0; i < orig_nb_streams; i++)
2827  av_dict_free(&opts[i]);
2828  av_freep(&opts);
2829 
2830  if (err < 0) {
2832  "%s: could not find codec parameters\n", is->filename);
2833  ret = -1;
2834  goto fail;
2835  }
2836 
2837  if (ic->pb)
2838  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2839 
2840  if (seek_by_bytes < 0)
2841  seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
2842 
2843  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2844 
2845  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2846  window_title = av_asprintf("%s - %s", t->value, input_filename);
2847 
2848  /* if seeking requested, we execute it */
2849  if (start_time != AV_NOPTS_VALUE) {
2850  int64_t timestamp;
2851 
2852  timestamp = start_time;
2853  /* add the stream start time */
2854  if (ic->start_time != AV_NOPTS_VALUE)
2855  timestamp += ic->start_time;
2856  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2857  if (ret < 0) {
2858  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2859  is->filename, (double)timestamp / AV_TIME_BASE);
2860  }
2861  }
2862 
2863  is->realtime = is_realtime(ic);
2864 
2865  if (show_status)
2866  av_dump_format(ic, 0, is->filename, 0);
2867 
2868  for (i = 0; i < ic->nb_streams; i++) {
2869  AVStream *st = ic->streams[i];
2870  enum AVMediaType type = st->codec->codec_type;
2871  st->discard = AVDISCARD_ALL;
2872  if (wanted_stream_spec[type] && st_index[type] == -1)
2873  if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
2874  st_index[type] = i;
2875  }
2876  for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
2877  if (wanted_stream_spec[i] && st_index[i] == -1) {
2878  av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
2879  st_index[i] = INT_MAX;
2880  }
2881  }
2882 
2883  if (!video_disable)
2884  st_index[AVMEDIA_TYPE_VIDEO] =
2885  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2886  st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2887  if (!audio_disable)
2888  st_index[AVMEDIA_TYPE_AUDIO] =
2889  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2890  st_index[AVMEDIA_TYPE_AUDIO],
2891  st_index[AVMEDIA_TYPE_VIDEO],
2892  NULL, 0);
2893  if (!subtitle_disable)
2894  st_index[AVMEDIA_TYPE_SUBTITLE] =
2895  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2896  st_index[AVMEDIA_TYPE_SUBTITLE],
2897  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2898  st_index[AVMEDIA_TYPE_AUDIO] :
2899  st_index[AVMEDIA_TYPE_VIDEO]),
2900  NULL, 0);
2901 
2902  is->show_mode = show_mode;
2903  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2904  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2905  AVCodecContext *avctx = st->codec;
2906  AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
2907  if (avctx->width)
2908  set_default_window_size(avctx->width, avctx->height, sar);
2909  }
2910 
2911  /* open the streams */
2912  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2913  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2914  }
2915 
2916  ret = -1;
2917  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2918  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2919  }
2920  if (is->show_mode == SHOW_MODE_NONE)
2921  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2922 
2923  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2924  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2925  }
2926 
2927  if (is->video_stream < 0 && is->audio_stream < 0) {
2928  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
2929  is->filename);
2930  ret = -1;
2931  goto fail;
2932  }
2933 
2934  if (infinite_buffer < 0 && is->realtime)
2935  infinite_buffer = 1;
2936 
2937  for (;;) {
2938  if (is->abort_request)
2939  break;
2940  if (is->paused != is->last_paused) {
2941  is->last_paused = is->paused;
2942  if (is->paused)
2943  is->read_pause_return = av_read_pause(ic);
2944  else
2945  av_read_play(ic);
2946  }
2947 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2948  if (is->paused &&
2949  (!strcmp(ic->iformat->name, "rtsp") ||
2950  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2951  /* wait 10 ms to avoid trying to get another packet */
2952  /* XXX: horrible */
2953  SDL_Delay(10);
2954  continue;
2955  }
2956 #endif
2957  if (is->seek_req) {
2958  int64_t seek_target = is->seek_pos;
2959  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2960  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2961 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2962 // of the seek_pos/seek_rel variables
2963 
2964  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2965  if (ret < 0) {
2967  "%s: error while seeking\n", is->ic->filename);
2968  } else {
2969  if (is->audio_stream >= 0) {
2970  packet_queue_flush(&is->audioq);
2971  packet_queue_put(&is->audioq, &flush_pkt);
2972  }
2973  if (is->subtitle_stream >= 0) {
2974  packet_queue_flush(&is->subtitleq);
2975  packet_queue_put(&is->subtitleq, &flush_pkt);
2976  }
2977  if (is->video_stream >= 0) {
2978  packet_queue_flush(&is->videoq);
2979  packet_queue_put(&is->videoq, &flush_pkt);
2980  }
2981  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
2982  set_clock(&is->extclk, NAN, 0);
2983  } else {
2984  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
2985  }
2986  }
2987  is->seek_req = 0;
2988  is->queue_attachments_req = 1;
2989  is->eof = 0;
2990  if (is->paused)
2991  step_to_next_frame(is);
2992  }
2993  if (is->queue_attachments_req) {
2994  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
2995  AVPacket copy;
2996  if ((ret = av_copy_packet(&copy, &is->video_st->attached_pic)) < 0)
2997  goto fail;
2998  packet_queue_put(&is->videoq, &copy);
2999  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
3000  }
3001  is->queue_attachments_req = 0;
3002  }
3003 
3004  /* if the queues are full, no need to read more */
3005  if (infinite_buffer<1 &&
3006  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
3007  || ( (is->audioq .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
3008  && (is->videoq .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request
3009  || (is->video_st && (is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)))
3010  && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
3011  /* wait 10 ms */
3012  SDL_LockMutex(wait_mutex);
3013  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3014  SDL_UnlockMutex(wait_mutex);
3015  continue;
3016  }
3017  if (!is->paused &&
3018  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
3019  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
3020  if (loop != 1 && (!loop || --loop)) {
3021  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
3022  } else if (autoexit) {
3023  ret = AVERROR_EOF;
3024  goto fail;
3025  }
3026  }
3027  ret = av_read_frame(ic, pkt);
3028  if (ret < 0) {
3029  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
3030  if (is->video_stream >= 0)
3031  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
3032  if (is->audio_stream >= 0)
3033  packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
3034  if (is->subtitle_stream >= 0)
3035  packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
3036  is->eof = 1;
3037  }
3038  if (ic->pb && ic->pb->error)
3039  break;
3040  SDL_LockMutex(wait_mutex);
3041  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3042  SDL_UnlockMutex(wait_mutex);
3043  continue;
3044  } else {
3045  is->eof = 0;
3046  }
3047  /* check if packet is in play range specified by user, then queue, otherwise discard */
3048  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3049  pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
3050  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3051  (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3052  av_q2d(ic->streams[pkt->stream_index]->time_base) -
3053  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3054  <= ((double)duration / 1000000);
3055  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3056  packet_queue_put(&is->audioq, pkt);
3057  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3058  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3059  packet_queue_put(&is->videoq, pkt);
3060  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3061  packet_queue_put(&is->subtitleq, pkt);
3062  } else {
3063  av_free_packet(pkt);
3064  }
3065  }
3066  /* wait until the end */
3067  while (!is->abort_request) {
3068  SDL_Delay(100);
3069  }
3070 
3071  ret = 0;
3072  fail:
3073  /* close each stream */
3074  if (is->audio_stream >= 0)
3075  stream_component_close(is, is->audio_stream);
3076  if (is->video_stream >= 0)
3077  stream_component_close(is, is->video_stream);
3078  if (is->subtitle_stream >= 0)
3079  stream_component_close(is, is->subtitle_stream);
3080  if (ic) {
3081  avformat_close_input(&ic);
3082  is->ic = NULL;
3083  }
3084 
3085  if (ret != 0) {
3086  SDL_Event event;
3087 
3088  event.type = FF_QUIT_EVENT;
3089  event.user.data1 = is;
3090  SDL_PushEvent(&event);
3091  }
3092  SDL_DestroyMutex(wait_mutex);
3093  return 0;
3094 }
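
The pkt_in_play_range test in the loop above converts a packet timestamp into seconds relative to both the stream start and the -ss offset before comparing it against -t. The same arithmetic in isolation is shown below with invented values (a packet at pts 900000 in a 1/90000 time base, -ss 0, -t 10); it is a sketch, not code from ffplay.c.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    int64_t pkt_ts            = 900000;        /* packet pts in the stream time base */
    int64_t stream_start_time = 0;             /* st->start_time                     */
    double  time_base         = 1.0 / 90000.0; /* av_q2d(st->time_base)              */
    int64_t start_time_us     = 0;             /* -ss, in microseconds               */
    int64_t duration_us       = 10000000;      /* -t,  in microseconds               */

    double pkt_time = (pkt_ts - stream_start_time) * time_base
                      - (double)start_time_us / 1000000.0;
    int in_range = pkt_time <= (double)duration_us / 1000000.0;

    printf("packet at %.3f s -> %s\n", pkt_time, in_range ? "queued" : "discarded");
    return 0;
}

Packets that land outside the requested range are simply freed instead of being queued, which is what limits playback to the -ss/-t window without any extra seeking.
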
3095 
3096 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
3097 {
3098  VideoState *is;
3099 
3100  is = av_mallocz(sizeof(VideoState));
3101  if (!is)
3102  return NULL;
3103  av_strlcpy(is->filename, filename, sizeof(is->filename));
3104  is->iformat = iformat;
3105  is->ytop = 0;
3106  is->xleft = 0;
3107 
3108  /* start video display */
3109  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3110  goto fail;
3111  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3112  goto fail;
3113  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3114  goto fail;
3115 
3116  packet_queue_init(&is->videoq);
3117  packet_queue_init(&is->audioq);
3118  packet_queue_init(&is->subtitleq);
3119 
3120  is->continue_read_thread = SDL_CreateCond();
3121 
3122  init_clock(&is->vidclk, &is->videoq.serial);
3123  init_clock(&is->audclk, &is->audioq.serial);
3124  init_clock(&is->extclk, &is->extclk.serial);
3125  is->audio_clock_serial = -1;
3126  is->av_sync_type = av_sync_type;
3127  is->read_tid = SDL_CreateThread(read_thread, is);
3128  if (!is->read_tid) {
3129 fail:
3130  stream_close(is);
3131  return NULL;
3132  }
3133  return is;
3134 }
3135 
3136 static void stream_cycle_channel(VideoState *is, int codec_type)
3137 {
3138  AVFormatContext *ic = is->ic;
3139  int start_index, stream_index;
3140  int old_index;
3141  AVStream *st;
3142  AVProgram *p = NULL;
3143  int nb_streams = is->ic->nb_streams;
3144 
3145  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3146  start_index = is->last_video_stream;
3147  old_index = is->video_stream;
3148  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3149  start_index = is->last_audio_stream;
3150  old_index = is->audio_stream;
3151  } else {
3152  start_index = is->last_subtitle_stream;
3153  old_index = is->subtitle_stream;
3154  }
3155  stream_index = start_index;
3156 
3157  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3158  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3159  if (p) {
3160  nb_streams = p->nb_stream_indexes;
3161  for (start_index = 0; start_index < nb_streams; start_index++)
3162  if (p->stream_index[start_index] == stream_index)
3163  break;
3164  if (start_index == nb_streams)
3165  start_index = -1;
3166  stream_index = start_index;
3167  }
3168  }
3169 
3170  for (;;) {
3171  if (++stream_index >= nb_streams)
3172  {
3173  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3174  {
3175  stream_index = -1;
3176  is->last_subtitle_stream = -1;
3177  goto the_end;
3178  }
3179  if (start_index == -1)
3180  return;
3181  stream_index = 0;
3182  }
3183  if (stream_index == start_index)
3184  return;
3185  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3186  if (st->codec->codec_type == codec_type) {
3187  /* check that parameters are OK */
3188  switch (codec_type) {
3189  case AVMEDIA_TYPE_AUDIO:
3190  if (st->codec->sample_rate != 0 &&
3191  st->codec->channels != 0)
3192  goto the_end;
3193  break;
3194  case AVMEDIA_TYPE_VIDEO:
3195  case AVMEDIA_TYPE_SUBTITLE:
3196  goto the_end;
3197  default:
3198  break;
3199  }
3200  }
3201  }
3202  the_end:
3203  if (p && stream_index != -1)
3204  stream_index = p->stream_index[stream_index];
3205  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3206  av_get_media_type_string(codec_type),
3207  old_index,
3208  stream_index);
3209 
3210  stream_component_close(is, old_index);
3211  stream_component_open(is, stream_index);
3212 }
3213 
3214 
3215 static void toggle_full_screen(VideoState *is)
3216 {
3217 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
3218  /* OS X needs to reallocate the SDL overlays */
3219  int i;
3220  for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
3221  is->pictq.queue[i].reallocate = 1;
3222 #endif
3223  is_full_screen = !is_full_screen;
3224  video_open(is, 1, NULL);
3225 }
3226 
3227 static void toggle_audio_display(VideoState *is)
3228 {
3229  int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
3230  int next = is->show_mode;
3231  do {
3232  next = (next + 1) % SHOW_MODE_NB;
3233  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3234  if (is->show_mode != next) {
3235  fill_rectangle(screen,
3236  is->xleft, is->ytop, is->width, is->height,
3237  bgcolor, 1);
3238  is->force_refresh = 1;
3239  is->show_mode = next;
3240  }
3241 }
3242 
3243 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3244  double remaining_time = 0.0;
3245  SDL_PumpEvents();
3246  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_ALLEVENTS)) {
3247  if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3248  SDL_ShowCursor(0);
3249  cursor_hidden = 1;
3250  }
3251  if (remaining_time > 0.0)
3252  av_usleep((int64_t)(remaining_time * 1000000.0));
3253  remaining_time = REFRESH_RATE;
3254  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3255  video_refresh(is, &remaining_time);
3256  SDL_PumpEvents();
3257  }
3258 }
3259 
3260 static void seek_chapter(VideoState *is, int incr)
3261 {
3262  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3263  int i;
3264 
3265  if (!is->ic->nb_chapters)
3266  return;
3267 
3268  /* find the current chapter */
3269  for (i = 0; i < is->ic->nb_chapters; i++) {
3270  AVChapter *ch = is->ic->chapters[i];
3271  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3272  i--;
3273  break;
3274  }
3275  }
3276 
3277  i += incr;
3278  i = FFMAX(i, 0);
3279  if (i >= is->ic->nb_chapters)
3280  return;
3281 
3282  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3283  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3284  AV_TIME_BASE_Q), 0, 0);
3285 }
3286 
3287 /* handle an event sent by the GUI */
3288 static void event_loop(VideoState *cur_stream)
3289 {
3290  SDL_Event event;
3291  double incr, pos, frac;
3292 
3293  for (;;) {
3294  double x;
3295  refresh_loop_wait_event(cur_stream, &event);
3296  switch (event.type) {
3297  case SDL_KEYDOWN:
3298  if (exit_on_keydown) {
3299  do_exit(cur_stream);
3300  break;
3301  }
3302  switch (event.key.keysym.sym) {
3303  case SDLK_ESCAPE:
3304  case SDLK_q:
3305  do_exit(cur_stream);
3306  break;
3307  case SDLK_f:
3308  toggle_full_screen(cur_stream);
3309  cur_stream->force_refresh = 1;
3310  break;
3311  case SDLK_p:
3312  case SDLK_SPACE:
3313  toggle_pause(cur_stream);
3314  break;
3315  case SDLK_s: // S: Step to next frame
3316  step_to_next_frame(cur_stream);
3317  break;
3318  case SDLK_a:
3319  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3320  break;
3321  case SDLK_v:
3322  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3323  break;
3324  case SDLK_c:
3325  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3326  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3327  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3328  break;
3329  case SDLK_t:
3330  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3331  break;
3332  case SDLK_w:
3333 #if CONFIG_AVFILTER
3334  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3335  if (++cur_stream->vfilter_idx >= nb_vfilters)
3336  cur_stream->vfilter_idx = 0;
3337  } else {
3338  cur_stream->vfilter_idx = 0;
3339  toggle_audio_display(cur_stream);
3340  }
3341 #else
3342  toggle_audio_display(cur_stream);
3343 #endif
3344  break;
3345  case SDLK_PAGEUP:
3346  if (cur_stream->ic->nb_chapters <= 1) {
3347  incr = 600.0;
3348  goto do_seek;
3349  }
3350  seek_chapter(cur_stream, 1);
3351  break;
3352  case SDLK_PAGEDOWN:
3353  if (cur_stream->ic->nb_chapters <= 1) {
3354  incr = -600.0;
3355  goto do_seek;
3356  }
3357  seek_chapter(cur_stream, -1);
3358  break;
3359  case SDLK_LEFT:
3360  incr = -10.0;
3361  goto do_seek;
3362  case SDLK_RIGHT:
3363  incr = 10.0;
3364  goto do_seek;
3365  case SDLK_UP:
3366  incr = 60.0;
3367  goto do_seek;
3368  case SDLK_DOWN:
3369  incr = -60.0;
3370  do_seek:
3371  if (seek_by_bytes) {
3372  pos = -1;
3373  if (pos < 0 && cur_stream->video_stream >= 0)
3374  pos = frame_queue_last_pos(&cur_stream->pictq);
3375  if (pos < 0 && cur_stream->audio_stream >= 0)
3376  pos = frame_queue_last_pos(&cur_stream->sampq);
3377  if (pos < 0)
3378  pos = avio_tell(cur_stream->ic->pb);
3379  if (cur_stream->ic->bit_rate)
3380  incr *= cur_stream->ic->bit_rate / 8.0;
3381  else
3382  incr *= 180000.0;
3383  pos += incr;
3384  stream_seek(cur_stream, pos, incr, 1);
3385  } else {
3386  pos = get_master_clock(cur_stream);
3387  if (isnan(pos))
3388  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3389  pos += incr;
3390  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3391  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3392  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3393  }
3394  break;
3395  default:
3396  break;
3397  }
3398  break;
3399  case SDL_VIDEOEXPOSE:
3400  cur_stream->force_refresh = 1;
3401  break;
3402  case SDL_MOUSEBUTTONDOWN:
3403  if (exit_on_mousedown) {
3404  do_exit(cur_stream);
3405  break;
3406  }
3407  case SDL_MOUSEMOTION:
3408  if (cursor_hidden) {
3409  SDL_ShowCursor(1);
3410  cursor_hidden = 0;
3411  }
3412  cursor_last_shown = av_gettime_relative();
3413  if (event.type == SDL_MOUSEBUTTONDOWN) {
3414  x = event.button.x;
3415  } else {
3416  if (event.motion.state != SDL_PRESSED)
3417  break;
3418  x = event.motion.x;
3419  }
3420  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3421  uint64_t size = avio_size(cur_stream->ic->pb);
3422  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3423  } else {
3424  int64_t ts;
3425  int ns, hh, mm, ss;
3426  int tns, thh, tmm, tss;
3427  tns = cur_stream->ic->duration / 1000000LL;
3428  thh = tns / 3600;
3429  tmm = (tns % 3600) / 60;
3430  tss = (tns % 60);
3431  frac = x / cur_stream->width;
3432  ns = frac * tns;
3433  hh = ns / 3600;
3434  mm = (ns % 3600) / 60;
3435  ss = (ns % 60);
3437  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3438  hh, mm, ss, thh, tmm, tss);
3439  ts = frac * cur_stream->ic->duration;
3440  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3441  ts += cur_stream->ic->start_time;
3442  stream_seek(cur_stream, ts, 0, 0);
3443  }
3444  break;
3445  case SDL_VIDEORESIZE:
3446  screen = SDL_SetVideoMode(FFMIN(16383, event.resize.w), event.resize.h, 0,
3447  SDL_HWSURFACE|(is_full_screen?SDL_FULLSCREEN:SDL_RESIZABLE)|SDL_ASYNCBLIT|SDL_HWACCEL);
3448  if (!screen) {
3449  av_log(NULL, AV_LOG_FATAL, "Failed to set video mode\n");
3450  do_exit(cur_stream);
3451  }
3452  screen_width = cur_stream->width = screen->w;
3453  screen_height = cur_stream->height = screen->h;
3454  cur_stream->force_refresh = 1;
3455  break;
3456  case SDL_QUIT:
3457  case FF_QUIT_EVENT:
3458  do_exit(cur_stream);
3459  break;
3460  case FF_ALLOC_EVENT:
3461  alloc_picture(event.user.data1);
3462  break;
3463  default:
3464  break;
3465  }
3466  }
3467 }
3468 
3469 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3470 {
3471  av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3472  return opt_default(NULL, "video_size", arg);
3473 }
3474 
3475 static int opt_width(void *optctx, const char *opt, const char *arg)
3476 {
3477  screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3478  return 0;
3479 }
3480 
3481 static int opt_height(void *optctx, const char *opt, const char *arg)
3482 {
3483  screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3484  return 0;
3485 }
3486 
3487 static int opt_format(void *optctx, const char *opt, const char *arg)
3488 {
3489  file_iformat = av_find_input_format(arg);
3490  if (!file_iformat) {
3491  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3492  return AVERROR(EINVAL);
3493  }
3494  return 0;
3495 }
3496 
3497 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3498 {
3499  av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3500  return opt_default(NULL, "pixel_format", arg);
3501 }
3502 
3503 static int opt_sync(void *optctx, const char *opt, const char *arg)
3504 {
3505  if (!strcmp(arg, "audio"))
3506  av_sync_type = AV_SYNC_AUDIO_MASTER;
3507  else if (!strcmp(arg, "video"))
3508  av_sync_type = AV_SYNC_VIDEO_MASTER;
3509  else if (!strcmp(arg, "ext"))
3510  av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3511  else {
3512  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3513  exit(1);
3514  }
3515  return 0;
3516 }
3517 
3518 static int opt_seek(void *optctx, const char *opt, const char *arg)
3519 {
3520  start_time = parse_time_or_die(opt, arg, 1);
3521  return 0;
3522 }
3523 
3524 static int opt_duration(void *optctx, const char *opt, const char *arg)
3525 {
3526  duration = parse_time_or_die(opt, arg, 1);
3527  return 0;
3528 }
3529 
3530 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3531 {
3532  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3533  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3534  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3535  parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3536  return 0;
3537 }
3538 
3539 static void opt_input_file(void *optctx, const char *filename)
3540 {
3541  if (input_filename) {
3543  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3544  filename, input_filename);
3545  exit(1);
3546  }
3547  if (!strcmp(filename, "-"))
3548  filename = "pipe:";
3549  input_filename = filename;
3550 }
3551 
3552 static int opt_codec(void *optctx, const char *opt, const char *arg)
3553 {
3554  const char *spec = strchr(opt, ':');
3555  if (!spec) {
3557  "No media specifier was specified in '%s' in option '%s'\n",
3558  arg, opt);
3559  return AVERROR(EINVAL);
3560  }
3561  spec++;
3562  switch (spec[0]) {
3563  case 'a' : audio_codec_name = arg; break;
3564  case 's' : subtitle_codec_name = arg; break;
3565  case 'v' : video_codec_name = arg; break;
3566  default:
3568  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3569  return AVERROR(EINVAL);
3570  }
3571  return 0;
3572 }
3573 
3574 static int dummy;
3575 
3576 static const OptionDef options[] = {
3577 #include "cmdutils_common_opts.h"
3578  { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3579  { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3580  { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3581  { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3582  { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3583  { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3584  { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3585  { "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3586  { "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3587  { "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3588  { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3589  { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3590  { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3591  { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3592  { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3593  { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3594  { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3595  { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3596  { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3597  { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3598  { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3599  { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3600  { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3601  { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3602  { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3603  { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3604  { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3605  { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3606  { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3607 #if CONFIG_AVFILTER
3608  { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3609  { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3610 #endif
3611  { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3612  { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3613  { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3614  { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3615  { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3616  { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3617  { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3618  { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3619  { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
3620  { NULL, },
3621 };
3622 
3623 static void show_usage(void)
3624 {
3625  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3626  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3627  av_log(NULL, AV_LOG_INFO, "\n");
3628 }
3629 
3630 void show_help_default(const char *opt, const char *arg)
3631 {
3632  av_log_set_callback(log_callback_help);
3633  show_usage();
3634  show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3635  show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3636  printf("\n");
3637  show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3638  show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3639 #if !CONFIG_AVFILTER
3640  show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3641 #else
3642  show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3643 #endif
3644  printf("\nWhile playing:\n"
3645  "q, ESC quit\n"
3646  "f toggle full screen\n"
3647  "p, SPC pause\n"
3648  "a cycle audio channel in the current program\n"
3649  "v cycle video channel\n"
3650  "t cycle subtitle channel in the current program\n"
3651  "c cycle program\n"
3652  "w cycle video filters or show modes\n"
3653  "s activate frame-step mode\n"
3654  "left/right seek backward/forward 10 seconds\n"
3655  "down/up seek backward/forward 1 minute\n"
3656  "page down/page up seek backward/forward 10 minutes\n"
3657  "mouse click seek to percentage in file corresponding to fraction of width\n"
3658  );
3659 }
3660 
3661 static int lockmgr(void **mtx, enum AVLockOp op)
3662 {
3663  switch(op) {
3664  case AV_LOCK_CREATE:
3665  *mtx = SDL_CreateMutex();
3666  if(!*mtx)
3667  return 1;
3668  return 0;
3669  case AV_LOCK_OBTAIN:
3670  return !!SDL_LockMutex(*mtx);
3671  case AV_LOCK_RELEASE:
3672  return !!SDL_UnlockMutex(*mtx);
3673  case AV_LOCK_DESTROY:
3674  SDL_DestroyMutex(*mtx);
3675  return 0;
3676  }
3677  return 1;
3678 }
3679 
3680 /* Called from the main */
3681 int main(int argc, char **argv)
3682 {
3683  int flags;
3684  VideoState *is;
3685  char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3686 
3687  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3688  parse_loglevel(argc, argv, options);
3689 
3690  /* register all codecs, demux and protocols */
3691 #if CONFIG_AVDEVICE
3692  avdevice_register_all();
3693 #endif
3694 #if CONFIG_AVFILTER
3695  avfilter_register_all();
3696 #endif
3697  av_register_all();
3698  avformat_network_init();
3699 
3700  init_opts();
3701 
3702  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3703  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3704 
3705  show_banner(argc, argv, options);
3706 
3707  parse_options(NULL, argc, argv, options, opt_input_file);
3708 
3709  if (!input_filename) {
3710  show_usage();
3711  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3713  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3714  exit(1);
3715  }
3716 
3717  if (display_disable) {
3718  video_disable = 1;
3719  }
3720  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3721  if (audio_disable)
3722  flags &= ~SDL_INIT_AUDIO;
3723  if (display_disable)
3724  SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3725 #if !defined(_WIN32) && !defined(__APPLE__)
3726  flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3727 #endif
3728  if (SDL_Init (flags)) {
3729  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3730  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3731  exit(1);
3732  }
3733 
3734  if (!display_disable) {
3735  const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3736  fs_screen_width = vi->current_w;
3737  fs_screen_height = vi->current_h;
3738  }
3739 
3740  SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3741  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3742  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3743 
3744  if (av_lockmgr_register(lockmgr)) {
3745  av_log(NULL, AV_LOG_FATAL, "Could not initialize lock manager!\n");
3746  do_exit(NULL);
3747  }
3748 
3749  av_init_packet(&flush_pkt);
3750  flush_pkt.data = (uint8_t *)&flush_pkt;
3751 
3752  is = stream_open(input_filename, file_iformat);
3753  if (!is) {
3754  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3755  do_exit(NULL);
3756  }
3757 
3758  event_loop(is);
3759 
3760  /* never returns */
3761 
3762  return 0;
3763 }
Definition: ffplay.c:314
Memory buffer source API.
double frame_last_filter_delay
Definition: ffplay.c:277
FrameQueue sampq
Definition: ffplay.c:221
enum VideoState::ShowMode show_mode
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:187
int seek_flags
Definition: ffplay.c:208
struct AVFilterInOut * next
next input/input in the list, NULL if this is the last
Definition: avfilter.h:1366
#define av_opt_set_int_list(obj, name, val, term, flags)
Set a binary option to an integer list.
Definition: opt.h:757
int serial
Definition: ffplay.c:119
AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
Guess the sample aspect ratio of a frame, based on both the stream and the frame aspect ratio...
Definition: utils.c:4265
static int64_t cur_time
Definition: ffserver.c:255
#define OPT_AUDIO
Definition: cmdutils.h:166
static void stream_cycle_channel(VideoState *is, int codec_type)
Definition: ffplay.c:3136
int num
numerator
Definition: rational.h:44
static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3497
int size
Definition: avcodec.h:1424
const char * b
Definition: vf_curves.c:109
static void stream_toggle_pause(VideoState *is)
Definition: ffplay.c:1333
#define DEBUG
Definition: vf_framerate.c:29
MyAVPacketList * first_pkt
Definition: ffplay.c:115
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
Definition: ffplay.c:1319
static int seek_by_bytes
Definition: ffplay.c:321
double audio_diff_cum
Definition: ffplay.c:236
static void packet_queue_init(PacketQueue *q)
Definition: ffplay.c:450
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:1902
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
#define REFRESH_RATE
Definition: ffplay.c:98
AVInputFormat * iformat
Definition: ffplay.c:201
enum AVMediaType codec_type
Definition: rtp.c:37
AVCodecContext * avctx
Definition: ffplay.c:187
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: cmdutils.c:1122
int paused
Definition: ffplay.c:204
static int opt_codec(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3552
static AVStream * video_stream
static unsigned sws_flags
Definition: ffplay.c:106
int abort_request
Definition: ffplay.c:118
unsigned num_rects
Definition: avcodec.h:3803
static void set_clock_at(Clock *c, double pts, int serial, double time)
Definition: ffplay.c:1233
static void toggle_pause(VideoState *is)
Definition: ffplay.c:1346
SDL_Rect last_display_rect
Definition: ffplay.c:286
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:40
double audio_diff_threshold
Definition: ffplay.c:238
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:484
uint8_t silence_buf[SDL_AUDIO_MIN_BUFFER_SIZE]
Definition: ffplay.c:243
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:76
discard all
Definition: avcodec.h:689
int64_t channel_layout
Definition: ffplay.c:132
static AVPacket pkt
const AVClass * sws_get_class(void)
Get the AVClass for swsContext.
Definition: options.c:97
static int audio_disable
Definition: ffplay.c:317
AVStream * audio_st
Definition: ffplay.c:240
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: utils.c:2789
static const char * audio_codec_name
Definition: ffplay.c:338
int av_dup_packet(AVPacket *pkt)
Definition: avpacket.c:248
Picture data structure.
Definition: avcodec.h:3744
int serial
Definition: ffplay.c:152
AVCodec.
Definition: avcodec.h:3472
static void seek_chapter(VideoState *is, int incr)
Definition: ffplay.c:3260
double pts_drift
Definition: ffplay.c:140
AVDictionary * filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id, AVFormatContext *s, AVStream *st, AVCodec *codec)
Filter out options for given codec.
Definition: cmdutils.c:1928
AVLockOp
Lock operation used by lockmgr.
Definition: avcodec.h:5530
int width
Definition: ffplay.c:159
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:1182
AVStream * video_st
Definition: ffplay.c:279
Clock extclk
Definition: ffplay.c:217
static VideoState * stream_open(const char *filename, AVInputFormat *iformat)
Definition: ffplay.c:3096
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1631
void * opaque
Definition: avio.h:52
int viddec_width
Definition: ffplay.c:227
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
struct SwsContext * img_convert_ctx
Definition: ffplay.c:283
AVSubtitleRect ** rects
Definition: avcodec.h:3804
void av_picture_copy(AVPicture *dst, const AVPicture *src, enum AVPixelFormat pix_fmt, int width, int height)
Copy image src to dst.
Definition: avpicture.c:72
Format I/O context.
Definition: avformat.h:1273
static void toggle_audio_display(VideoState *is)
Definition: ffplay.c:3227
AVRational av_guess_frame_rate(AVFormatContext *ctx, AVStream *stream, AVFrame *frame)
Guess the frame rate, based on both the container and codec information.
Definition: utils.c:4288
Definition: ffplay.c:149
memory buffer sink API for audio and video
const AVClass * avcodec_get_class(void)
Get the AVClass for AVCodecContext.
Definition: options.c:262
int av_sync_type
Definition: ffplay.c:232
unsigned int nb_stream_indexes
Definition: avformat.h:1211
#define AV_LOG_QUIET
Print no output.
Definition: log.h:158
int rindex_shown
Definition: ffplay.c:171
int w
width of pict, undefined when pict is not set
Definition: avcodec.h:3776
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:641
double pts
Definition: ffplay.c:153
static AVFilter ** last_filter
Definition: avfilter.c:482
Public dictionary API.
double audio_diff_avg_coef
Definition: ffplay.c:237
AVRational start_pts_tb
Definition: ffplay.c:193
static double cb(void *priv, double x, double y)
Definition: vf_geq.c:97
static int read_thread(void *arg)
Definition: ffplay.c:2766
int keep_last
Definition: ffplay.c:170
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:131
int rdft_bits
Definition: ffplay.c:266
int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance)
Activate resampling compensation ("soft" compensation).
Definition: swresample.c:878
int size
Definition: ffplay.c:117
static Frame * frame_queue_peek(FrameQueue *f)
Definition: ffplay.c:680
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:103
static int64_t start_time
Definition: ffplay.c:325
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:2270
void log_callback_help(void *ptr, int level, const char *fmt, va_list vl)
Trivial log callback.
Definition: cmdutils.c:88
Lock the mutex.
Definition: avcodec.h:5532
uint8_t
static int nb_streams
Definition: ffprobe.c:226
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:135
int opt_default(void *optctx, const char *opt, const char *arg)
Fallback for options that are not explicitly handled, these will be parsed through AVOptions...
Definition: cmdutils.c:526
static int default_width
Definition: ffplay.c:313
int last_video_stream
Definition: ffplay.c:302
int last_subtitle_stream
Definition: ffplay.c:302
8 bit with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:74
AVOptions.
int flags
Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS.
Definition: avformat.h:642
#define HAS_ARG
Definition: cmdutils.h:161
int audio_hw_buf_size
Definition: ffplay.c:242
static void stream_component_close(VideoState *is, int stream_index)
Definition: ffplay.c:2685
#define Y
Definition: vf_boxblur.c:76
static av_always_inline av_const int isnan(float x)
Definition: libm.h:96
uint8_t * data[AV_NUM_DATA_POINTERS]
pointers to the image data planes
Definition: avcodec.h:3745
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
Definition: log.h:202
static int decode_interrupt_cb(void *ctx)
Definition: ffplay.c:2743
struct SwrContext * swr_ctx
Definition: ffplay.c:255
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
int finished
Definition: ffplay.c:189
libavcodec/libavfilter gluing utilities
static void event_loop(VideoState *cur_stream)
Definition: ffplay.c:3288
static int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1, enum AVSampleFormat fmt2, int64_t channel_count2)
Definition: ffplay.c:372
int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the stream st contained in s is matched by the stream specifier spec.
Definition: utils.c:4309
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:257
static void packet_queue_destroy(PacketQueue *q)
Definition: ffplay.c:475
#define AVFMT_FLAG_GENPTS
Generate missing pts even if it requires parsing future frames.
Definition: avformat.h:1385
static int framedrop
Definition: ffplay.c:335
static void alloc_picture(VideoState *is)
Definition: ffplay.c:1564
void init_opts(void)
Initialize the cmdutils option system, in particular allocate the *_opts contexts.
Definition: cmdutils.c:74
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1341
AVPacket pkt
Definition: ffplay.c:109
int bytes_per_sec
Definition: ffplay.c:135
void avfilter_register_all(void)
Initialize the filter system.
Definition: allfilters.c:40
static AVFrame * frame
AVFormatContext * avformat_alloc_context(void)
Allocate an AVFormatContext.
Definition: options.c:111
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
void av_codec_set_lowres(AVCodecContext *avctx, int val)
static int64_t audio_callback_time
Definition: ffplay.c:353
static double av_q2d(AVRational a)
Convert rational to double.
Definition: rational.h:80
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:39
static int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
Definition: ffplay.c:383
int flags
Flags modifying the (de)muxer behaviour.
Definition: avformat.h:1384
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
Definition: ffplay.c:502
static void sigterm_handler(int sig)
Definition: ffplay.c:1157
uint8_t * data
Definition: avcodec.h:1423
void parse_options(void *optctx, int argc, char **argv, const OptionDef *options, void(*parse_arg_function)(void *, const char *))
Definition: cmdutils.c:364
int freq
Definition: ffplay.c:130
int avformat_network_init(void)
Do global initialization of network components.
Definition: utils.c:4200
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:91
Frame queue[FRAME_QUEUE_SIZE]
Definition: ffplay.c:165
Definition: mxfdec.c:266
#define AVERROR_EOF
End of file.
Definition: error.h:55
#define sp
Definition: regdef.h:63
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
Definition: ffplay.c:138
static void copy(LZOContext *c, int cnt)
Copies bytes from input to output buffer with checking.
Definition: lzo.c:85
const AVClass * avformat_get_class(void)
Get the AVClass for AVFormatContext.
Definition: options.c:134
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:487
static void fill_rectangle(SDL_Surface *screen, int x, int y, int w, int h, int color, int update)
Definition: ffplay.c:785
external API header
#define AV_NOSYNC_THRESHOLD
Definition: ffplay.c:84
ptrdiff_t size
Definition: opengl_enc.c:101
int64_t av_frame_get_best_effort_timestamp(const AVFrame *frame)
Accessors for some AVFrame fields.
int h
height of pict, undefined when pict is not set
Definition: avcodec.h:3777
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:390
void show_help_options(const OptionDef *options, const char *msg, int req_flags, int rej_flags, int alt_flags)
Print help for all options matching specified flags.
Definition: cmdutils.c:157
static int64_t duration
Definition: ffplay.c:326
AVRational sar
Definition: ffplay.c:161
AVPacket pkt_temp
Definition: ffplay.c:185
#define A(x)
Definition: vp56_arith.h:28
unsigned int * stream_index
Definition: avformat.h:1210
#define av_log(a,...)
static void duplicate_right_border_pixels(SDL_Overlay *bmp)
Definition: ffplay.c:1595
#define AV_OPT_FLAG_ENCODING_PARAM
a generic parameter which can be set by the user for muxing or encoding
Definition: opt.h:285
PacketQueue videoq
Definition: ffplay.c:280
AVDictionary ** setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts)
Setup AVCodecContext options for avformat_find_stream_info().
Definition: cmdutils.c:1985
AVDictionary * format_opts
Definition: cmdutils.c:68
static void * av_x_if_null(const void *p, const void *x)
Return x default pointer in case p is NULL.
Definition: avutil.h:300
#define SAMPLE_ARRAY_SIZE
Definition: ffplay.c:102
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:140
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate, streams, container, programs, metadata, side data, codec and time base.
Definition: dump.c:480
Main libavdevice API header.
#define U(x)
Definition: vp56_arith.h:37
int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type, int wanted_stream_nb, int related_stream, AVCodec **decoder_ret, int flags)
Find the "best" stream in the file.
Definition: utils.c:3572
int avcodec_close(AVCodecContext *avctx)
Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext its...
Definition: utils.c:2902
libswresample public header
enum AVCodecID id
Definition: avcodec.h:3486
AVProgram * av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
Find the programs which belong to a given stream.
Definition: utils.c:3555
int audio_diff_avg_count
Definition: ffplay.c:239
const AVS_VideoInfo * vi
Definition: avisynth_c.h:658
int ytop
Definition: ffplay.c:290
int width
width and height of the video frame
Definition: frame.h:220
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:71
AVDictionary * metadata
Metadata that applies to the whole file.
Definition: avformat.h:1485
int seek_req
Definition: ffplay.c:207
int(* callback)(void *)
Definition: avio.h:51
int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, const AVPacket *avpkt)
Decode the video frame of size avpkt->size from avpkt->data into picture.
Definition: utils.c:2408
Create a mutex.
Definition: avcodec.h:5531
#define FRAME_QUEUE_SIZE
Definition: ffplay.c:127
static void video_refresh(void *opaque, double *remaining_time)
Definition: ffplay.c:1409
int read_pause_return
Definition: ffplay.c:211
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:474
#define AV_OPT_FLAG_FILTERING_PARAM
a generic parameter which can be set by the user for filtering
Definition: opt.h:302
int y
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3775
#define AVERROR(e)
Definition: error.h:43
static AVStream * audio_stream
static void frame_queue_next(FrameQueue *f)
Definition: ffplay.c:737
int av_opt_eval_flags(void *obj, const AVOption *o, const char *val, int *flags_out)
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:148
The libswresample context.
#define MIN_FRAMES
Definition: ffplay.c:68
RDFTContext * rdft
Definition: ffplay.c:265
static int frame_queue_nb_remaining(FrameQueue *f)
Definition: ffplay.c:761
#define EXTERNAL_CLOCK_MAX_FRAMES
Definition: ffplay.c:70
static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
Definition: ffplay.c:439
static int autorotate
Definition: ffplay.c:349
int capabilities
Codec capabilities.
Definition: avcodec.h:3491
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
#define CURSOR_HIDE_DELAY
Definition: ffplay.c:104
int av_read_play(AVFormatContext *s)
Start playing a network-based stream (e.g.
Definition: utils.c:3638
static double compute_target_delay(double delay, VideoState *is)
Definition: ffplay.c:1360
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:199
const char * arg
Definition: jacosubdec.c:66
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1597
int reallocate
Definition: ffplay.c:158
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:491
static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:393
AVChapter ** chapters
Definition: avformat.h:1475
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:342
simple assert() macros that are a bit more flexible than ISO C assert().
static void stream_close(VideoState *is)
Definition: ffplay.c:1118
int video_stream
Definition: ffplay.c:278
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:47
int * queue_serial
Definition: ffplay.c:145
static void init_clock(Clock *c, int *queue_serial)
Definition: ffplay.c:1253
int xpos
Definition: ffplay.c:268
int channels
Definition: ffplay.c:131
static enum ShowMode show_mode
Definition: ffplay.c:337
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1206
#define FFMAX(a, b)
Definition: common.h:79
static void packet_queue_start(PacketQueue *q)
Definition: ffplay.c:493
static const OptionDef options[]
Definition: ffplay.c:3576
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:128
float FFTSample
Definition: avfft.h:35
static int dummy
Definition: ffplay.c:3574
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:83
#define fail()
Definition: checkasm.h:57
double audio_clock
Definition: ffplay.c:234
static const int sample_rates[]
Definition: dcaenc.h:32
int force_refresh
Definition: ffplay.c:203
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:2323
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare 2 timestamps each in its own timebases.
Definition: mathematics.c:145
AVDictionary * sws_dict
Definition: cmdutils.c:66
static int opt_sync(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3503
struct SwsContext * sws_getCachedContext(struct SwsContext *context, int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Check if context can be reused, otherwise reallocate a new one.
Definition: utils.c:2304
static void update_sample_display(VideoState *is, short *samples, int samples_size)
Definition: ffplay.c:2246
void av_rdft_calc(RDFTContext *s, FFTSample *data)
uint32_t end_display_time
Definition: avcodec.h:3802
static void decoder_destroy(Decoder *d)
Definition: ffplay.c:633
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:3805
static int genpts
Definition: ffplay.c:328
AVCodecContext * codec
Codec context associated with this stream.
Definition: avformat.h:861
static AVPacket flush_pkt
Definition: ffplay.c:355
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:427
double frame_last_returned_time
Definition: ffplay.c:276
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: avfilter.c:487
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:113
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1481
static const char * subtitle_codec_name
Definition: ffplay.c:339
static int subtitle_disable
Definition: ffplay.c:319
void av_format_inject_global_side_data(AVFormatContext *s)
This function will cause global side data to be injected in the next packet of each stream as well as...
Definition: utils.c:130
int max_size
Definition: ffplay.c:169
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1329
int step
Definition: ffplay.c:291
SDL_Thread * decoder_tid
Definition: ffplay.c:196
static SDL_Surface * screen
Definition: ffplay.c:360
const char program_name[]
program name, defined by the program for show_version().
Definition: ffplay.c:64
int av_read_pause(AVFormatContext *s)
Pause a network-based stream (e.g.
Definition: utils.c:3647
SDL_mutex * mutex
Definition: ffplay.c:120
int audio_write_buf_size
Definition: ffplay.c:249
AVInputFormat * av_find_input_format(const char *short_name)
Find AVInputFormat based on the short name of the input format.
Definition: format.c:160
static av_always_inline void update(SilenceDetectContext *s, AVFrame *insamples, int is_silence, int64_t nb_samples_notify, AVRational time_base)
#define SAMPLE_QUEUE_SIZE
Definition: ffplay.c:126
#define SUBPICTURE_QUEUE_SIZE
Definition: ffplay.c:125
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:242
struct MyAVPacketList * next
Definition: ffplay.c:110
#define AV_CH_LAYOUT_STEREO_DOWNMIX
static double lum(void *priv, double x, double y, int plane)
Definition: vf_fftfilt.c:74
char filename[1024]
input or output filename
Definition: avformat.h:1349
AVPicture pict
data+linesize for the bitmap of this subtitle.
Definition: avcodec.h:3784
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:246
#define FFMIN(a, b)
Definition: common.h:81
SDL_mutex * mutex
Definition: ffplay.c:172
float y
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:397
int windex
Definition: ffplay.c:167
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:611
static int cursor_hidden
Definition: ffplay.c:343
static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond)
Definition: ffplay.c:539
AVSubtitle sub
Definition: ffplay.c:151
static int lockmgr(void **mtx, enum AVLockOp op)
Definition: ffplay.c:3661
int width
picture width / height.
Definition: avcodec.h:1681
int main(int argc, char **argv)
Definition: ffplay.c:3681
int height
Definition: ffplay.c:160
static void show_usage(void)
Definition: ffplay.c:3623
int nb_packets
Definition: ffplay.c:116
static int opt_width(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3475
static int get_video_frame(VideoState *is, AVFrame *frame)
Definition: ffplay.c:1719
int frame_drops_late
Definition: ffplay.c:257
struct AudioParams audio_src
Definition: ffplay.c:250
static void refresh_loop_wait_event(VideoState *is, SDL_Event *event)
Definition: ffplay.c:3243
static void set_clock_speed(Clock *c, double speed)
Definition: ffplay.c:1247
#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC
Definition: ffplay.c:75
struct SwrContext * swr_alloc_set_opts(struct SwrContext *s, int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate, int64_t in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate, int log_offset, void *log_ctx)
Allocate SwrContext if needed and set/reset common parameters.
Definition: swresample.c:59
static int fast
Definition: ffplay.c:327
void sws_freeContext(struct SwsContext *swsContext)
Free the swscaler context swsContext.
Definition: utils.c:2225
int last_i_start
Definition: ffplay.c:264
uint16_t format
Definition: avcodec.h:3800
char filename[1024]
Definition: ffplay.c:289
int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, const AVPacket *avpkt)
Decode the audio frame of size avpkt->size from avpkt->data into frame.
Definition: utils.c:2558
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
Definition: ffplay.c:841
#define OPT_INT64
Definition: cmdutils.h:170
MyAVPacketList * last_pkt
Definition: ffplay.c:115
Definition: avfft.h:72
void av_rdft_end(RDFTContext *s)
static void step_to_next_frame(VideoState *is)
Definition: ffplay.c:1352
int n
Definition: avisynth_c.h:547
static int frame_queue_prev(FrameQueue *f)
Definition: ffplay.c:753
static int audio_decode_frame(VideoState *is)
Decode one audio frame and return its uncompressed size.
Definition: ffplay.c:2314
RDFTContext * av_rdft_init(int nbits, enum RDFTransformType trans)
Set up a real FFT.
static int is_full_screen
Definition: ffplay.c:352
#define AV_SYNC_THRESHOLD_MAX
Definition: ffplay.c:80
static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
Definition: ffplay.c:799
#define SDL_AUDIO_MIN_BUFFER_SIZE
Definition: ffplay.c:73
static int decoder_reorder_pts
Definition: ffplay.c:330
#define AUDIO_DIFF_AVG_NB
Definition: ffplay.c:95
static void set_clock(Clock *c, double pts, int serial)
Definition: ffplay.c:1241
int16_t sample_array[SAMPLE_ARRAY_SIZE]
Definition: ffplay.c:262
AVFilterContext * filter_ctx
filter context associated to this input/output
Definition: avfilter.h:1360
int paused
Definition: ffplay.c:144
static const char * input_filename
Definition: ffplay.c:309
#define FF_ARRAY_ELEMS(a)
#define AV_DISPOSITION_ATTACHED_PIC
The stream is stored in the file as an attached picture/"cover art" (e.g.
Definition: avformat.h:819
#define av_log2
Definition: intmath.h:100
static Frame * frame_queue_peek_last(FrameQueue *f)
Definition: ffplay.c:690
void show_help_default(const char *opt, const char *arg)
Per-fftool specific help handler.
Definition: ffplay.c:3630
int av_codec_get_max_lowres(const AVCodec *codec)
Definition: utils.c:1303
int64_t pos
Definition: ffplay.c:155
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
int last_audio_stream
Definition: ffplay.c:302
Stream structure.
Definition: avformat.h:842
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal decoder state / flush internal buffers.
Definition: utils.c:3303
const AVClass * avfilter_get_class(void)
Definition: avfilter.c:1192
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:1355
static int fs_screen_width
Definition: ffplay.c:311
#define SAMPLE_CORRECTION_PERCENT_MAX
Definition: ffplay.c:87
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:232
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:4213
static int screen_height
Definition: ffplay.c:316
static int opt_duration(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3524
sample_rate
static AVInputFormat * iformat
Definition: ffprobe.c:214
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
int64_t next_pts
Definition: ffplay.c:194
static int autoexit
Definition: ffplay.c:331
AVFrame * frame
Definition: ffplay.c:150
AVS_Value src
Definition: avisynth_c.h:482
int serial
Definition: ffplay.c:143
enum AVMediaType codec_type
Definition: avcodec.h:1510
int(* read_seek)(struct AVFormatContext *, int stream_index, int64_t timestamp, int flags)
Seek to a given timestamp relative to the frames in stream component stream_index.
Definition: avformat.h:720
static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, int pic_width, int pic_height, AVRational pic_sar)
Definition: ffplay.c:892
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:59
enum AVCodecID codec_id
Definition: avcodec.h:1519
static void do_exit(VideoState *is)
Definition: ffplay.c:1139
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:252
char * av_strdup(const char *s)
Duplicate the string s.
Definition: mem.c:267
int sample_rate
samples per second
Definition: avcodec.h:2262
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:199
AVIOContext * pb
I/O context.
Definition: avformat.h:1315
#define AV_CODEC_FLAG2_FAST
Definition: avcodec.h:803
static int loop
Definition: ffplay.c:334
int last_paused
Definition: ffplay.c:205
static int exit_on_keydown
Definition: ffplay.c:332
FFT functions.
main external API structure.
Definition: avcodec.h:1502
AVCodec * avcodec_find_decoder(enum AVCodecID id)
Find a registered decoder with a matching codec ID.
Definition: utils.c:3016
Decoder subdec
Definition: ffplay.c:225
int av_copy_packet(AVPacket *dst, const AVPacket *src)
Copy packet, including contents.
Definition: avpacket.c:265
av_cold void swr_free(SwrContext **ss)
Free the given SwrContext and set the pointer to NULL.
Definition: swresample.c:140
double max_frame_duration
Definition: ffplay.c:281
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:2883
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:252
Clock vidclk
Definition: ffplay.c:216
int x
Definition: f_ebur128.c:90
static void decoder_start(Decoder *d, int(*fn)(void *), void *arg)
Definition: ffplay.c:2059
#define fp
Definition: regdef.h:44
int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
Definition: swscale.c:1036
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, intapply_map){AudioConvert *ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> in
#define AVFMT_NOGENSEARCH
Format does not allow to fall back on generic search.
Definition: avformat.h:479
static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp)
Definition: ffplay.c:1390
GLint GLenum type
Definition: opengl_enc.c:105
static const char * window_title
Definition: ffplay.c:310
double pts
Definition: ffplay.c:139
static Frame * frame_queue_peek_next(FrameQueue *f)
Definition: ffplay.c:685
static int audio_thread(void *arg)
Definition: ffplay.c:1972
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:69
static int av_sync_type
Definition: ffplay.c:324
int pkt_serial
Definition: ffplay.c:188
BYTE int const BYTE int int int height
Definition: avisynth_c.h:676
static void frame_queue_signal(FrameQueue *f)
Definition: ffplay.c:673
Describe the class of an AVClass context structure.
Definition: log.h:67
int sample_rate
Sample rate of the audio data.
Definition: frame.h:422
int configure_filtergraph(FilterGraph *fg)
static void free_picture(Frame *vp)
Definition: ffplay.c:884
int av_frame_get_channels(const AVFrame *frame)
Definition: f_ebur128.c:90
int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width)
Fill plane linesizes for an image with pixel format pix_fmt and width width.
Definition: imgutils.c:88
static const AVFilterPad inputs[]
Definition: af_ashowinfo.c:239
int pad_idx
index of the filt_ctx pad to use for linking
Definition: avfilter.h:1363
PacketQueue audioq
Definition: ffplay.c:241
int packet_pending
Definition: ffplay.c:190
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
Definition: samplefmt.c:117
int64_t seek_pos
Definition: ffplay.c:209
rational number numerator/denominator
Definition: rational.h:43
#define AV_SYNC_THRESHOLD_MIN
Definition: ffplay.c:78
int allocated
Definition: ffplay.c:157
struct SwsContext * sub_convert_ctx
Definition: ffplay.c:285
#define AV_OPT_FLAG_DECODING_PARAM
a generic parameter which can be set by the user for demuxing or decoding
Definition: opt.h:286
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds...
Definition: cmdutils.c:145
#define OPT_STRING
Definition: cmdutils.h:164
static void video_audio_display(VideoState *s)
Definition: ffplay.c:974
SDL_cond * cond
Definition: ffplay.c:121
#define EXTERNAL_CLOCK_SPEED_MAX
Definition: ffplay.c:91
#define AVSEEK_FLAG_BYTE
seeking based on position in bytes
Definition: avformat.h:2278
static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub)
Definition: ffplay.c:547
AVMediaType
Definition: avutil.h:191
discard useless packets like 0 size packets in avi
Definition: avcodec.h:684
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:101
static int is_realtime(AVFormatContext *s)
Definition: ffplay.c:2749
static void check_external_clock_speed(VideoState *is)
Definition: ffplay.c:1304
int queue_attachments_req
Definition: ffplay.c:206
unsigned nb_filters
Definition: avfilter.h:1179
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:1349
#define snprintf
Definition: snprintf.h:34
static void frame_queue_unref_item(Frame *vp)
Definition: ffplay.c:637
int error
contains the error code or 0 if no error happened
Definition: avio.h:145
int attribute_align_arg swr_convert(struct SwrContext *s, uint8_t *out_arg[SWR_CH_MAX], int out_count, const uint8_t *in_arg[SWR_CH_MAX], int in_count)
Definition: swresample.c:695
misc parsing utilities
SDL_cond * empty_queue_cond
Definition: ffplay.c:191
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:86
#define FF_ALLOC_EVENT
Definition: ffplay.c:357
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: utils.c:1478
int audio_stream
Definition: ffplay.c:230
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:209
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
Seek to timestamp ts.
Definition: utils.c:2222
int64_t pkt_pts
PTS copied from the AVPacket that was decoded to produce this frame.
Definition: frame.h:262
double parse_number_or_die(const char *context, const char *numstr, int type, double min, double max)
Parse a string and return its corresponding value as a double.
Definition: cmdutils.c:124
static int stream_component_open(VideoState *is, int stream_index)
Definition: ffplay.c:2538
char * name
unique name for this input/output in the list
Definition: avfilter.h:1357
static int64_t cursor_last_shown
Definition: ffplay.c:342
static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
Definition: ffplay.c:643
static int opt_frame_size(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3469
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:464
static int64_t pts
Global timestamp for the audio frames.
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:79
AVCodec * avcodec_find_decoder_by_name(const char *name)
Find a registered decoder with the specified name.
Definition: utils.c:3021
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
Definition: ffplay.c:1613
static int flags
Definition: cpu.c:47
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds. ...
Definition: avformat.h:1358
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:182
int frame_drops_early
Definition: ffplay.c:256
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:104
static int synchronize_audio(VideoState *is, int nb_samples)
Definition: ffplay.c:2266
#define EXTERNAL_CLOCK_MIN_FRAMES
Definition: ffplay.c:69
int sample_array_index
Definition: ffplay.c:263
SDL_cond * continue_read_thread
Definition: ffplay.c:304
int64_t start
Definition: avformat.h:1239
static void frame_queue_destory(FrameQueue *f)
Definition: ffplay.c:660
static int64_t frame_queue_last_pos(FrameQueue *f)
Definition: ffplay.c:767
#define OPT_BOOL
Definition: cmdutils.h:162
int64_t pkt_dts
DTS copied from the AVPacket that triggered returning this frame.
Definition: frame.h:269
double speed
Definition: ffplay.c:142
static int exit_on_mousedown
Definition: ffplay.c:333
static int op(uint8_t **dst, const uint8_t *dst_end, GetByteContext *gb, int pixel, int count, int *x, int width, int linesize)
Perform decode operation.
Definition: anm.c:78
#define CODEC_FLAG_EMU_EDGE
Definition: avcodec.h:985
Main libavformat public API header.
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:1024
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:499
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:63
static int video_thread(void *arg)
Definition: ffplay.c:2065
#define OPT_INT
Definition: cmdutils.h:167
AVFilterInOut * avfilter_inout_alloc(void)
Allocate a single AVFilterInOut entry.
Definition: graphparser.c:182
static void set_default_window_size(int width, int height, AVRational sar)
Definition: ffplay.c:1162
AVDictionary * codec_opts
Definition: cmdutils.c:68
struct AudioParams audio_tgt
Definition: ffplay.c:254
#define ALPHA_BLEND(a, oldp, newp, s)
Definition: ffplay.c:834
AVRational av_codec_get_pkt_timebase(const AVCodecContext *avctx)
Free mutex resources.
Definition: avcodec.h:5534
if(ret< 0)
Definition: vf_mcdeint.c:280
uint8_t * audio_buf
Definition: ffplay.c:244
enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt)
Get the packed alternative form of the given sample format.
Definition: samplefmt.c:73
static int display_disable
Definition: ffplay.c:322
static int video_disable
Definition: ffplay.c:318
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
Read packets of a media file to get stream information.
Definition: utils.c:3084
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base...
Definition: avformat.h:894
signed 16 bits
Definition: samplefmt.h:62
int audio_buf_index
Definition: ffplay.c:248
uint8_t * audio_buf1
Definition: ffplay.c:245
static int opt_seek(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3518
static double c[64]
int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags)
Convenience wrapper for av_dict_set that converts the value to a string and stores it...
Definition: dict.c:143
static int screen_width
Definition: ffplay.c:315
PacketQueue * pktq
Definition: ffplay.c:174
int disposition
AV_DISPOSITION_* bit field.
Definition: avformat.h:905
uint32_t start_display_time
Definition: avcodec.h:3801
FFTSample * rdft_data
Definition: ffplay.c:267
static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial)
Definition: ffplay.c:1402
int audio_clock_serial
Definition: ffplay.c:235
#define AV_OPT_SEARCH_FAKE_OBJ
The obj passed to av_opt_find() is fake – only a double pointer to AVClass instead of a required poin...
Definition: opt.h:620
AVRational time_base
time base in which the start/end timestamps are specified
Definition: avformat.h:1238
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:49
char * key
Definition: dict.h:87
int den
denominator
Definition: rational.h:45
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents...
Definition: cmdutils.c:79
PacketQueue subtitleq
Definition: ffplay.c:273
struct AVInputFormat * iformat
The input container format.
Definition: avformat.h:1285
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: utils.c:3721
static int lowres
Definition: ffplay.c:329
int viddec_height
Definition: ffplay.c:228
int eof
Definition: ffplay.c:287
#define GROW_ARRAY(array, nb_elems)
Definition: cmdutils.h:567
static av_always_inline int diff(const uint32_t a, const uint32_t b)
#define av_free(p)
static int infinite_buffer
Definition: ffplay.c:336
#define AVFMT_NO_BYTE_SEEK
Format does not allow seeking by bytes.
Definition: avformat.h:480
double duration
Definition: ffplay.c:154
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:61
char * value
Definition: dict.h:88
int eof_reached
true if eof reached
Definition: avio.h:139
#define NAN
Definition: math.h:28
int len
int channels
number of audio channels
Definition: avcodec.h:2263
int64_t av_frame_get_pkt_pos(const AVFrame *frame)
unsigned int audio_buf1_size
Definition: ffplay.c:247
SDL_Thread * read_tid
Definition: ffplay.c:200
AVPacket pkt
Definition: ffplay.c:184
int frame_size
Definition: ffplay.c:134
void av_log_set_flags(int arg)
Definition: log.c:387
int64_t start_pts
Definition: ffplay.c:192
int abort_request
Definition: ffplay.c:202
static void decoder_abort(Decoder *d, FrameQueue *fq)
Definition: ffplay.c:776
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:421
double last_updated
Definition: ffplay.c:141
Decoder viddec
Definition: ffplay.c:224
AVDictionary * swr_opts
Definition: cmdutils.c:67
int height
Definition: ffplay.c:290
void show_help_children(const AVClass *class, int flags)
Show help for all options with given flags in class and all its children.
Definition: cmdutils.c:186
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:1604
int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters, AVFilterInOut **inputs, AVFilterInOut **outputs, void *log_ctx)
Add a graph described by a string to a graph.
Definition: graphparser.c:530
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, intapply_map){AudioConvert *ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> out
An instance of a filter.
Definition: avfilter.h:633
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1422
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
int bit_rate
Total stream bitrate in bit/s, 0 if not available.
Definition: avformat.h:1375
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:701
int64_t duration
Duration of the stream, in AV_TIME_BASE fractional seconds.
Definition: avformat.h:1368
int height
Definition: frame.h:220
static const char * video_codec_name
Definition: ffplay.c:340
#define MAX_QUEUE_SIZE
Definition: ffplay.c:67
#define av_freep(p)
int64_t pts_correction_num_faulty_pts
Current statistics for PTS correction.
Definition: avcodec.h:3345
PacketQueue * queue
Definition: ffplay.c:186
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:628
static Frame * frame_queue_peek_writable(FrameQueue *f)
Definition: ffplay.c:695
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key, ignoring the suffix of the found key string.
Definition: dict.h:72
static int subtitle_thread(void *arg)
Definition: ffplay.c:2174
FrameQueue subpq
Definition: ffplay.c:220
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
static double get_master_clock(VideoState *is)
Definition: ffplay.c:1286
#define av_malloc_array(a, b)
int size
Definition: ffplay.c:168
int avio_feof(AVIOContext *s)
feof() equivalent for AVIOContext.
Definition: aviobuf.c:301
#define FF_QUIT_EVENT
Definition: ffplay.c:358
int xleft
Definition: ffplay.c:290
#define FFSWAP(type, a, b)
Definition: common.h:84
int nb_channels
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2050
int stream_index
Definition: avcodec.h:1425
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avformat.h:884
static double cr(void *priv, double x, double y)
Definition: vf_geq.c:98
int subtitle_stream
Definition: ffplay.c:271
unsigned int audio_buf_size
Definition: ffplay.c:246
int64_t seek_rel
Definition: ffplay.c:210
int realtime
Definition: ffplay.c:213
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:215
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:907
static void video_display(VideoState *is)
Definition: ffplay.c:1211
static const char * wanted_stream_spec[AVMEDIA_TYPE_NB]
Definition: ffplay.c:320
SDL_Overlay * bmp
Definition: ffplay.c:156
static int show_status
Definition: ffplay.c:323
static int compute_mod(int a, int b)
Definition: ffplay.c:969
AVPixelFormat
Pixel format.
Definition: pixfmt.h:61
This structure stores compressed data.
Definition: avcodec.h:1400
void av_register_all(void)
Initialize libavformat and register all the muxers, demuxers and protocols.
Definition: allformats.c:51
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:369
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
Definition: ffplay.c:2427
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:225
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:252
static void sync_clock_to_slave(Clock *c, Clock *slave)
Definition: ffplay.c:1261
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:857
static void toggle_full_screen(VideoState *is)
Definition: ffplay.c:3215
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1416
static int fs_screen_height
Definition: ffplay.c:312
double last_vis_time
Definition: ffplay.c:269
AVPacket attached_pic
For streams with AV_DISPOSITION_ATTACHED_PIC disposition, this packet will contain the attached pictu...
Definition: avformat.h:934
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:240
#define av_unused
Definition: attributes.h:118
#define tb
Definition: regdef.h:68
av_cold int swr_init(struct SwrContext *s)
Initialize context after user parameters have been set.
Definition: swresample.c:155
AVFormatContext * ic
Definition: ffplay.c:212
simple arithmetic expression evaluator
#define V
Definition: avdct.c:30
static int width
static Frame * frame_queue_peek_readable(FrameQueue *f)
Definition: ffplay.c:711
static int16_t block[64]
Definition: dct-test.c:110