FFmpeg
ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include <stdint.h>
32 
33 #include "libavutil/avstring.h"
34 #include "libavutil/eval.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/dict.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/samplefmt.h"
41 #include "libavutil/avassert.h"
42 #include "libavutil/time.h"
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswscale/swscale.h"
46 #include "libavutil/opt.h"
47 #include "libavcodec/avfft.h"
49 
50 #if CONFIG_AVFILTER
51 # include "libavfilter/avfilter.h"
52 # include "libavfilter/buffersink.h"
53 # include "libavfilter/buffersrc.h"
54 #endif
55 
56 #include <SDL.h>
57 #include <SDL_thread.h>
58 
59 #include "cmdutils.h"
60 
61 #include <assert.h>
62 
63 const char program_name[] = "ffplay";
64 const int program_birth_year = 2003;
65 
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_FRAMES 25
68 #define EXTERNAL_CLOCK_MIN_FRAMES 2
69 #define EXTERNAL_CLOCK_MAX_FRAMES 10
70 
71 /* Minimum SDL audio buffer size, in samples. */
72 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
73 /* Calculate the actual buffer size, keeping in mind that it should not cause too frequent audio callbacks */
74 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
75 
76 /* Step size for volume control in dB */
77 #define SDL_VOLUME_STEP (0.75)
78 
79 /* no AV sync correction is done if below the minimum AV sync threshold */
80 #define AV_SYNC_THRESHOLD_MIN 0.04
81 /* AV sync correction is done if above the maximum AV sync threshold */
82 #define AV_SYNC_THRESHOLD_MAX 0.1
83 /* If a frame duration is longer than this, it will not be duplicated to compensate for AV sync */
84 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
85 /* no AV correction is done if the error is too large */
86 #define AV_NOSYNC_THRESHOLD 10.0
87 
88 /* maximum audio speed change to get correct sync */
89 #define SAMPLE_CORRECTION_PERCENT_MAX 10
90 
91 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
92 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
93 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
94 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
95 
96 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
97 #define AUDIO_DIFF_AVG_NB 20
98 
99 /* poll for a possibly required screen refresh at least this often; should be less than 1/fps */
100 #define REFRESH_RATE 0.01
101 
102 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
103 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
104 #define SAMPLE_ARRAY_SIZE (8 * 65536)
105 
106 #define CURSOR_HIDE_DELAY 1000000
107 
108 #define USE_ONEPASS_SUBTITLE_RENDER 1
109 
110 static unsigned sws_flags = SWS_BICUBIC;
111 
112 typedef struct MyAVPacketList {
115  int serial;
117 
118 typedef struct PacketQueue {
121  int size;
122  int64_t duration;
124  int serial;
125  SDL_mutex *mutex;
126  SDL_cond *cond;
127 } PacketQueue;
128 
129 #define VIDEO_PICTURE_QUEUE_SIZE 3
130 #define SUBPICTURE_QUEUE_SIZE 16
131 #define SAMPLE_QUEUE_SIZE 9
132 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
133 
134 typedef struct AudioParams {
135  int freq;
136  int channels;
137  int64_t channel_layout;
141 } AudioParams;
142 
143 typedef struct Clock {
144  double pts; /* clock base */
145  double pts_drift; /* clock base minus time at which we updated the clock */
146  double last_updated;
147  double speed;
148  int serial; /* clock is based on a packet with this serial */
149  int paused;
150  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
151 } Clock;
152 
153 /* Common struct for handling all types of decoded data and allocated render buffers. */
154 typedef struct Frame {
157  int serial;
158  double pts; /* presentation timestamp for the frame */
159  double duration; /* estimated duration of the frame */
160  int64_t pos; /* byte position of the frame in the input file */
161  int width;
162  int height;
163  int format;
165  int uploaded;
166  int flip_v;
167 } Frame;
168 
169 typedef struct FrameQueue {
171  int rindex;
172  int windex;
173  int size;
174  int max_size;
177  SDL_mutex *mutex;
178  SDL_cond *cond;
180 } FrameQueue;
181 
182 enum {
183  AV_SYNC_AUDIO_MASTER, /* default choice */
185  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
186 };
187 
188 typedef struct Decoder {
193  int finished;
195  SDL_cond *empty_queue_cond;
196  int64_t start_pts;
198  int64_t next_pts;
200  SDL_Thread *decoder_tid;
201 } Decoder;
202 
203 typedef struct VideoState {
204  SDL_Thread *read_tid;
208  int paused;
211  int seek_req;
213  int64_t seek_pos;
214  int64_t seek_rel;
217  int realtime;
218 
222 
226 
230 
232 
234 
235  double audio_clock;
237  double audio_diff_cum; /* used for AV difference average computation */
246  unsigned int audio_buf_size; /* in bytes */
247  unsigned int audio_buf1_size;
248  int audio_buf_index; /* in bytes */
251  int muted;
253 #if CONFIG_AVFILTER
254  struct AudioParams audio_filter_src;
255 #endif
260 
261  enum ShowMode {
263  } show_mode;
270  int xpos;
272  SDL_Texture *vis_texture;
273  SDL_Texture *sub_texture;
274  SDL_Texture *vid_texture;
275 
279 
280  double frame_timer;
286  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
289  int eof;
290 
291  char *filename;
293  int step;
294 
295 #if CONFIG_AVFILTER
296  int vfilter_idx;
297  AVFilterContext *in_video_filter; // the first filter in the video chain
298  AVFilterContext *out_video_filter; // the last filter in the video chain
299  AVFilterContext *in_audio_filter; // the first filter in the audio chain
300  AVFilterContext *out_audio_filter; // the last filter in the audio chain
301  AVFilterGraph *agraph; // audio filter graph
302 #endif
303 
305 
307 } VideoState;
308 
309 /* options specified by the user */
311 static const char *input_filename;
312 static const char *window_title;
313 static int default_width = 640;
314 static int default_height = 480;
315 static int screen_width = 0;
316 static int screen_height = 0;
317 static int audio_disable;
318 static int video_disable;
319 static int subtitle_disable;
320 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
321 static int seek_by_bytes = -1;
322 static int display_disable;
323 static int borderless;
324 static int startup_volume = 100;
325 static int show_status = 1;
327 static int64_t start_time = AV_NOPTS_VALUE;
328 static int64_t duration = AV_NOPTS_VALUE;
329 static int fast = 0;
330 static int genpts = 0;
331 static int lowres = 0;
332 static int decoder_reorder_pts = -1;
333 static int autoexit;
334 static int exit_on_keydown;
335 static int exit_on_mousedown;
336 static int loop = 1;
337 static int framedrop = -1;
338 static int infinite_buffer = -1;
339 static enum ShowMode show_mode = SHOW_MODE_NONE;
340 static const char *audio_codec_name;
341 static const char *subtitle_codec_name;
342 static const char *video_codec_name;
343 double rdftspeed = 0.02;
344 static int64_t cursor_last_shown;
345 static int cursor_hidden = 0;
346 #if CONFIG_AVFILTER
347 static const char **vfilters_list = NULL;
348 static int nb_vfilters = 0;
349 static char *afilters = NULL;
350 #endif
351 static int autorotate = 1;
352 static int find_stream_info = 1;
353 
354 /* current context */
355 static int is_full_screen;
356 static int64_t audio_callback_time;
357 
359 
360 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
361 
362 static SDL_Window *window;
363 static SDL_Renderer *renderer;
364 
365 static const struct TextureFormatEntry {
369  { AV_PIX_FMT_RGB8, SDL_PIXELFORMAT_RGB332 },
370  { AV_PIX_FMT_RGB444, SDL_PIXELFORMAT_RGB444 },
371  { AV_PIX_FMT_RGB555, SDL_PIXELFORMAT_RGB555 },
372  { AV_PIX_FMT_BGR555, SDL_PIXELFORMAT_BGR555 },
373  { AV_PIX_FMT_RGB565, SDL_PIXELFORMAT_RGB565 },
374  { AV_PIX_FMT_BGR565, SDL_PIXELFORMAT_BGR565 },
375  { AV_PIX_FMT_RGB24, SDL_PIXELFORMAT_RGB24 },
376  { AV_PIX_FMT_BGR24, SDL_PIXELFORMAT_BGR24 },
377  { AV_PIX_FMT_0RGB32, SDL_PIXELFORMAT_RGB888 },
378  { AV_PIX_FMT_0BGR32, SDL_PIXELFORMAT_BGR888 },
379  { AV_PIX_FMT_NE(RGB0, 0BGR), SDL_PIXELFORMAT_RGBX8888 },
380  { AV_PIX_FMT_NE(BGR0, 0RGB), SDL_PIXELFORMAT_BGRX8888 },
381  { AV_PIX_FMT_RGB32, SDL_PIXELFORMAT_ARGB8888 },
382  { AV_PIX_FMT_RGB32_1, SDL_PIXELFORMAT_RGBA8888 },
383  { AV_PIX_FMT_BGR32, SDL_PIXELFORMAT_ABGR8888 },
384  { AV_PIX_FMT_BGR32_1, SDL_PIXELFORMAT_BGRA8888 },
385  { AV_PIX_FMT_YUV420P, SDL_PIXELFORMAT_IYUV },
386  { AV_PIX_FMT_YUYV422, SDL_PIXELFORMAT_YUY2 },
387  { AV_PIX_FMT_UYVY422, SDL_PIXELFORMAT_UYVY },
388  { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_UNKNOWN },
389 };
390 
391 #if CONFIG_AVFILTER
392 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
393 {
394  GROW_ARRAY(vfilters_list, nb_vfilters);
395  vfilters_list[nb_vfilters - 1] = arg;
396  return 0;
397 }
398 #endif
399 
400 static inline
401 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
402  enum AVSampleFormat fmt2, int64_t channel_count2)
403 {
404  /* If channel count == 1, planar and non-planar formats are the same */
405  if (channel_count1 == 1 && channel_count2 == 1)
407  else
408  return channel_count1 != channel_count2 || fmt1 != fmt2;
409 }
410 
411 static inline
412 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
413 {
414  if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
415  return channel_layout;
416  else
417  return 0;
418 }
419 
421 {
422  MyAVPacketList *pkt1;
423 
424  if (q->abort_request)
425  return -1;
426 
427  pkt1 = av_malloc(sizeof(MyAVPacketList));
428  if (!pkt1)
429  return -1;
430  pkt1->pkt = *pkt;
431  pkt1->next = NULL;
432  if (pkt == &flush_pkt)
433  q->serial++;
434  pkt1->serial = q->serial;
435 
436  if (!q->last_pkt)
437  q->first_pkt = pkt1;
438  else
439  q->last_pkt->next = pkt1;
440  q->last_pkt = pkt1;
441  q->nb_packets++;
442  q->size += pkt1->pkt.size + sizeof(*pkt1);
443  q->duration += pkt1->pkt.duration;
444  /* XXX: should duplicate packet data in DV case */
445  SDL_CondSignal(q->cond);
446  return 0;
447 }
448 
450 {
451  int ret;
452 
453  SDL_LockMutex(q->mutex);
454  ret = packet_queue_put_private(q, pkt);
455  SDL_UnlockMutex(q->mutex);
456 
457  if (pkt != &flush_pkt && ret < 0)
458  av_packet_unref(pkt);
459 
460  return ret;
461 }
462 
463 static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
464 {
465  AVPacket pkt1, *pkt = &pkt1;
466  av_init_packet(pkt);
467  pkt->data = NULL;
468  pkt->size = 0;
469  pkt->stream_index = stream_index;
470  return packet_queue_put(q, pkt);
471 }
472 
473 /* packet queue handling */
475 {
476  memset(q, 0, sizeof(PacketQueue));
477  q->mutex = SDL_CreateMutex();
478  if (!q->mutex) {
479  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
480  return AVERROR(ENOMEM);
481  }
482  q->cond = SDL_CreateCond();
483  if (!q->cond) {
484  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
485  return AVERROR(ENOMEM);
486  }
487  q->abort_request = 1;
488  return 0;
489 }
490 
492 {
493  MyAVPacketList *pkt, *pkt1;
494 
495  SDL_LockMutex(q->mutex);
496  for (pkt = q->first_pkt; pkt; pkt = pkt1) {
497  pkt1 = pkt->next;
498  av_packet_unref(&pkt->pkt);
499  av_freep(&pkt);
500  }
501  q->last_pkt = NULL;
502  q->first_pkt = NULL;
503  q->nb_packets = 0;
504  q->size = 0;
505  q->duration = 0;
506  SDL_UnlockMutex(q->mutex);
507 }
508 
510 {
512  SDL_DestroyMutex(q->mutex);
513  SDL_DestroyCond(q->cond);
514 }
515 
517 {
518  SDL_LockMutex(q->mutex);
519 
520  q->abort_request = 1;
521 
522  SDL_CondSignal(q->cond);
523 
524  SDL_UnlockMutex(q->mutex);
525 }
526 
528 {
529  SDL_LockMutex(q->mutex);
530  q->abort_request = 0;
531  packet_queue_put_private(q, &flush_pkt);
532  SDL_UnlockMutex(q->mutex);
533 }
534 
535 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
536 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
537 {
538  MyAVPacketList *pkt1;
539  int ret;
540 
541  SDL_LockMutex(q->mutex);
542 
543  for (;;) {
544  if (q->abort_request) {
545  ret = -1;
546  break;
547  }
548 
549  pkt1 = q->first_pkt;
550  if (pkt1) {
551  q->first_pkt = pkt1->next;
552  if (!q->first_pkt)
553  q->last_pkt = NULL;
554  q->nb_packets--;
555  q->size -= pkt1->pkt.size + sizeof(*pkt1);
556  q->duration -= pkt1->pkt.duration;
557  *pkt = pkt1->pkt;
558  if (serial)
559  *serial = pkt1->serial;
560  av_free(pkt1);
561  ret = 1;
562  break;
563  } else if (!block) {
564  ret = 0;
565  break;
566  } else {
567  SDL_CondWait(q->cond, q->mutex);
568  }
569  }
570  SDL_UnlockMutex(q->mutex);
571  return ret;
572 }
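The PacketQueue above is a mutex/condition FIFO with two ffplay-specific twists: a serial counter that is bumped every time flush_pkt is queued, so packets decoded after a seek can be told apart from stale ones, and an abort_request flag that wakes up and releases any blocked reader. A minimal standalone sketch of the same blocking-get/abort pattern, using plain ints instead of AVPackets (assumes only SDL2; the IntQueue names are illustrative, not ffplay's):

    #include <stdio.h>
    #include <SDL.h>

    typedef struct IntQueue {
        int data[16], widx, ridx, abort_request;
        SDL_mutex *mutex;
        SDL_cond *cond;
    } IntQueue;

    /* Blocking get: returns 1 with a value, or -1 once the queue is aborted. */
    static int queue_get(IntQueue *q, int *val)
    {
        int ret;
        SDL_LockMutex(q->mutex);
        for (;;) {
            if (q->abort_request) { ret = -1; break; }
            if (q->ridx < q->widx) { *val = q->data[q->ridx++ & 15]; ret = 1; break; }
            SDL_CondWait(q->cond, q->mutex);   /* mutex is released while waiting */
        }
        SDL_UnlockMutex(q->mutex);
        return ret;
    }

    static int producer(void *arg)
    {
        IntQueue *q = arg;
        int i;
        for (i = 0; i < 5; i++) {
            SDL_LockMutex(q->mutex);
            q->data[q->widx++ & 15] = i;
            SDL_CondSignal(q->cond);           /* wake a blocked consumer */
            SDL_UnlockMutex(q->mutex);
            SDL_Delay(10);
        }
        SDL_LockMutex(q->mutex);
        q->abort_request = 1;                  /* like packet_queue_abort() */
        SDL_CondSignal(q->cond);
        SDL_UnlockMutex(q->mutex);
        return 0;
    }

    int main(int argc, char **argv)
    {
        IntQueue q = { {0} };
        SDL_Thread *tid;
        int v;
        SDL_Init(0);
        q.mutex = SDL_CreateMutex();
        q.cond  = SDL_CreateCond();
        tid = SDL_CreateThread(producer, "producer", &q);
        while (queue_get(&q, &v) > 0)
            printf("got %d\n", v);             /* prints 0..4, then the abort ends the loop */
        SDL_WaitThread(tid, NULL);
        SDL_DestroyCond(q.cond);
        SDL_DestroyMutex(q.mutex);
        SDL_Quit();
        return 0;
    }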
573 
574 static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
575  memset(d, 0, sizeof(Decoder));
576  d->avctx = avctx;
577  d->queue = queue;
578  d->empty_queue_cond = empty_queue_cond;
580  d->pkt_serial = -1;
581 }
582 
584  int ret = AVERROR(EAGAIN);
585 
586  for (;;) {
587  AVPacket pkt;
588 
589  if (d->queue->serial == d->pkt_serial) {
590  do {
591  if (d->queue->abort_request)
592  return -1;
593 
594  switch (d->avctx->codec_type) {
595  case AVMEDIA_TYPE_VIDEO:
596  ret = avcodec_receive_frame(d->avctx, frame);
597  if (ret >= 0) {
598  if (decoder_reorder_pts == -1) {
599  frame->pts = frame->best_effort_timestamp;
600  } else if (!decoder_reorder_pts) {
601  frame->pts = frame->pkt_dts;
602  }
603  }
604  break;
605  case AVMEDIA_TYPE_AUDIO:
606  ret = avcodec_receive_frame(d->avctx, frame);
607  if (ret >= 0) {
608  AVRational tb = (AVRational){1, frame->sample_rate};
609  if (frame->pts != AV_NOPTS_VALUE)
610  frame->pts = av_rescale_q(frame->pts, av_codec_get_pkt_timebase(d->avctx), tb);
611  else if (d->next_pts != AV_NOPTS_VALUE)
612  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
613  if (frame->pts != AV_NOPTS_VALUE) {
614  d->next_pts = frame->pts + frame->nb_samples;
615  d->next_pts_tb = tb;
616  }
617  }
618  break;
619  }
620  if (ret == AVERROR_EOF) {
621  d->finished = d->pkt_serial;
623  return 0;
624  }
625  if (ret >= 0)
626  return 1;
627  } while (ret != AVERROR(EAGAIN));
628  }
629 
630  do {
631  if (d->queue->nb_packets == 0)
632  SDL_CondSignal(d->empty_queue_cond);
633  if (d->packet_pending) {
634  av_packet_move_ref(&pkt, &d->pkt);
635  d->packet_pending = 0;
636  } else {
637  if (packet_queue_get(d->queue, &pkt, 1, &d->pkt_serial) < 0)
638  return -1;
639  }
640  } while (d->queue->serial != d->pkt_serial);
641 
642  if (pkt.data == flush_pkt.data) {
644  d->finished = 0;
645  d->next_pts = d->start_pts;
646  d->next_pts_tb = d->start_pts_tb;
647  } else {
649  int got_frame = 0;
650  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &pkt);
651  if (ret < 0) {
652  ret = AVERROR(EAGAIN);
653  } else {
654  if (got_frame && !pkt.data) {
655  d->packet_pending = 1;
656  av_packet_move_ref(&d->pkt, &pkt);
657  }
658  ret = got_frame ? 0 : (pkt.data ? AVERROR(EAGAIN) : AVERROR_EOF);
659  }
660  } else {
661  if (avcodec_send_packet(d->avctx, &pkt) == AVERROR(EAGAIN)) {
662  av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
663  d->packet_pending = 1;
664  av_packet_move_ref(&d->pkt, &pkt);
665  }
666  }
667  av_packet_unref(&pkt);
668  }
669  }
670 }
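decoder_decode_frame() above wraps libavcodec's send/receive API: it drains avcodec_receive_frame() until EAGAIN, then pulls the next packet from its PacketQueue, flushing the codec state when it sees flush_pkt and falling back to avcodec_decode_subtitle2() for subtitle streams. Stripped of the queue, serial and subtitle handling, the underlying decode loop is the canonical pattern below (a generic sketch, not ffplay's exact control flow; on_frame is a placeholder callback):

    #include "libavcodec/avcodec.h"

    /* Decode one packet (or flush with pkt == NULL) and hand every
     * resulting frame to a callback. Returns 0 or a negative AVERROR. */
    static int decode_packet(AVCodecContext *avctx, const AVPacket *pkt,
                             void (*on_frame)(const AVFrame *))
    {
        AVFrame *frame = av_frame_alloc();
        int ret;

        if (!frame)
            return AVERROR(ENOMEM);

        ret = avcodec_send_packet(avctx, pkt);
        while (ret >= 0) {
            ret = avcodec_receive_frame(avctx, frame);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                ret = 0;            /* need more input / fully drained */
                break;
            }
            if (ret < 0)
                break;              /* real decoding error */
            on_frame(frame);
            av_frame_unref(frame);
        }
        av_frame_free(&frame);
        return ret;
    }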
671 
672 static void decoder_destroy(Decoder *d) {
673  av_packet_unref(&d->pkt);
675 }
676 
678 {
679  av_frame_unref(vp->frame);
680  avsubtitle_free(&vp->sub);
681 }
682 
683 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
684 {
685  int i;
686  memset(f, 0, sizeof(FrameQueue));
687  if (!(f->mutex = SDL_CreateMutex())) {
688  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
689  return AVERROR(ENOMEM);
690  }
691  if (!(f->cond = SDL_CreateCond())) {
692  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
693  return AVERROR(ENOMEM);
694  }
695  f->pktq = pktq;
696  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
697  f->keep_last = !!keep_last;
698  for (i = 0; i < f->max_size; i++)
699  if (!(f->queue[i].frame = av_frame_alloc()))
700  return AVERROR(ENOMEM);
701  return 0;
702 }
703 
705 {
706  int i;
707  for (i = 0; i < f->max_size; i++) {
708  Frame *vp = &f->queue[i];
710  av_frame_free(&vp->frame);
711  }
712  SDL_DestroyMutex(f->mutex);
713  SDL_DestroyCond(f->cond);
714 }
715 
717 {
718  SDL_LockMutex(f->mutex);
719  SDL_CondSignal(f->cond);
720  SDL_UnlockMutex(f->mutex);
721 }
722 
724 {
725  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
726 }
727 
729 {
730  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
731 }
732 
734 {
735  return &f->queue[f->rindex];
736 }
737 
739 {
740  /* wait until we have space to put a new frame */
741  SDL_LockMutex(f->mutex);
742  while (f->size >= f->max_size &&
743  !f->pktq->abort_request) {
744  SDL_CondWait(f->cond, f->mutex);
745  }
746  SDL_UnlockMutex(f->mutex);
747 
748  if (f->pktq->abort_request)
749  return NULL;
750 
751  return &f->queue[f->windex];
752 }
753 
755 {
756  /* wait until we have a readable new frame */
757  SDL_LockMutex(f->mutex);
758  while (f->size - f->rindex_shown <= 0 &&
759  !f->pktq->abort_request) {
760  SDL_CondWait(f->cond, f->mutex);
761  }
762  SDL_UnlockMutex(f->mutex);
763 
764  if (f->pktq->abort_request)
765  return NULL;
766 
767  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
768 }
769 
771 {
772  if (++f->windex == f->max_size)
773  f->windex = 0;
774  SDL_LockMutex(f->mutex);
775  f->size++;
776  SDL_CondSignal(f->cond);
777  SDL_UnlockMutex(f->mutex);
778 }
779 
781 {
782  if (f->keep_last && !f->rindex_shown) {
783  f->rindex_shown = 1;
784  return;
785  }
787  if (++f->rindex == f->max_size)
788  f->rindex = 0;
789  SDL_LockMutex(f->mutex);
790  f->size--;
791  SDL_CondSignal(f->cond);
792  SDL_UnlockMutex(f->mutex);
793 }
794 
795 /* return the number of undisplayed frames in the queue */
797 {
798  return f->size - f->rindex_shown;
799 }
800 
801 /* return last shown position */
803 {
804  Frame *fp = &f->queue[f->rindex];
805  if (f->rindex_shown && fp->serial == f->pktq->serial)
806  return fp->pos;
807  else
808  return -1;
809 }
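The FrameQueue is a bounded ring buffer with one peculiarity: when keep_last is set, the most recently displayed frame stays readable at rindex (rindex_shown == 1) so the refresh code can re-render it, while frame_queue_peek() already looks at the next undisplayed frame. A standalone toy that reproduces just that indexing behaviour, without the mutex and AVFrame plumbing (illustrative names, not ffplay's):

    #include <stdio.h>

    #define RING_SIZE 3

    typedef struct ToyQueue {
        int queue[RING_SIZE];
        int rindex, windex, size, rindex_shown, keep_last;
    } ToyQueue;

    static void toy_push(ToyQueue *f, int v)
    {
        f->queue[f->windex] = v;
        if (++f->windex == RING_SIZE)
            f->windex = 0;
        f->size++;
    }

    /* Same logic as frame_queue_next(): the first call only marks the
     * current frame as shown, later calls actually advance rindex. */
    static void toy_next(ToyQueue *f)
    {
        if (f->keep_last && !f->rindex_shown) {
            f->rindex_shown = 1;
            return;
        }
        if (++f->rindex == RING_SIZE)
            f->rindex = 0;
        f->size--;
    }

    static int toy_peek(ToyQueue *f)      /* next frame to display */
    {
        return f->queue[(f->rindex + f->rindex_shown) % RING_SIZE];
    }

    static int toy_peek_last(ToyQueue *f) /* frame currently on screen */
    {
        return f->queue[f->rindex];
    }

    int main(void)
    {
        ToyQueue f = { .keep_last = 1 };
        toy_push(&f, 10);
        toy_push(&f, 11);
        toy_next(&f);   /* "display" 10: only sets rindex_shown */
        printf("last=%d peek=%d undisplayed=%d\n",
               toy_peek_last(&f), toy_peek(&f), f.size - f.rindex_shown);
        /* prints: last=10 peek=11 undisplayed=1 */
        return 0;
    }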
810 
811 static void decoder_abort(Decoder *d, FrameQueue *fq)
812 {
814  frame_queue_signal(fq);
815  SDL_WaitThread(d->decoder_tid, NULL);
816  d->decoder_tid = NULL;
818 }
819 
820 static inline void fill_rectangle(int x, int y, int w, int h)
821 {
822  SDL_Rect rect;
823  rect.x = x;
824  rect.y = y;
825  rect.w = w;
826  rect.h = h;
827  if (w && h)
828  SDL_RenderFillRect(renderer, &rect);
829 }
830 
831 static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
832 {
833  Uint32 format;
834  int access, w, h;
835  if (SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
836  void *pixels;
837  int pitch;
838  SDL_DestroyTexture(*texture);
839  if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
840  return -1;
841  if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
842  return -1;
843  if (init_texture) {
844  if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
845  return -1;
846  memset(pixels, 0, pitch * new_height);
847  SDL_UnlockTexture(*texture);
848  }
849  av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n", new_width, new_height, SDL_GetPixelFormatName(new_format));
850  }
851  return 0;
852 }
853 
854 static void calculate_display_rect(SDL_Rect *rect,
855  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
856  int pic_width, int pic_height, AVRational pic_sar)
857 {
858  float aspect_ratio;
859  int width, height, x, y;
860 
861  if (pic_sar.num == 0)
862  aspect_ratio = 0;
863  else
864  aspect_ratio = av_q2d(pic_sar);
865 
866  if (aspect_ratio <= 0.0)
867  aspect_ratio = 1.0;
868  aspect_ratio *= (float)pic_width / (float)pic_height;
869 
870  /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
871  height = scr_height;
872  width = lrint(height * aspect_ratio) & ~1;
873  if (width > scr_width) {
874  width = scr_width;
875  height = lrint(width / aspect_ratio) & ~1;
876  }
877  x = (scr_width - width) / 2;
878  y = (scr_height - height) / 2;
879  rect->x = scr_xleft + x;
880  rect->y = scr_ytop + y;
881  rect->w = FFMAX(width, 1);
882  rect->h = FFMAX(height, 1);
883 }
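As a worked example of the letterboxing math above: a 1920x1080 frame with a 1:1 sample aspect ratio shown in a 640x480 area gives aspect_ratio = 1920/1080 ≈ 1.778; the height-first guess (480 * 1.778 ≈ 853, rounded down to an even 852) is wider than the screen, so the width is clamped to 640 and the height becomes lrint(640 / 1.778) & ~1 = 360, centred at x = 0, y = 60. The same numbers, reproduced with a trimmed standalone copy of the function (no SDL types; names are ours):

    #include <stdio.h>
    #include <math.h>

    static void display_rect(int scr_w, int scr_h, int pic_w, int pic_h,
                             double sar, int *x, int *y, int *w, int *h)
    {
        double ar = (sar > 0 ? sar : 1.0) * (double)pic_w / (double)pic_h;
        *h = scr_h;
        *w = (int)lrint(*h * ar) & ~1;   /* even widths/heights only */
        if (*w > scr_w) {
            *w = scr_w;
            *h = (int)lrint(*w / ar) & ~1;
        }
        *x = (scr_w - *w) / 2;
        *y = (scr_h - *h) / 2;
    }

    int main(void)
    {
        int x, y, w, h;
        display_rect(640, 480, 1920, 1080, 1.0, &x, &y, &w, &h);
        printf("%dx%d at (%d,%d)\n", w, h, x, y); /* 640x360 at (0,60) */
        return 0;
    }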
884 
885 static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
886 {
887  int i;
888  *sdl_blendmode = SDL_BLENDMODE_NONE;
889  *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN;
890  if (format == AV_PIX_FMT_RGB32 ||
891  format == AV_PIX_FMT_RGB32_1 ||
892  format == AV_PIX_FMT_BGR32 ||
893  format == AV_PIX_FMT_BGR32_1)
894  *sdl_blendmode = SDL_BLENDMODE_BLEND;
895  for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; i++) {
896  if (format == sdl_texture_format_map[i].format) {
897  *sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt;
898  return;
899  }
900  }
901 }
902 
903 static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext **img_convert_ctx) {
904  int ret = 0;
905  Uint32 sdl_pix_fmt;
906  SDL_BlendMode sdl_blendmode;
907  get_sdl_pix_fmt_and_blendmode(frame->format, &sdl_pix_fmt, &sdl_blendmode);
908  if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
909  return -1;
910  switch (sdl_pix_fmt) {
911  case SDL_PIXELFORMAT_UNKNOWN:
912  /* This should only happen if we are not using avfilter... */
913  *img_convert_ctx = sws_getCachedContext(*img_convert_ctx,
914  frame->width, frame->height, frame->format, frame->width, frame->height,
916  if (*img_convert_ctx != NULL) {
917  uint8_t *pixels[4];
918  int pitch[4];
919  if (!SDL_LockTexture(*tex, NULL, (void **)pixels, pitch)) {
920  sws_scale(*img_convert_ctx, (const uint8_t * const *)frame->data, frame->linesize,
921  0, frame->height, pixels, pitch);
922  SDL_UnlockTexture(*tex);
923  }
924  } else {
925  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
926  ret = -1;
927  }
928  break;
929  case SDL_PIXELFORMAT_IYUV:
930  if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
931  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0],
932  frame->data[1], frame->linesize[1],
933  frame->data[2], frame->linesize[2]);
934  } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
935  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0],
936  frame->data[1] + frame->linesize[1] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[1],
937  frame->data[2] + frame->linesize[2] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[2]);
938  } else {
939  av_log(NULL, AV_LOG_ERROR, "Mixed negative and positive linesizes are not supported.\n");
940  return -1;
941  }
942  break;
943  default:
944  if (frame->linesize[0] < 0) {
945  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
946  } else {
947  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0], frame->linesize[0]);
948  }
949  break;
950  }
951  return ret;
952 }
953 
955 {
956  Frame *vp;
957  Frame *sp = NULL;
958  SDL_Rect rect;
959 
960  vp = frame_queue_peek_last(&is->pictq);
961  if (is->subtitle_st) {
962  if (frame_queue_nb_remaining(&is->subpq) > 0) {
963  sp = frame_queue_peek(&is->subpq);
964 
965  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
966  if (!sp->uploaded) {
967  uint8_t* pixels[4];
968  int pitch[4];
969  int i;
970  if (!sp->width || !sp->height) {
971  sp->width = vp->width;
972  sp->height = vp->height;
973  }
974  if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
975  return;
976 
977  for (i = 0; i < sp->sub.num_rects; i++) {
978  AVSubtitleRect *sub_rect = sp->sub.rects[i];
979 
980  sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
981  sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
982  sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x);
983  sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
984 
986  sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8,
987  sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA,
988  0, NULL, NULL, NULL);
989  if (!is->sub_convert_ctx) {
990  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
991  return;
992  }
993  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) {
994  sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
995  0, sub_rect->h, pixels, pitch);
996  SDL_UnlockTexture(is->sub_texture);
997  }
998  }
999  sp->uploaded = 1;
1000  }
1001  } else
1002  sp = NULL;
1003  }
1004  }
1005 
1006  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
1007 
1008  if (!vp->uploaded) {
1009  if (upload_texture(&is->vid_texture, vp->frame, &is->img_convert_ctx) < 0)
1010  return;
1011  vp->uploaded = 1;
1012  vp->flip_v = vp->frame->linesize[0] < 0;
1013  }
1014 
1015  SDL_RenderCopyEx(renderer, is->vid_texture, NULL, &rect, 0, NULL, vp->flip_v ? SDL_FLIP_VERTICAL : 0);
1016  if (sp) {
1017 #if USE_ONEPASS_SUBTITLE_RENDER
1018  SDL_RenderCopy(renderer, is->sub_texture, NULL, &rect);
1019 #else
1020  int i;
1021  double xratio = (double)rect.w / (double)sp->width;
1022  double yratio = (double)rect.h / (double)sp->height;
1023  for (i = 0; i < sp->sub.num_rects; i++) {
1024  SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
1025  SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
1026  .y = rect.y + sub_rect->y * yratio,
1027  .w = sub_rect->w * xratio,
1028  .h = sub_rect->h * yratio};
1029  SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
1030  }
1031 #endif
1032  }
1033 }
1034 
1035 static inline int compute_mod(int a, int b)
1036 {
1037  return a < 0 ? a%b + b : a%b;
1038 }
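compute_mod() is a modulo that never goes negative, which matters because the waveform display walks backwards through sample_array from the current write position. For instance, compute_mod(-3, SAMPLE_ARRAY_SIZE) yields SAMPLE_ARRAY_SIZE - 3 (524285) rather than the -3 that C's % operator would produce, so the index wraps around to the end of the circular buffer instead of underflowing.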
1039 
1041 {
1042  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1043  int ch, channels, h, h2;
1044  int64_t time_diff;
1045  int rdft_bits, nb_freq;
1046 
1047  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1048  ;
1049  nb_freq = 1 << (rdft_bits - 1);
1050 
1051  /* compute display index: center on currently output samples */
1052  channels = s->audio_tgt.channels;
1053  nb_display_channels = channels;
1054  if (!s->paused) {
1055  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1056  n = 2 * channels;
1057  delay = s->audio_write_buf_size;
1058  delay /= n;
1059 
1060  /* to be more precise, we take into account the time spent since
1061  the last buffer computation */
1062  if (audio_callback_time) {
1063  time_diff = av_gettime_relative() - audio_callback_time;
1064  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1065  }
1066 
1067  delay += 2 * data_used;
1068  if (delay < data_used)
1069  delay = data_used;
1070 
1071  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1072  if (s->show_mode == SHOW_MODE_WAVES) {
1073  h = INT_MIN;
1074  for (i = 0; i < 1000; i += channels) {
1075  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1076  int a = s->sample_array[idx];
1077  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1078  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1079  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1080  int score = a - d;
1081  if (h < score && (b ^ c) < 0) {
1082  h = score;
1083  i_start = idx;
1084  }
1085  }
1086  }
1087 
1088  s->last_i_start = i_start;
1089  } else {
1090  i_start = s->last_i_start;
1091  }
1092 
1093  if (s->show_mode == SHOW_MODE_WAVES) {
1094  SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
1095 
1096  /* total height for one channel */
1097  h = s->height / nb_display_channels;
1098  /* graph height / 2 */
1099  h2 = (h * 9) / 20;
1100  for (ch = 0; ch < nb_display_channels; ch++) {
1101  i = i_start + ch;
1102  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1103  for (x = 0; x < s->width; x++) {
1104  y = (s->sample_array[i] * h2) >> 15;
1105  if (y < 0) {
1106  y = -y;
1107  ys = y1 - y;
1108  } else {
1109  ys = y1;
1110  }
1111  fill_rectangle(s->xleft + x, ys, 1, y);
1112  i += channels;
1113  if (i >= SAMPLE_ARRAY_SIZE)
1114  i -= SAMPLE_ARRAY_SIZE;
1115  }
1116  }
1117 
1118  SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
1119 
1120  for (ch = 1; ch < nb_display_channels; ch++) {
1121  y = s->ytop + ch * h;
1122  fill_rectangle(s->xleft, y, s->width, 1);
1123  }
1124  } else {
1125  if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
1126  return;
1127 
1128  nb_display_channels= FFMIN(nb_display_channels, 2);
1129  if (rdft_bits != s->rdft_bits) {
1130  av_rdft_end(s->rdft);
1131  av_free(s->rdft_data);
1132  s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
1133  s->rdft_bits = rdft_bits;
1134  s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data));
1135  }
1136  if (!s->rdft || !s->rdft_data){
1137  av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1138  s->show_mode = SHOW_MODE_WAVES;
1139  } else {
1140  FFTSample *data[2];
1141  SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
1142  uint32_t *pixels;
1143  int pitch;
1144  for (ch = 0; ch < nb_display_channels; ch++) {
1145  data[ch] = s->rdft_data + 2 * nb_freq * ch;
1146  i = i_start + ch;
1147  for (x = 0; x < 2 * nb_freq; x++) {
1148  double w = (x-nb_freq) * (1.0 / nb_freq);
1149  data[ch][x] = s->sample_array[i] * (1.0 - w * w);
1150  i += channels;
1151  if (i >= SAMPLE_ARRAY_SIZE)
1152  i -= SAMPLE_ARRAY_SIZE;
1153  }
1154  av_rdft_calc(s->rdft, data[ch]);
1155  }
1156  /* Least efficient way to do this; we could of course
1157  * access the texture directly, but it is more than fast enough. */
1158  if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
1159  pitch >>= 2;
1160  pixels += pitch * s->height;
1161  for (y = 0; y < s->height; y++) {
1162  double w = 1 / sqrt(nb_freq);
1163  int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
1164  int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][2 * y + 0], data[1][2 * y + 1]))
1165  : a;
1166  a = FFMIN(a, 255);
1167  b = FFMIN(b, 255);
1168  pixels -= pitch;
1169  *pixels = (a << 16) + (b << 8) + ((a+b) >> 1);
1170  }
1171  SDL_UnlockTexture(s->vis_texture);
1172  }
1173  SDL_RenderCopy(renderer, s->vis_texture, NULL, NULL);
1174  }
1175  if (!s->paused)
1176  s->xpos++;
1177  if (s->xpos >= s->width)
1178  s->xpos= s->xleft;
1179  }
1180 }
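The spectrum display above feeds 2*nb_freq windowed samples per channel into libavcodec's real FFT (av_rdft_calc() works in place on an array of 2^rdft_bits FFTSamples) and paints one pixel column per refresh. A minimal, self-contained use of that RDFT API, hedged to the avfft.h interface included at the top of this file (this interface is deprecated in newer FFmpeg releases in favour of libavutil/tx.h):

    #include <stdio.h>
    #include "libavcodec/avfft.h"

    int main(void)
    {
        enum { BITS = 5, N = 1 << BITS };     /* 32-point real FFT */
        FFTSample data[N];
        RDFTContext *rdft = av_rdft_init(BITS, DFT_R2C);
        int i;

        if (!rdft)
            return 1;
        for (i = 0; i < N; i++)               /* a constant (DC) signal */
            data[i] = 1.0f;
        av_rdft_calc(rdft, data);             /* in-place, packed real output */
        printf("DC bin: %f\n", data[0]);      /* all the energy lands in bin 0 */
        av_rdft_end(rdft);
        return 0;
    }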
1181 
1182 static void stream_component_close(VideoState *is, int stream_index)
1183 {
1184  AVFormatContext *ic = is->ic;
1185  AVCodecParameters *codecpar;
1186 
1187  if (stream_index < 0 || stream_index >= ic->nb_streams)
1188  return;
1189  codecpar = ic->streams[stream_index]->codecpar;
1190 
1191  switch (codecpar->codec_type) {
1192  case AVMEDIA_TYPE_AUDIO:
1193  decoder_abort(&is->auddec, &is->sampq);
1194  SDL_CloseAudio();
1195  decoder_destroy(&is->auddec);
1196  swr_free(&is->swr_ctx);
1197  av_freep(&is->audio_buf1);
1198  is->audio_buf1_size = 0;
1199  is->audio_buf = NULL;
1200 
1201  if (is->rdft) {
1202  av_rdft_end(is->rdft);
1203  av_freep(&is->rdft_data);
1204  is->rdft = NULL;
1205  is->rdft_bits = 0;
1206  }
1207  break;
1208  case AVMEDIA_TYPE_VIDEO:
1209  decoder_abort(&is->viddec, &is->pictq);
1210  decoder_destroy(&is->viddec);
1211  break;
1212  case AVMEDIA_TYPE_SUBTITLE:
1213  decoder_abort(&is->subdec, &is->subpq);
1214  decoder_destroy(&is->subdec);
1215  break;
1216  default:
1217  break;
1218  }
1219 
1220  ic->streams[stream_index]->discard = AVDISCARD_ALL;
1221  switch (codecpar->codec_type) {
1222  case AVMEDIA_TYPE_AUDIO:
1223  is->audio_st = NULL;
1224  is->audio_stream = -1;
1225  break;
1226  case AVMEDIA_TYPE_VIDEO:
1227  is->video_st = NULL;
1228  is->video_stream = -1;
1229  break;
1230  case AVMEDIA_TYPE_SUBTITLE:
1231  is->subtitle_st = NULL;
1232  is->subtitle_stream = -1;
1233  break;
1234  default:
1235  break;
1236  }
1237 }
1238 
1239 static void stream_close(VideoState *is)
1240 {
1241  /* XXX: use a special url_shutdown call to abort parse cleanly */
1242  is->abort_request = 1;
1243  SDL_WaitThread(is->read_tid, NULL);
1244 
1245  /* close each stream */
1246  if (is->audio_stream >= 0)
1248  if (is->video_stream >= 0)
1250  if (is->subtitle_stream >= 0)
1252 
1253  avformat_close_input(&is->ic);
1254 
1258 
1259  /* free all pictures */
1260  frame_queue_destory(&is->pictq);
1261  frame_queue_destory(&is->sampq);
1262  frame_queue_destory(&is->subpq);
1263  SDL_DestroyCond(is->continue_read_thread);
1266  av_free(is->filename);
1267  if (is->vis_texture)
1268  SDL_DestroyTexture(is->vis_texture);
1269  if (is->vid_texture)
1270  SDL_DestroyTexture(is->vid_texture);
1271  if (is->sub_texture)
1272  SDL_DestroyTexture(is->sub_texture);
1273  av_free(is);
1274 }
1275 
1276 static void do_exit(VideoState *is)
1277 {
1278  if (is) {
1279  stream_close(is);
1280  }
1281  if (renderer)
1282  SDL_DestroyRenderer(renderer);
1283  if (window)
1284  SDL_DestroyWindow(window);
1286  uninit_opts();
1287 #if CONFIG_AVFILTER
1288  av_freep(&vfilters_list);
1289 #endif
1291  if (show_status)
1292  printf("\n");
1293  SDL_Quit();
1294  av_log(NULL, AV_LOG_QUIET, "%s", "");
1295  exit(0);
1296 }
1297 
1298 static void sigterm_handler(int sig)
1299 {
1300  exit(123);
1301 }
1302 
1304 {
1305  SDL_Rect rect;
1306  calculate_display_rect(&rect, 0, 0, INT_MAX, height, width, height, sar);
1307  default_width = rect.w;
1308  default_height = rect.h;
1309 }
1310 
1311 static int video_open(VideoState *is)
1312 {
1313  int w,h;
1314 
1315  if (screen_width) {
1316  w = screen_width;
1317  h = screen_height;
1318  } else {
1319  w = default_width;
1320  h = default_height;
1321  }
1322 
1323  if (!window) {
1324  int flags = SDL_WINDOW_SHOWN;
1325  if (!window_title)
1327  if (is_full_screen)
1328  flags |= SDL_WINDOW_FULLSCREEN_DESKTOP;
1329  if (borderless)
1330  flags |= SDL_WINDOW_BORDERLESS;
1331  else
1332  flags |= SDL_WINDOW_RESIZABLE;
1333  window = SDL_CreateWindow(window_title, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, w, h, flags);
1334  SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
1335  if (window) {
1336  SDL_RendererInfo info;
1337  renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
1338  if (!renderer) {
1339  av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
1340  renderer = SDL_CreateRenderer(window, -1, 0);
1341  }
1342  if (renderer) {
1343  if (!SDL_GetRendererInfo(renderer, &info))
1344  av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", info.name);
1345  }
1346  }
1347  } else {
1348  SDL_SetWindowSize(window, w, h);
1349  }
1350 
1351  if (!window || !renderer) {
1352  av_log(NULL, AV_LOG_FATAL, "SDL: could not set video mode - exiting\n");
1353  do_exit(is);
1354  }
1355 
1356  is->width = w;
1357  is->height = h;
1358 
1359  return 0;
1360 }
1361 
1362 /* display the current picture, if any */
1363 static void video_display(VideoState *is)
1364 {
1365  if (!window)
1366  video_open(is);
1367 
1368  SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1369  SDL_RenderClear(renderer);
1370  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1371  video_audio_display(is);
1372  else if (is->video_st)
1373  video_image_display(is);
1374  SDL_RenderPresent(renderer);
1375 }
1376 
1377 static double get_clock(Clock *c)
1378 {
1379  if (*c->queue_serial != c->serial)
1380  return NAN;
1381  if (c->paused) {
1382  return c->pts;
1383  } else {
1384  double time = av_gettime_relative() / 1000000.0;
1385  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1386  }
1387 }
1388 
1389 static void set_clock_at(Clock *c, double pts, int serial, double time)
1390 {
1391  c->pts = pts;
1392  c->last_updated = time;
1393  c->pts_drift = c->pts - time;
1394  c->serial = serial;
1395 }
1396 
1397 static void set_clock(Clock *c, double pts, int serial)
1398 {
1399  double time = av_gettime_relative() / 1000000.0;
1400  set_clock_at(c, pts, serial, time);
1401 }
1402 
1403 static void set_clock_speed(Clock *c, double speed)
1404 {
1405  set_clock(c, get_clock(c), c->serial);
1406  c->speed = speed;
1407 }
1408 
1409 static void init_clock(Clock *c, int *queue_serial)
1410 {
1411  c->speed = 1.0;
1412  c->paused = 0;
1413  c->queue_serial = queue_serial;
1414  set_clock(c, NAN, -1);
1415 }
1416 
1417 static void sync_clock_to_slave(Clock *c, Clock *slave)
1418 {
1419  double clock = get_clock(c);
1420  double slave_clock = get_clock(slave);
1421  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1422  set_clock(c, slave_clock, slave->serial);
1423 }
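The clock functions above never keep a running counter; set_clock_at() stores pts_drift = pts - wall_time, so get_clock() can reconstruct the current position as pts_drift + now, with an extra correction term whenever speed != 1. A compact standalone check of that arithmetic (plain C, no ffplay types; the toy_* names are ours, and "now" replaces av_gettime_relative()):

    #include <stdio.h>

    typedef struct { double pts, pts_drift, last_updated, speed; } ToyClock;

    static void toy_set(ToyClock *c, double pts, double now)
    {
        c->pts = pts;
        c->last_updated = now;
        c->pts_drift = pts - now;        /* same bookkeeping as set_clock_at() */
    }

    static double toy_get(const ToyClock *c, double now)
    {
        return c->pts_drift + now - (now - c->last_updated) * (1.0 - c->speed);
    }

    int main(void)
    {
        ToyClock c = { .speed = 1.0 };
        toy_set(&c, 10.0, 100.0);                   /* pts 10.0 at wall time 100.0 */
        printf("%.2f\n", toy_get(&c, 100.5));       /* 10.50 */
        toy_set(&c, toy_get(&c, 100.5), 100.5);     /* re-base, like set_clock_speed() */
        c.speed = 1.1;
        printf("%.2f\n", toy_get(&c, 101.5));       /* 11.60: 1.1 s of media per wall second */
        return 0;
    }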
1424 
1426  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1427  if (is->video_st)
1428  return AV_SYNC_VIDEO_MASTER;
1429  else
1430  return AV_SYNC_AUDIO_MASTER;
1431  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1432  if (is->audio_st)
1433  return AV_SYNC_AUDIO_MASTER;
1434  else
1435  return AV_SYNC_EXTERNAL_CLOCK;
1436  } else {
1437  return AV_SYNC_EXTERNAL_CLOCK;
1438  }
1439 }
1440 
1441 /* get the current master clock value */
1442 static double get_master_clock(VideoState *is)
1443 {
1444  double val;
1445 
1446  switch (get_master_sync_type(is)) {
1447  case AV_SYNC_VIDEO_MASTER:
1448  val = get_clock(&is->vidclk);
1449  break;
1450  case AV_SYNC_AUDIO_MASTER:
1451  val = get_clock(&is->audclk);
1452  break;
1453  default:
1454  val = get_clock(&is->extclk);
1455  break;
1456  }
1457  return val;
1458 }
1459 
1461  if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES ||
1464  } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) &&
1467  } else {
1468  double speed = is->extclk.speed;
1469  if (speed != 1.0)
1470  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1471  }
1472 }
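When ffplay synchronises to the external clock for a realtime source, check_external_clock_speed() nudges the clock speed in EXTERNAL_CLOCK_SPEED_STEP increments, bounded by EXTERNAL_CLOCK_SPEED_MIN and EXTERNAL_CLOCK_SPEED_MAX, based on how full the packet queues are (the branches elided above slow the clock while the queues are nearly empty and speed it up while they are overfull); otherwise the visible last branch walks the speed back toward 1.0. As a worked example of that last branch, a speed of 0.995 becomes 0.996 and a speed of 1.008 becomes 1.007, one step per call, since STEP * (1.0 - speed) / fabs(1.0 - speed) is just ±EXTERNAL_CLOCK_SPEED_STEP in the direction of 1.0.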
1473 
1474 /* seek in the stream */
1475 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1476 {
1477  if (!is->seek_req) {
1478  is->seek_pos = pos;
1479  is->seek_rel = rel;
1480  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1481  if (seek_by_bytes)
1483  is->seek_req = 1;
1484  SDL_CondSignal(is->continue_read_thread);
1485  }
1486 }
1487 
1488 /* pause or resume the video */
1490 {
1491  if (is->paused) {
1492  is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1493  if (is->read_pause_return != AVERROR(ENOSYS)) {
1494  is->vidclk.paused = 0;
1495  }
1496  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1497  }
1498  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1499  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1500 }
1501 
1502 static void toggle_pause(VideoState *is)
1503 {
1504  stream_toggle_pause(is);
1505  is->step = 0;
1506 }
1507 
1508 static void toggle_mute(VideoState *is)
1509 {
1510  is->muted = !is->muted;
1511 }
1512 
1513 static void update_volume(VideoState *is, int sign, double step)
1514 {
1515  double volume_level = is->audio_volume ? (20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)) : -1000.0;
1516  int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (volume_level + sign * step) / 20.0));
1517  is->audio_volume = av_clip(is->audio_volume == new_volume ? (is->audio_volume + sign) : new_volume, 0, SDL_MIX_MAXVOLUME);
1518 }
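update_volume() steps the volume logarithmically: the current SDL volume (0..SDL_MIX_MAXVOLUME, i.e. 0..128) is converted to dB, nudged by sign * step (SDL_VOLUME_STEP is 0.75 dB per keypress), and converted back. For example, at volume 64 the level is 20*log10(64/128) ≈ -6.02 dB; one step up gives -5.27 dB, i.e. lrint(128 * 10^(-5.27/20)) = 70. A standalone check of that arithmetic (constants copied from the macros above):

    #include <stdio.h>
    #include <math.h>

    #define MIX_MAXVOLUME 128   /* SDL_MIX_MAXVOLUME */
    #define VOLUME_STEP   0.75  /* dB per step, as in SDL_VOLUME_STEP */

    int main(void)
    {
        int volume = 64;
        double level = 20.0 * log10(volume / (double)MIX_MAXVOLUME);
        int up = (int)lrint(MIX_MAXVOLUME * pow(10.0, (level + VOLUME_STEP) / 20.0));
        printf("%d -> %d (%.2f dB -> %.2f dB)\n", volume, up, level, level + VOLUME_STEP);
        /* prints: 64 -> 70 (-6.02 dB -> -5.27 dB) */
        return 0;
    }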
1519 
1521 {
1522  /* if the stream is paused unpause it, then step */
1523  if (is->paused)
1524  stream_toggle_pause(is);
1525  is->step = 1;
1526 }
1527 
1528 static double compute_target_delay(double delay, VideoState *is)
1529 {
1530  double sync_threshold, diff = 0;
1531 
1532  /* update delay to follow master synchronisation source */
1534  /* if video is slave, we try to correct big delays by
1535  duplicating or deleting a frame */
1536  diff = get_clock(&is->vidclk) - get_master_clock(is);
1537 
1538  /* skip or repeat frame. We take into account the
1539  delay to compute the threshold. I still don't know
1540  if it is the best guess */
1541  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1542  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1543  if (diff <= -sync_threshold)
1544  delay = FFMAX(0, delay + diff);
1545  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1546  delay = delay + diff;
1547  else if (diff >= sync_threshold)
1548  delay = 2 * delay;
1549  }
1550  }
1551 
1552  av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
1553  delay, -diff);
1554 
1555  return delay;
1556 }
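A numeric walk through compute_target_delay(): with 25 fps material the nominal delay is 0.040 s, so sync_threshold clamps to 0.040. If the video clock is 80 ms behind the master (diff = -0.080), the first branch shortens the delay to FFMAX(0, 0.040 - 0.080) = 0 and the next frame is shown immediately; if the video is 80 ms ahead and the frame is short enough to duplicate, the last branch doubles the delay to 0.080 s. The same arithmetic as a standalone function (fmin/fmax stand in for FFMIN/FFMAX, and the check that video is not itself the master clock is omitted):

    #include <stdio.h>
    #include <math.h>

    #define SYNC_MIN      0.04  /* AV_SYNC_THRESHOLD_MIN */
    #define SYNC_MAX      0.1   /* AV_SYNC_THRESHOLD_MAX */
    #define FRAMEDUP_MAX  0.1   /* AV_SYNC_FRAMEDUP_THRESHOLD */

    /* diff = video clock - master clock, in seconds */
    static double target_delay(double delay, double diff, double max_frame_duration)
    {
        double sync_threshold = fmax(SYNC_MIN, fmin(SYNC_MAX, delay));
        if (!isnan(diff) && fabs(diff) < max_frame_duration) {
            if (diff <= -sync_threshold)
                delay = fmax(0, delay + diff);          /* video is late: shrink delay */
            else if (diff >= sync_threshold && delay > FRAMEDUP_MAX)
                delay = delay + diff;                   /* long frame: stretch it */
            else if (diff >= sync_threshold)
                delay = 2 * delay;                      /* video is early: repeat frame */
        }
        return delay;
    }

    int main(void)
    {
        printf("%.3f\n", target_delay(0.040, -0.080, 10.0)); /* 0.000 */
        printf("%.3f\n", target_delay(0.040,  0.080, 10.0)); /* 0.080 */
        return 0;
    }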
1557 
1558 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1559  if (vp->serial == nextvp->serial) {
1560  double duration = nextvp->pts - vp->pts;
1561  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1562  return vp->duration;
1563  else
1564  return duration;
1565  } else {
1566  return 0.0;
1567  }
1568 }
1569 
1570 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1571  /* update current video pts */
1572  set_clock(&is->vidclk, pts, serial);
1573  sync_clock_to_slave(&is->extclk, &is->vidclk);
1574 }
1575 
1576 /* called to display each frame */
1577 static void video_refresh(void *opaque, double *remaining_time)
1578 {
1579  VideoState *is = opaque;
1580  double time;
1581 
1582  Frame *sp, *sp2;
1583 
1584  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1586 
1587  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1588  time = av_gettime_relative() / 1000000.0;
1589  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1590  video_display(is);
1591  is->last_vis_time = time;
1592  }
1593  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1594  }
1595 
1596  if (is->video_st) {
1597 retry:
1598  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1599  // nothing to do, no picture to display in the queue
1600  } else {
1601  double last_duration, duration, delay;
1602  Frame *vp, *lastvp;
1603 
1604  /* dequeue the picture */
1605  lastvp = frame_queue_peek_last(&is->pictq);
1606  vp = frame_queue_peek(&is->pictq);
1607 
1608  if (vp->serial != is->videoq.serial) {
1609  frame_queue_next(&is->pictq);
1610  goto retry;
1611  }
1612 
1613  if (lastvp->serial != vp->serial)
1614  is->frame_timer = av_gettime_relative() / 1000000.0;
1615 
1616  if (is->paused)
1617  goto display;
1618 
1619  /* compute nominal last_duration */
1620  last_duration = vp_duration(is, lastvp, vp);
1621  delay = compute_target_delay(last_duration, is);
1622 
1623  time= av_gettime_relative()/1000000.0;
1624  if (time < is->frame_timer + delay) {
1625  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1626  goto display;
1627  }
1628 
1629  is->frame_timer += delay;
1630  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1631  is->frame_timer = time;
1632 
1633  SDL_LockMutex(is->pictq.mutex);
1634  if (!isnan(vp->pts))
1635  update_video_pts(is, vp->pts, vp->pos, vp->serial);
1636  SDL_UnlockMutex(is->pictq.mutex);
1637 
1638  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1639  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1640  duration = vp_duration(is, vp, nextvp);
1641  if(!is->step && (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1642  is->frame_drops_late++;
1643  frame_queue_next(&is->pictq);
1644  goto retry;
1645  }
1646  }
1647 
1648  if (is->subtitle_st) {
1649  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1650  sp = frame_queue_peek(&is->subpq);
1651 
1652  if (frame_queue_nb_remaining(&is->subpq) > 1)
1653  sp2 = frame_queue_peek_next(&is->subpq);
1654  else
1655  sp2 = NULL;
1656 
1657  if (sp->serial != is->subtitleq.serial
1658  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1659  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1660  {
1661  if (sp->uploaded) {
1662  int i;
1663  for (i = 0; i < sp->sub.num_rects; i++) {
1664  AVSubtitleRect *sub_rect = sp->sub.rects[i];
1665  uint8_t *pixels;
1666  int pitch, j;
1667 
1668  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
1669  for (j = 0; j < sub_rect->h; j++, pixels += pitch)
1670  memset(pixels, 0, sub_rect->w << 2);
1671  SDL_UnlockTexture(is->sub_texture);
1672  }
1673  }
1674  }
1675  frame_queue_next(&is->subpq);
1676  } else {
1677  break;
1678  }
1679  }
1680  }
1681 
1682  frame_queue_next(&is->pictq);
1683  is->force_refresh = 1;
1684 
1685  if (is->step && !is->paused)
1686  stream_toggle_pause(is);
1687  }
1688 display:
1689  /* display picture */
1690  if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
1691  video_display(is);
1692  }
1693  is->force_refresh = 0;
1694  if (show_status) {
1695  static int64_t last_time;
1696  int64_t cur_time;
1697  int aqsize, vqsize, sqsize;
1698  double av_diff;
1699 
1700  cur_time = av_gettime_relative();
1701  if (!last_time || (cur_time - last_time) >= 30000) {
1702  aqsize = 0;
1703  vqsize = 0;
1704  sqsize = 0;
1705  if (is->audio_st)
1706  aqsize = is->audioq.size;
1707  if (is->video_st)
1708  vqsize = is->videoq.size;
1709  if (is->subtitle_st)
1710  sqsize = is->subtitleq.size;
1711  av_diff = 0;
1712  if (is->audio_st && is->video_st)
1713  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1714  else if (is->video_st)
1715  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1716  else if (is->audio_st)
1717  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1719  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1720  get_master_clock(is),
1721  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1722  av_diff,
1724  aqsize / 1024,
1725  vqsize / 1024,
1726  sqsize,
1729  fflush(stdout);
1730  last_time = cur_time;
1731  }
1732  }
1733 }
1734 
1735 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1736 {
1737  Frame *vp;
1738 
1739 #if defined(DEBUG_SYNC)
1740  printf("frame_type=%c pts=%0.3f\n",
1741  av_get_picture_type_char(src_frame->pict_type), pts);
1742 #endif
1743 
1744  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1745  return -1;
1746 
1747  vp->sar = src_frame->sample_aspect_ratio;
1748  vp->uploaded = 0;
1749 
1750  vp->width = src_frame->width;
1751  vp->height = src_frame->height;
1752  vp->format = src_frame->format;
1753 
1754  vp->pts = pts;
1755  vp->duration = duration;
1756  vp->pos = pos;
1757  vp->serial = serial;
1758 
1759  set_default_window_size(vp->width, vp->height, vp->sar);
1760 
1761  av_frame_move_ref(vp->frame, src_frame);
1762  frame_queue_push(&is->pictq);
1763  return 0;
1764 }
1765 
1767 {
1768  int got_picture;
1769 
1770  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1771  return -1;
1772 
1773  if (got_picture) {
1774  double dpts = NAN;
1775 
1776  if (frame->pts != AV_NOPTS_VALUE)
1777  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1778 
1779  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1780 
1782  if (frame->pts != AV_NOPTS_VALUE) {
1783  double diff = dpts - get_master_clock(is);
1784  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1785  diff - is->frame_last_filter_delay < 0 &&
1786  is->viddec.pkt_serial == is->vidclk.serial &&
1787  is->videoq.nb_packets) {
1788  is->frame_drops_early++;
1789  av_frame_unref(frame);
1790  got_picture = 0;
1791  }
1792  }
1793  }
1794  }
1795 
1796  return got_picture;
1797 }
1798 
1799 #if CONFIG_AVFILTER
1800 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1801  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1802 {
1803  int ret, i;
1804  int nb_filters = graph->nb_filters;
1806 
1807  if (filtergraph) {
1808  outputs = avfilter_inout_alloc();
1809  inputs = avfilter_inout_alloc();
1810  if (!outputs || !inputs) {
1811  ret = AVERROR(ENOMEM);
1812  goto fail;
1813  }
1814 
1815  outputs->name = av_strdup("in");
1816  outputs->filter_ctx = source_ctx;
1817  outputs->pad_idx = 0;
1818  outputs->next = NULL;
1819 
1820  inputs->name = av_strdup("out");
1821  inputs->filter_ctx = sink_ctx;
1822  inputs->pad_idx = 0;
1823  inputs->next = NULL;
1824 
1825  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1826  goto fail;
1827  } else {
1828  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1829  goto fail;
1830  }
1831 
1832  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1833  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1834  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1835 
1836  ret = avfilter_graph_config(graph, NULL);
1837 fail:
1838  avfilter_inout_free(&outputs);
1839  avfilter_inout_free(&inputs);
1840  return ret;
1841 }
1842 
1843 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1844 {
1846  char sws_flags_str[512] = "";
1847  char buffersrc_args[256];
1848  int ret;
1849  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1850  AVCodecParameters *codecpar = is->video_st->codecpar;
1851  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1852  AVDictionaryEntry *e = NULL;
1853  int i;
1854 
1855  for (i = 0; i < FF_ARRAY_ELEMS(pix_fmts); i++)
1856  pix_fmts[i] = sdl_texture_format_map[i].format;
1857 
1858  while ((e = av_dict_get(sws_dict, "", e, AV_DICT_IGNORE_SUFFIX))) {
1859  if (!strcmp(e->key, "sws_flags")) {
1860  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1861  } else
1862  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value);
1863  }
1864  if (strlen(sws_flags_str))
1865  sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1866 
1867  graph->scale_sws_opts = av_strdup(sws_flags_str);
1868 
1869  snprintf(buffersrc_args, sizeof(buffersrc_args),
1870  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1871  frame->width, frame->height, frame->format,
1873  codecpar->sample_aspect_ratio.num, FFMAX(codecpar->sample_aspect_ratio.den, 1));
1874  if (fr.num && fr.den)
1875  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1876 
1877  if ((ret = avfilter_graph_create_filter(&filt_src,
1878  avfilter_get_by_name("buffer"),
1879  "ffplay_buffer", buffersrc_args, NULL,
1880  graph)) < 0)
1881  goto fail;
1882 
1883  ret = avfilter_graph_create_filter(&filt_out,
1884  avfilter_get_by_name("buffersink"),
1885  "ffplay_buffersink", NULL, NULL, graph);
1886  if (ret < 0)
1887  goto fail;
1888 
1889  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1890  goto fail;
1891 
1892  last_filter = filt_out;
1893 
1894 /* Note: this macro adds a filter before the most recently added filter, so the
1895  * filters are processed in reverse order of insertion */
1896 #define INSERT_FILT(name, arg) do { \
1897  AVFilterContext *filt_ctx; \
1898  \
1899  ret = avfilter_graph_create_filter(&filt_ctx, \
1900  avfilter_get_by_name(name), \
1901  "ffplay_" name, arg, NULL, graph); \
1902  if (ret < 0) \
1903  goto fail; \
1904  \
1905  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1906  if (ret < 0) \
1907  goto fail; \
1908  \
1909  last_filter = filt_ctx; \
1910 } while (0)
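For example, in the autorotate 180° case below, INSERT_FILT("hflip", NULL) runs first and links hflip straight to the buffersink, then INSERT_FILT("vflip", NULL) links vflip in front of it; frames therefore pass through vflip and then hflip, the reverse of the insertion order, before configure_filtergraph() finally attaches the buffer source (and any user-supplied vfilters chain) to whatever was inserted last.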
1911 
1912  if (autorotate) {
1913  double theta = get_rotation(is->video_st);
1914 
1915  if (fabs(theta - 90) < 1.0) {
1916  INSERT_FILT("transpose", "clock");
1917  } else if (fabs(theta - 180) < 1.0) {
1918  INSERT_FILT("hflip", NULL);
1919  INSERT_FILT("vflip", NULL);
1920  } else if (fabs(theta - 270) < 1.0) {
1921  INSERT_FILT("transpose", "cclock");
1922  } else if (fabs(theta) > 1.0) {
1923  char rotate_buf[64];
1924  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1925  INSERT_FILT("rotate", rotate_buf);
1926  }
1927  }
1928 
1929  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
1930  goto fail;
1931 
1932  is->in_video_filter = filt_src;
1933  is->out_video_filter = filt_out;
1934 
1935 fail:
1936  return ret;
1937 }
1938 
1939 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1940 {
1942  int sample_rates[2] = { 0, -1 };
1943  int64_t channel_layouts[2] = { 0, -1 };
1944  int channels[2] = { 0, -1 };
1945  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1946  char aresample_swr_opts[512] = "";
1947  AVDictionaryEntry *e = NULL;
1948  char asrc_args[256];
1949  int ret;
1950 
1951  avfilter_graph_free(&is->agraph);
1952  if (!(is->agraph = avfilter_graph_alloc()))
1953  return AVERROR(ENOMEM);
1954 
1955  while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
1956  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
1957  if (strlen(aresample_swr_opts))
1958  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
1959  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
1960 
1961  ret = snprintf(asrc_args, sizeof(asrc_args),
1962  "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
1963  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
1964  is->audio_filter_src.channels,
1965  1, is->audio_filter_src.freq);
1966  if (is->audio_filter_src.channel_layout)
1967  snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
1968  ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
1969 
1970  ret = avfilter_graph_create_filter(&filt_asrc,
1971  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1972  asrc_args, NULL, is->agraph);
1973  if (ret < 0)
1974  goto end;
1975 
1976 
1977  ret = avfilter_graph_create_filter(&filt_asink,
1978  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
1979  NULL, NULL, is->agraph);
1980  if (ret < 0)
1981  goto end;
1982 
1983  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1984  goto end;
1985  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1986  goto end;
1987 
1988  if (force_output_format) {
1989  channel_layouts[0] = is->audio_tgt.channel_layout;
1990  channels [0] = is->audio_tgt.channels;
1991  sample_rates [0] = is->audio_tgt.freq;
1992  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
1993  goto end;
1994  if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1995  goto end;
1996  if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1997  goto end;
1998  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1999  goto end;
2000  }
2001 
2002 
2003  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
2004  goto end;
2005 
2006  is->in_audio_filter = filt_asrc;
2007  is->out_audio_filter = filt_asink;
2008 
2009 end:
2010  if (ret < 0)
2011  avfilter_graph_free(&is->agraph);
2012  return ret;
2013 }
2014 #endif /* CONFIG_AVFILTER */
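
/* Editor's note -- illustrative, not part of ffplay.c.  The "abuffer" source above is
 * configured purely through the asrc_args key=value string.  For a hypothetical
 * 44100 Hz stereo S16 input, the string handed to avfilter_graph_create_filter()
 * would read
 *
 *     sample_rate=44100:sample_fmt=s16:channels=2:time_base=1/44100:channel_layout=0x3
 *
 * A minimal standalone sketch that builds such a string:
 */
#if 0
static void abuffer_args_sketch(char *buf, size_t size)
{
    int freq = 44100, channels = 2;               /* assumed example values */
    enum AVSampleFormat fmt = AV_SAMPLE_FMT_S16;
    int64_t layout = AV_CH_LAYOUT_STEREO;         /* 0x3 */

    snprintf(buf, size,
             "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d:channel_layout=0x%"PRIx64,
             freq, av_get_sample_fmt_name(fmt), channels, 1, freq, layout);
}
#endif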
2015 
2016 static int audio_thread(void *arg)
2017 {
2018  VideoState *is = arg;
2019  AVFrame *frame = av_frame_alloc();
2020  Frame *af;
2021 #if CONFIG_AVFILTER
2022  int last_serial = -1;
2023  int64_t dec_channel_layout;
2024  int reconfigure;
2025 #endif
2026  int got_frame = 0;
2027  AVRational tb;
2028  int ret = 0;
2029 
2030  if (!frame)
2031  return AVERROR(ENOMEM);
2032 
2033  do {
2034  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2035  goto the_end;
2036 
2037  if (got_frame) {
2038  tb = (AVRational){1, frame->sample_rate};
2039 
2040 #if CONFIG_AVFILTER
2041  dec_channel_layout = get_valid_channel_layout(frame->channel_layout, frame->channels);
2042 
2043  reconfigure =
2044  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2045  frame->format, frame->channels) ||
2046  is->audio_filter_src.channel_layout != dec_channel_layout ||
2047  is->audio_filter_src.freq != frame->sample_rate ||
2048  is->auddec.pkt_serial != last_serial;
2049 
2050  if (reconfigure) {
2051  char buf1[1024], buf2[1024];
2052  av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2053  av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2054  av_log(NULL, AV_LOG_DEBUG,
2055  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2056  is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2057  frame->sample_rate, frame->channels, av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2058 
2059  is->audio_filter_src.fmt = frame->format;
2060  is->audio_filter_src.channels = frame->channels;
2061  is->audio_filter_src.channel_layout = dec_channel_layout;
2062  is->audio_filter_src.freq = frame->sample_rate;
2063  last_serial = is->auddec.pkt_serial;
2064 
2065  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2066  goto the_end;
2067  }
2068 
2069  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2070  goto the_end;
2071 
2072  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2073  tb = av_buffersink_get_time_base(is->out_audio_filter);
2074 #endif
2075  if (!(af = frame_queue_peek_writable(&is->sampq)))
2076  goto the_end;
2077 
2078  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2079  af->pos = frame->pkt_pos;
2080  af->serial = is->auddec.pkt_serial;
2081  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2082 
2083  av_frame_move_ref(af->frame, frame);
2084  frame_queue_push(&is->sampq);
2085 
2086 #if CONFIG_AVFILTER
2087  if (is->audioq.serial != is->auddec.pkt_serial)
2088  break;
2089  }
2090  if (ret == AVERROR_EOF)
2091  is->auddec.finished = is->auddec.pkt_serial;
2092 #endif
2093  }
2094  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2095  the_end:
2096 #if CONFIG_AVFILTER
2097  avfilter_graph_free(&is->agraph);
2098 #endif
2099  av_frame_free(&frame);
2100  return ret;
2101 }
2102 
2103 static int decoder_start(Decoder *d, int (*fn)(void *), void *arg)
2104 {
2105  packet_queue_start(&d->queue);
2106  d->decoder_tid = SDL_CreateThread(fn, "decoder", arg);
2107  if (!d->decoder_tid) {
2108  av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
2109  return AVERROR(ENOMEM);
2110  }
2111  return 0;
2112 }
2113 
2114 static int video_thread(void *arg)
2115 {
2116  VideoState *is = arg;
2117  AVFrame *frame = av_frame_alloc();
2118  double pts;
2119  double duration;
2120  int ret;
2121  AVRational tb = is->video_st->time_base;
2122  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2123 
2124 #if CONFIG_AVFILTER
2125  AVFilterGraph *graph = avfilter_graph_alloc();
2126  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2127  int last_w = 0;
2128  int last_h = 0;
2129  enum AVPixelFormat last_format = -2;
2130  int last_serial = -1;
2131  int last_vfilter_idx = 0;
2132  if (!graph) {
2133  av_frame_free(&frame);
2134  return AVERROR(ENOMEM);
2135  }
2136 
2137 #endif
2138 
2139  if (!frame) {
2140 #if CONFIG_AVFILTER
2141  avfilter_graph_free(&graph);
2142 #endif
2143  return AVERROR(ENOMEM);
2144  }
2145 
2146  for (;;) {
2147  ret = get_video_frame(is, frame);
2148  if (ret < 0)
2149  goto the_end;
2150  if (!ret)
2151  continue;
2152 
2153 #if CONFIG_AVFILTER
2154  if ( last_w != frame->width
2155  || last_h != frame->height
2156  || last_format != frame->format
2157  || last_serial != is->viddec.pkt_serial
2158  || last_vfilter_idx != is->vfilter_idx) {
2159  av_log(NULL, AV_LOG_DEBUG,
2160  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2161  last_w, last_h,
2162  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2163  frame->width, frame->height,
2164  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2165  avfilter_graph_free(&graph);
2166  graph = avfilter_graph_alloc();
2167  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2168  SDL_Event event;
2169  event.type = FF_QUIT_EVENT;
2170  event.user.data1 = is;
2171  SDL_PushEvent(&event);
2172  goto the_end;
2173  }
2174  filt_in = is->in_video_filter;
2175  filt_out = is->out_video_filter;
2176  last_w = frame->width;
2177  last_h = frame->height;
2178  last_format = frame->format;
2179  last_serial = is->viddec.pkt_serial;
2180  last_vfilter_idx = is->vfilter_idx;
2181  frame_rate = av_buffersink_get_frame_rate(filt_out);
2182  }
2183 
2184  ret = av_buffersrc_add_frame(filt_in, frame);
2185  if (ret < 0)
2186  goto the_end;
2187 
2188  while (ret >= 0) {
2189  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2190 
2191  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2192  if (ret < 0) {
2193  if (ret == AVERROR_EOF)
2194  is->viddec.finished = is->viddec.pkt_serial;
2195  ret = 0;
2196  break;
2197  }
2198 
2199  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2200  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2201  is->frame_last_filter_delay = 0;
2202  tb = av_buffersink_get_time_base(filt_out);
2203 #endif
2204  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2205  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2206  ret = queue_picture(is, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial);
2207  av_frame_unref(frame);
2208 #if CONFIG_AVFILTER
2209  }
2210 #endif
2211 
2212  if (ret < 0)
2213  goto the_end;
2214  }
2215  the_end:
2216 #if CONFIG_AVFILTER
2217  avfilter_graph_free(&graph);
2218 #endif
2219  av_frame_free(&frame);
2220  return 0;
2221 }
2222 
2223 static int subtitle_thread(void *arg)
2224 {
2225  VideoState *is = arg;
2226  Frame *sp;
2227  int got_subtitle;
2228  double pts;
2229 
2230  for (;;) {
2231  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2232  return 0;
2233 
2234  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2235  break;
2236 
2237  pts = 0;
2238 
2239  if (got_subtitle && sp->sub.format == 0) {
2240  if (sp->sub.pts != AV_NOPTS_VALUE)
2241  pts = sp->sub.pts / (double)AV_TIME_BASE;
2242  sp->pts = pts;
2243  sp->serial = is->subdec.pkt_serial;
2244  sp->width = is->subdec.avctx->width;
2245  sp->height = is->subdec.avctx->height;
2246  sp->uploaded = 0;
2247 
2248  /* now we can update the picture count */
2249  frame_queue_push(&is->subpq);
2250  } else if (got_subtitle) {
2251  avsubtitle_free(&sp->sub);
2252  }
2253  }
2254  return 0;
2255 }
2256 
2257 /* copy samples for viewing in editor window */
2258 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2259 {
2260  int size, len;
2261 
2262  size = samples_size / sizeof(short);
2263  while (size > 0) {
2264  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2265  if (len > size)
2266  len = size;
2267  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2268  samples += len;
2269  is->sample_array_index += len;
2270  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2271  is->sample_array_index = 0;
2272  size -= len;
2273  }
2274 }
2275 
2276 /* return the wanted number of samples to get better sync if sync_type is video
2277  * or external master clock */
2278 static int synchronize_audio(VideoState *is, int nb_samples)
2279 {
2280  int wanted_nb_samples = nb_samples;
2281 
2282  /* if not master, then we try to remove or add samples to correct the clock */
2283  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2284  double diff, avg_diff;
2285  int min_nb_samples, max_nb_samples;
2286 
2287  diff = get_clock(&is->audclk) - get_master_clock(is);
2288 
2289  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2290  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2291  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2292  /* not enough measures to have a correct estimate */
2293  is->audio_diff_avg_count++;
2294  } else {
2295  /* estimate the A-V difference */
2296  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2297 
2298  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2299  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2300  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2301  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2302  wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2303  }
2304  av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2305  diff, avg_diff, wanted_nb_samples - nb_samples,
2306  is->audio_clock, is->audio_diff_threshold);
2307  }
2308  } else {
2309  /* the difference is too large: it may be caused by initial PTS errors, so
2310  reset the A-V filter */
2311  is->audio_diff_avg_count = 0;
2312  is->audio_diff_cum = 0;
2313  }
2314  }
2315 
2316  return wanted_nb_samples;
2317 }
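
/* Editor's note -- worked example, not part of ffplay.c.  With SAMPLE_CORRECTION_PERCENT_MAX
 * set to 10, a frame may only be stretched or shrunk by about 10%.  Suppose the audio clock
 * runs 5 ms ahead of the master clock, nb_samples is 1024 and audio_src.freq is 48000:
 *
 *     wanted_nb_samples = 1024 + (int)(0.005 * 48000) = 1264
 *     min_nb_samples    = 1024 *  90 / 100            =  921
 *     max_nb_samples    = 1024 * 110 / 100            = 1126
 *     av_clip(1264, 921, 1126)                        = 1126
 *
 * Only ~100 extra samples are added this round; the remaining drift is absorbed over the
 * following frames, which keeps the speed change inaudible.
 */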
2318 
2319 /**
2320  * Decode one audio frame and return its uncompressed size.
2321  *
2322  * The processed audio frame is decoded, converted if required, and
2323  * stored in is->audio_buf, with size in bytes given by the return
2324  * value.
2325  */
2326 static int audio_decode_frame(VideoState *is)
2327 {
2328  int data_size, resampled_data_size;
2329  int64_t dec_channel_layout;
2330  av_unused double audio_clock0;
2331  int wanted_nb_samples;
2332  Frame *af;
2333 
2334  if (is->paused)
2335  return -1;
2336 
2337  do {
2338 #if defined(_WIN32)
2339  while (frame_queue_nb_remaining(&is->sampq) == 0) {
2340  if ((av_gettime_relative() - audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
2341  return -1;
2342  av_usleep (1000);
2343  }
2344 #endif
2345  if (!(af = frame_queue_peek_readable(&is->sampq)))
2346  return -1;
2347  frame_queue_next(&is->sampq);
2348  } while (af->serial != is->audioq.serial);
2349 
2350  data_size = av_samples_get_buffer_size(NULL, af->frame->channels,
2351  af->frame->nb_samples,
2352  af->frame->format, 1);
2353 
2354  dec_channel_layout =
2355  (af->frame->channel_layout && af->frame->channels == av_get_channel_layout_nb_channels(af->frame->channel_layout)) ?
2356  af->frame->channel_layout : av_get_default_channel_layout(af->frame->channels);
2357  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2358 
2359  if (af->frame->format != is->audio_src.fmt ||
2360  dec_channel_layout != is->audio_src.channel_layout ||
2361  af->frame->sample_rate != is->audio_src.freq ||
2362  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2363  swr_free(&is->swr_ctx);
2364  is->swr_ctx = swr_alloc_set_opts(NULL,
2365  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2366  dec_channel_layout, af->frame->format, af->frame->sample_rate,
2367  0, NULL);
2368  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2369  av_log(NULL, AV_LOG_ERROR,
2370  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2371  af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), af->frame->channels,
2372  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2373  swr_free(&is->swr_ctx);
2374  return -1;
2375  }
2376  is->audio_src.channel_layout = dec_channel_layout;
2377  is->audio_src.channels = af->frame->channels;
2378  is->audio_src.freq = af->frame->sample_rate;
2379  is->audio_src.fmt = af->frame->format;
2380  }
2381 
2382  if (is->swr_ctx) {
2383  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2384  uint8_t **out = &is->audio_buf1;
2385  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2386  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2387  int len2;
2388  if (out_size < 0) {
2389  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2390  return -1;
2391  }
2392  if (wanted_nb_samples != af->frame->nb_samples) {
2393  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2394  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2395  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2396  return -1;
2397  }
2398  }
2399  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2400  if (!is->audio_buf1)
2401  return AVERROR(ENOMEM);
2402  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2403  if (len2 < 0) {
2404  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2405  return -1;
2406  }
2407  if (len2 == out_count) {
2408  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2409  if (swr_init(is->swr_ctx) < 0)
2410  swr_free(&is->swr_ctx);
2411  }
2412  is->audio_buf = is->audio_buf1;
2413  resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2414  } else {
2415  is->audio_buf = af->frame->data[0];
2416  resampled_data_size = data_size;
2417  }
2418 
2419  audio_clock0 = is->audio_clock;
2420  /* update the audio clock with the pts */
2421  if (!isnan(af->pts))
2422  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2423  else
2424  is->audio_clock = NAN;
2425  is->audio_clock_serial = af->serial;
2426 #ifdef DEBUG
2427  {
2428  static double last_clock;
2429  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2430  is->audio_clock - last_clock,
2431  is->audio_clock, audio_clock0);
2432  last_clock = is->audio_clock;
2433  }
2434 #endif
2435  return resampled_data_size;
2436 }
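
/* Editor's sketch -- not part of ffplay.c.  The resampling branch above follows the usual
 * libswresample pattern: configure a context for one (input format -> output format) pair,
 * init it, then push frames through swr_convert().  Stripped to its essentials, with an
 * assumed fixed 48 kHz stereo S16 output and no compensation or buffer reuse:
 */
#if 0
static int resample_sketch(const AVFrame *in, uint8_t **out, int max_out_samples)
{
    struct SwrContext *swr = swr_alloc_set_opts(NULL,
            AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, 48000,        /* output (assumed)   */
            in->channel_layout, (enum AVSampleFormat)in->format,  /* input, taken       */
            in->sample_rate, 0, NULL);                            /* from the frame     */
    int len;

    if (!swr || swr_init(swr) < 0) {
        swr_free(&swr);
        return -1;
    }
    len = swr_convert(swr, out, max_out_samples,
                      (const uint8_t **)in->extended_data, in->nb_samples);
    swr_free(&swr);
    return len;  /* samples written per channel, or a negative error code */
}
#endif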
2437 
2438 /* prepare a new audio buffer */
2439 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2440 {
2441  VideoState *is = opaque;
2442  int audio_size, len1;
2443 
2444  audio_callback_time = av_gettime_relative();
2445 
2446  while (len > 0) {
2447  if (is->audio_buf_index >= is->audio_buf_size) {
2448  audio_size = audio_decode_frame(is);
2449  if (audio_size < 0) {
2450  /* if error, just output silence */
2451  is->audio_buf = NULL;
2452  is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2453  } else {
2454  if (is->show_mode != SHOW_MODE_VIDEO)
2455  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2456  is->audio_buf_size = audio_size;
2457  }
2458  is->audio_buf_index = 0;
2459  }
2460  len1 = is->audio_buf_size - is->audio_buf_index;
2461  if (len1 > len)
2462  len1 = len;
2463  if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
2464  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2465  else {
2466  memset(stream, 0, len1);
2467  if (!is->muted && is->audio_buf)
2468  SDL_MixAudio(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1, is->audio_volume);
2469  }
2470  len -= len1;
2471  stream += len1;
2472  is->audio_buf_index += len1;
2473  }
2474  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2475  /* Let's assume the audio driver that is used by SDL has two periods. */
2476  if (!isnan(is->audio_clock)) {
2477  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2478  sync_clock_to_slave(&is->extclk, &is->audclk);
2479  }
2480 }
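
/* Editor's note -- worked example, not part of ffplay.c.  Under the two-period assumption
 * above, the audio clock is set to the pts of the last decoded sample minus the data that
 * is still queued in front of the DAC: two hardware buffers plus whatever part of
 * audio_buf has not been copied out yet.  For a 48 kHz stereo S16 device
 * (bytes_per_sec = 48000 * 2 * 2 = 192000), an 8192-byte hardware buffer and 4096 bytes
 * still pending:
 *
 *     latency = (2 * 8192 + 4096) / 192000 ~= 0.107 s
 *
 * so the clock reported to the rest of the player lags the decoded pts by about 107 ms.
 */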
2481 
2482 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2483 {
2484  SDL_AudioSpec wanted_spec, spec;
2485  const char *env;
2486  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2487  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2488  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2489 
2490  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2491  if (env) {
2492  wanted_nb_channels = atoi(env);
2493  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2494  }
2495  if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2496  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2497  wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2498  }
2499  wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2500  wanted_spec.channels = wanted_nb_channels;
2501  wanted_spec.freq = wanted_sample_rate;
2502  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2503  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2504  return -1;
2505  }
2506  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2507  next_sample_rate_idx--;
2508  wanted_spec.format = AUDIO_S16SYS;
2509  wanted_spec.silence = 0;
2510  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
2511  wanted_spec.callback = sdl_audio_callback;
2512  wanted_spec.userdata = opaque;
2513  while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2514  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2515  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2516  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2517  if (!wanted_spec.channels) {
2518  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2519  wanted_spec.channels = wanted_nb_channels;
2520  if (!wanted_spec.freq) {
2521  av_log(NULL, AV_LOG_ERROR,
2522  "No more combinations to try, audio open failed\n");
2523  return -1;
2524  }
2525  }
2526  wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2527  }
2528  if (spec.format != AUDIO_S16SYS) {
2529  av_log(NULL, AV_LOG_ERROR,
2530  "SDL advised audio format %d is not supported!\n", spec.format);
2531  return -1;
2532  }
2533  if (spec.channels != wanted_spec.channels) {
2534  wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2535  if (!wanted_channel_layout) {
2536  av_log(NULL, AV_LOG_ERROR,
2537  "SDL advised channel count %d is not supported!\n", spec.channels);
2538  return -1;
2539  }
2540  }
2541 
2542  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2543  audio_hw_params->freq = spec.freq;
2544  audio_hw_params->channel_layout = wanted_channel_layout;
2545  audio_hw_params->channels = spec.channels;
2546  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
2547  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2548  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2549  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2550  return -1;
2551  }
2552  return spec.size;
2553 }
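
/* Editor's note -- worked example, not part of ffplay.c.  The wanted_spec.samples formula
 * above picks the largest power of two that still keeps the callback rate at or below
 * SDL_AUDIO_MAX_CALLBACKS_PER_SEC (30).  For a 48 kHz device:
 *
 *     freq / 30     = 1600
 *     av_log2(1600) = 10
 *     2 << 10       = 2048 samples  (~42.7 ms per callback, ~23 callbacks/s)
 *
 * and FFMAX() with SDL_AUDIO_MIN_BUFFER_SIZE (512) guarantees a floor for very low
 * sample rates.
 */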
2554 
2555 /* open a given stream. Return 0 if OK */
2556 static int stream_component_open(VideoState *is, int stream_index)
2557 {
2558  AVFormatContext *ic = is->ic;
2559  AVCodecContext *avctx;
2560  AVCodec *codec;
2561  const char *forced_codec_name = NULL;
2562  AVDictionary *opts = NULL;
2563  AVDictionaryEntry *t = NULL;
2564  int sample_rate, nb_channels;
2565  int64_t channel_layout;
2566  int ret = 0;
2567  int stream_lowres = lowres;
2568 
2569  if (stream_index < 0 || stream_index >= ic->nb_streams)
2570  return -1;
2571 
2572  avctx = avcodec_alloc_context3(NULL);
2573  if (!avctx)
2574  return AVERROR(ENOMEM);
2575 
2576  ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
2577  if (ret < 0)
2578  goto fail;
2579  av_codec_set_pkt_timebase(avctx, ic->streams[stream_index]->time_base);
2580 
2581  codec = avcodec_find_decoder(avctx->codec_id);
2582 
2583  switch(avctx->codec_type){
2584  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2585  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2586  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2587  }
2588  if (forced_codec_name)
2589  codec = avcodec_find_decoder_by_name(forced_codec_name);
2590  if (!codec) {
2591  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2592  "No codec could be found with name '%s'\n", forced_codec_name);
2593  else av_log(NULL, AV_LOG_WARNING,
2594  "No codec could be found with id %d\n", avctx->codec_id);
2595  ret = AVERROR(EINVAL);
2596  goto fail;
2597  }
2598 
2599  avctx->codec_id = codec->id;
2600  if(stream_lowres > av_codec_get_max_lowres(codec)){
2601  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2602  av_codec_get_max_lowres(codec));
2603  stream_lowres = av_codec_get_max_lowres(codec);
2604  }
2605  av_codec_set_lowres(avctx, stream_lowres);
2606 
2607 #if FF_API_EMU_EDGE
2608  if(stream_lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2609 #endif
2610  if (fast)
2611  avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2612 #if FF_API_EMU_EDGE
2613  if(codec->capabilities & AV_CODEC_CAP_DR1)
2614  avctx->flags |= CODEC_FLAG_EMU_EDGE;
2615 #endif
2616 
2617  opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2618  if (!av_dict_get(opts, "threads", NULL, 0))
2619  av_dict_set(&opts, "threads", "auto", 0);
2620  if (stream_lowres)
2621  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2622  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2623  av_dict_set(&opts, "refcounted_frames", "1", 0);
2624  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2625  goto fail;
2626  }
2627  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2628  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2629  ret = AVERROR_OPTION_NOT_FOUND;
2630  goto fail;
2631  }
2632 
2633  is->eof = 0;
2634  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2635  switch (avctx->codec_type) {
2636  case AVMEDIA_TYPE_AUDIO:
2637 #if CONFIG_AVFILTER
2638  {
2639  AVFilterContext *sink;
2640 
2641  is->audio_filter_src.freq = avctx->sample_rate;
2642  is->audio_filter_src.channels = avctx->channels;
2643  is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2644  is->audio_filter_src.fmt = avctx->sample_fmt;
2645  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2646  goto fail;
2647  sink = is->out_audio_filter;
2648  sample_rate = av_buffersink_get_sample_rate(sink);
2649  nb_channels = av_buffersink_get_channels(sink);
2650  channel_layout = av_buffersink_get_channel_layout(sink);
2651  }
2652 #else
2653  sample_rate = avctx->sample_rate;
2654  nb_channels = avctx->channels;
2655  channel_layout = avctx->channel_layout;
2656 #endif
2657 
2658  /* prepare audio output */
2659  if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2660  goto fail;
2661  is->audio_hw_buf_size = ret;
2662  is->audio_src = is->audio_tgt;
2663  is->audio_buf_size = 0;
2664  is->audio_buf_index = 0;
2665 
2666  /* init averaging filter */
2667  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2668  is->audio_diff_avg_count = 0;
2669  /* since we do not have a precise enough audio FIFO fullness,
2670  we correct audio sync only if it is larger than this threshold */
2671  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2672 
2673  is->audio_stream = stream_index;
2674  is->audio_st = ic->streams[stream_index];
2675 
2676  decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
2677  if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
2678  is->auddec.start_pts = is->audio_st->start_time;
2679  is->auddec.start_pts_tb = is->audio_st->time_base;
2680  }
2681  if ((ret = decoder_start(&is->auddec, audio_thread, is)) < 0)
2682  goto out;
2683  SDL_PauseAudio(0);
2684  break;
2685  case AVMEDIA_TYPE_VIDEO:
2686  is->video_stream = stream_index;
2687  is->video_st = ic->streams[stream_index];
2688 
2689  decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
2690  if ((ret = decoder_start(&is->viddec, video_thread, is)) < 0)
2691  goto out;
2692  is->queue_attachments_req = 1;
2693  break;
2694  case AVMEDIA_TYPE_SUBTITLE:
2695  is->subtitle_stream = stream_index;
2696  is->subtitle_st = ic->streams[stream_index];
2697 
2698  decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread);
2699  if ((ret = decoder_start(&is->subdec, subtitle_thread, is)) < 0)
2700  goto out;
2701  break;
2702  default:
2703  break;
2704  }
2705  goto out;
2706 
2707 fail:
2708  avcodec_free_context(&avctx);
2709 out:
2710  av_dict_free(&opts);
2711 
2712  return ret;
2713 }
2714 
2715 static int decode_interrupt_cb(void *ctx)
2716 {
2717  VideoState *is = ctx;
2718  return is->abort_request;
2719 }
2720 
2721 static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue) {
2722  return stream_id < 0 ||
2723  queue->abort_request ||
2724  (st->disposition & AV_DISPOSITION_ATTACHED_PIC) ||
2725  queue->nb_packets > MIN_FRAMES && (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0);
2726 }
2727 
2728 static int is_realtime(AVFormatContext *s)
2729 {
2730  if( !strcmp(s->iformat->name, "rtp")
2731  || !strcmp(s->iformat->name, "rtsp")
2732  || !strcmp(s->iformat->name, "sdp")
2733  )
2734  return 1;
2735 
2736  if(s->pb && ( !strncmp(s->filename, "rtp:", 4)
2737  || !strncmp(s->filename, "udp:", 4)
2738  )
2739  )
2740  return 1;
2741  return 0;
2742 }
2743 
2744 /* this thread gets the stream from the disk or the network */
2745 static int read_thread(void *arg)
2746 {
2747  VideoState *is = arg;
2748  AVFormatContext *ic = NULL;
2749  int err, i, ret;
2750  int st_index[AVMEDIA_TYPE_NB];
2751  AVPacket pkt1, *pkt = &pkt1;
2752  int64_t stream_start_time;
2753  int pkt_in_play_range = 0;
2754  AVDictionaryEntry *t;
2755  SDL_mutex *wait_mutex = SDL_CreateMutex();
2756  int scan_all_pmts_set = 0;
2757  int64_t pkt_ts;
2758 
2759  if (!wait_mutex) {
2760  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
2761  ret = AVERROR(ENOMEM);
2762  goto fail;
2763  }
2764 
2765  memset(st_index, -1, sizeof(st_index));
2766  is->last_video_stream = is->video_stream = -1;
2767  is->last_audio_stream = is->audio_stream = -1;
2768  is->last_subtitle_stream = is->subtitle_stream = -1;
2769  is->eof = 0;
2770 
2771  ic = avformat_alloc_context();
2772  if (!ic) {
2773  av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2774  ret = AVERROR(ENOMEM);
2775  goto fail;
2776  }
2777  ic->interrupt_callback.callback = decode_interrupt_cb;
2778  ic->interrupt_callback.opaque = is;
2779  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2780  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2781  scan_all_pmts_set = 1;
2782  }
2783  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2784  if (err < 0) {
2785  print_error(is->filename, err);
2786  ret = -1;
2787  goto fail;
2788  }
2789  if (scan_all_pmts_set)
2790  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2791 
2792  if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2793  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2794  ret = AVERROR_OPTION_NOT_FOUND;
2795  goto fail;
2796  }
2797  is->ic = ic;
2798 
2799  if (genpts)
2800  ic->flags |= AVFMT_FLAG_GENPTS;
2801 
2802  av_format_inject_global_side_data(ic);
2803 
2804  if (find_stream_info) {
2805  AVDictionary **opts = setup_find_stream_info_opts(ic, codec_opts);
2806  int orig_nb_streams = ic->nb_streams;
2807 
2808  err = avformat_find_stream_info(ic, opts);
2809 
2810  for (i = 0; i < orig_nb_streams; i++)
2811  av_dict_free(&opts[i]);
2812  av_freep(&opts);
2813 
2814  if (err < 0) {
2815  av_log(NULL, AV_LOG_WARNING,
2816  "%s: could not find codec parameters\n", is->filename);
2817  ret = -1;
2818  goto fail;
2819  }
2820  }
2821 
2822  if (ic->pb)
2823  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2824 
2825  if (seek_by_bytes < 0)
2826  seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
2827 
2828  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2829 
2830  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2831  window_title = av_asprintf("%s - %s", t->value, input_filename);
2832 
2833  /* if seeking requested, we execute it */
2834  if (start_time != AV_NOPTS_VALUE) {
2835  int64_t timestamp;
2836 
2837  timestamp = start_time;
2838  /* add the stream start time */
2839  if (ic->start_time != AV_NOPTS_VALUE)
2840  timestamp += ic->start_time;
2841  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2842  if (ret < 0) {
2843  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2844  is->filename, (double)timestamp / AV_TIME_BASE);
2845  }
2846  }
2847 
2848  is->realtime = is_realtime(ic);
2849 
2850  if (show_status)
2851  av_dump_format(ic, 0, is->filename, 0);
2852 
2853  for (i = 0; i < ic->nb_streams; i++) {
2854  AVStream *st = ic->streams[i];
2855  enum AVMediaType type = st->codecpar->codec_type;
2856  st->discard = AVDISCARD_ALL;
2857  if (type >= 0 && wanted_stream_spec[type] && st_index[type] == -1)
2858  if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
2859  st_index[type] = i;
2860  }
2861  for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
2862  if (wanted_stream_spec[i] && st_index[i] == -1) {
2863  av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
2864  st_index[i] = INT_MAX;
2865  }
2866  }
2867 
2868  if (!video_disable)
2869  st_index[AVMEDIA_TYPE_VIDEO] =
2870  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2871  st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2872  if (!audio_disable)
2873  st_index[AVMEDIA_TYPE_AUDIO] =
2874  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2875  st_index[AVMEDIA_TYPE_AUDIO],
2876  st_index[AVMEDIA_TYPE_VIDEO],
2877  NULL, 0);
2878  if (!video_disable && !subtitle_disable)
2879  st_index[AVMEDIA_TYPE_SUBTITLE] =
2880  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2881  st_index[AVMEDIA_TYPE_SUBTITLE],
2882  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2883  st_index[AVMEDIA_TYPE_AUDIO] :
2884  st_index[AVMEDIA_TYPE_VIDEO]),
2885  NULL, 0);
2886 
2887  is->show_mode = show_mode;
2888  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2889  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2890  AVCodecParameters *codecpar = st->codecpar;
2891  AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
2892  if (codecpar->width)
2893  set_default_window_size(codecpar->width, codecpar->height, sar);
2894  }
2895 
2896  /* open the streams */
2897  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2898  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2899  }
2900 
2901  ret = -1;
2902  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2903  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2904  }
2905  if (is->show_mode == SHOW_MODE_NONE)
2906  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2907 
2908  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2909  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2910  }
2911 
2912  if (is->video_stream < 0 && is->audio_stream < 0) {
2913  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
2914  is->filename);
2915  ret = -1;
2916  goto fail;
2917  }
2918 
2919  if (infinite_buffer < 0 && is->realtime)
2920  infinite_buffer = 1;
2921 
2922  for (;;) {
2923  if (is->abort_request)
2924  break;
2925  if (is->paused != is->last_paused) {
2926  is->last_paused = is->paused;
2927  if (is->paused)
2928  is->read_pause_return = av_read_pause(ic);
2929  else
2930  av_read_play(ic);
2931  }
2932 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2933  if (is->paused &&
2934  (!strcmp(ic->iformat->name, "rtsp") ||
2935  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2936  /* wait 10 ms to avoid trying to get another packet */
2937  /* XXX: horrible */
2938  SDL_Delay(10);
2939  continue;
2940  }
2941 #endif
2942  if (is->seek_req) {
2943  int64_t seek_target = is->seek_pos;
2944  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2945  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2946 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2947 // of the seek_pos/seek_rel variables
2948 
2949  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2950  if (ret < 0) {
2951  av_log(NULL, AV_LOG_ERROR,
2952  "%s: error while seeking\n", is->ic->filename);
2953  } else {
2954  if (is->audio_stream >= 0) {
2955  packet_queue_flush(&is->audioq);
2956  packet_queue_put(&is->audioq, &flush_pkt);
2957  }
2958  if (is->subtitle_stream >= 0) {
2959  packet_queue_flush(&is->subtitleq);
2960  packet_queue_put(&is->subtitleq, &flush_pkt);
2961  }
2962  if (is->video_stream >= 0) {
2963  packet_queue_flush(&is->videoq);
2964  packet_queue_put(&is->videoq, &flush_pkt);
2965  }
2966  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
2967  set_clock(&is->extclk, NAN, 0);
2968  } else {
2969  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
2970  }
2971  }
2972  is->seek_req = 0;
2973  is->queue_attachments_req = 1;
2974  is->eof = 0;
2975  if (is->paused)
2976  step_to_next_frame(is);
2977  }
2978  if (is->queue_attachments_req) {
2979  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
2980  AVPacket copy;
2981  if ((ret = av_copy_packet(&copy, &is->video_st->attached_pic)) < 0)
2982  goto fail;
2983  packet_queue_put(&is->videoq, &copy);
2984  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
2985  }
2986  is->queue_attachments_req = 0;
2987  }
2988 
2989  /* if the queues are full, no need to read more */
2990  if (infinite_buffer<1 &&
2991  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2992  || (stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq) &&
2993  stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq) &&
2994  stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq)))) {
2995  /* wait 10 ms */
2996  SDL_LockMutex(wait_mutex);
2997  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2998  SDL_UnlockMutex(wait_mutex);
2999  continue;
3000  }
3001  if (!is->paused &&
3002  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
3003  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
3004  if (loop != 1 && (!loop || --loop)) {
3005  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
3006  } else if (autoexit) {
3007  ret = AVERROR_EOF;
3008  goto fail;
3009  }
3010  }
3011  ret = av_read_frame(ic, pkt);
3012  if (ret < 0) {
3013  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
3014  if (is->video_stream >= 0)
3015  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
3016  if (is->audio_stream >= 0)
3017  packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
3018  if (is->subtitle_stream >= 0)
3019  packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
3020  is->eof = 1;
3021  }
3022  if (ic->pb && ic->pb->error)
3023  break;
3024  SDL_LockMutex(wait_mutex);
3025  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3026  SDL_UnlockMutex(wait_mutex);
3027  continue;
3028  } else {
3029  is->eof = 0;
3030  }
3031  /* check if packet is in play range specified by user, then queue, otherwise discard */
3032  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3033  pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
3034  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3035  (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3036  av_q2d(ic->streams[pkt->stream_index]->time_base) -
3037  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3038  <= ((double)duration / 1000000);
3039  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3040  packet_queue_put(&is->audioq, pkt);
3041  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3042  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3043  packet_queue_put(&is->videoq, pkt);
3044  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3045  packet_queue_put(&is->subtitleq, pkt);
3046  } else {
3047  av_packet_unref(pkt);
3048  }
3049  }
3050 
3051  ret = 0;
3052  fail:
3053  if (ic && !is->ic)
3054  avformat_close_input(&ic);
3055 
3056  if (ret != 0) {
3057  SDL_Event event;
3058 
3059  event.type = FF_QUIT_EVENT;
3060  event.user.data1 = is;
3061  SDL_PushEvent(&event);
3062  }
3063  SDL_DestroyMutex(wait_mutex);
3064  return 0;
3065 }
3066 
3067 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
3068 {
3069  VideoState *is;
3070 
3071  is = av_mallocz(sizeof(VideoState));
3072  if (!is)
3073  return NULL;
3074  is->filename = av_strdup(filename);
3075  if (!is->filename)
3076  goto fail;
3077  is->iformat = iformat;
3078  is->ytop = 0;
3079  is->xleft = 0;
3080 
3081  /* start video display */
3082  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3083  goto fail;
3084  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3085  goto fail;
3086  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3087  goto fail;
3088 
3089  if (packet_queue_init(&is->videoq) < 0 ||
3090  packet_queue_init(&is->audioq) < 0 ||
3091  packet_queue_init(&is->subtitleq) < 0)
3092  goto fail;
3093 
3094  if (!(is->continue_read_thread = SDL_CreateCond())) {
3095  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
3096  goto fail;
3097  }
3098 
3099  init_clock(&is->vidclk, &is->videoq.serial);
3100  init_clock(&is->audclk, &is->audioq.serial);
3101  init_clock(&is->extclk, &is->extclk.serial);
3102  is->audio_clock_serial = -1;
3103  if (startup_volume < 0)
3104  av_log(NULL, AV_LOG_WARNING, "-volume=%d < 0, setting to 0\n", startup_volume);
3105  if (startup_volume > 100)
3106  av_log(NULL, AV_LOG_WARNING, "-volume=%d > 100, setting to 100\n", startup_volume);
3107  startup_volume = av_clip(startup_volume, 0, 100);
3108  startup_volume = av_clip(SDL_MIX_MAXVOLUME * startup_volume / 100, 0, SDL_MIX_MAXVOLUME);
3109  is->audio_volume = startup_volume;
3110  is->muted = 0;
3111  is->av_sync_type = av_sync_type;
3112  is->read_tid = SDL_CreateThread(read_thread, "read_thread", is);
3113  if (!is->read_tid) {
3114  av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
3115 fail:
3116  stream_close(is);
3117  return NULL;
3118  }
3119  return is;
3120 }
3121 
3122 static void stream_cycle_channel(VideoState *is, int codec_type)
3123 {
3124  AVFormatContext *ic = is->ic;
3125  int start_index, stream_index;
3126  int old_index;
3127  AVStream *st;
3128  AVProgram *p = NULL;
3129  int nb_streams = is->ic->nb_streams;
3130 
3131  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3132  start_index = is->last_video_stream;
3133  old_index = is->video_stream;
3134  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3135  start_index = is->last_audio_stream;
3136  old_index = is->audio_stream;
3137  } else {
3138  start_index = is->last_subtitle_stream;
3139  old_index = is->subtitle_stream;
3140  }
3141  stream_index = start_index;
3142 
3143  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3144  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3145  if (p) {
3146  nb_streams = p->nb_stream_indexes;
3147  for (start_index = 0; start_index < nb_streams; start_index++)
3148  if (p->stream_index[start_index] == stream_index)
3149  break;
3150  if (start_index == nb_streams)
3151  start_index = -1;
3152  stream_index = start_index;
3153  }
3154  }
3155 
3156  for (;;) {
3157  if (++stream_index >= nb_streams)
3158  {
3159  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3160  {
3161  stream_index = -1;
3162  is->last_subtitle_stream = -1;
3163  goto the_end;
3164  }
3165  if (start_index == -1)
3166  return;
3167  stream_index = 0;
3168  }
3169  if (stream_index == start_index)
3170  return;
3171  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3172  if (st->codecpar->codec_type == codec_type) {
3173  /* check that parameters are OK */
3174  switch (codec_type) {
3175  case AVMEDIA_TYPE_AUDIO:
3176  if (st->codecpar->sample_rate != 0 &&
3177  st->codecpar->channels != 0)
3178  goto the_end;
3179  break;
3180  case AVMEDIA_TYPE_VIDEO:
3181  case AVMEDIA_TYPE_SUBTITLE:
3182  goto the_end;
3183  default:
3184  break;
3185  }
3186  }
3187  }
3188  the_end:
3189  if (p && stream_index != -1)
3190  stream_index = p->stream_index[stream_index];
3191  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3192  av_get_media_type_string(codec_type),
3193  old_index,
3194  stream_index);
3195 
3196  stream_component_close(is, old_index);
3197  stream_component_open(is, stream_index);
3198 }
3199 
3200 
3201 static void toggle_full_screen(VideoState *is)
3202 {
3203  is_full_screen = !is_full_screen;
3204  SDL_SetWindowFullscreen(window, is_full_screen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0);
3205 }
3206 
3207 static void toggle_audio_display(VideoState *is)
3208 {
3209  int next = is->show_mode;
3210  do {
3211  next = (next + 1) % SHOW_MODE_NB;
3212  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3213  if (is->show_mode != next) {
3214  is->force_refresh = 1;
3215  is->show_mode = next;
3216  }
3217 }
3218 
3219 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3220  double remaining_time = 0.0;
3221  SDL_PumpEvents();
3222  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
3223  if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3224  SDL_ShowCursor(0);
3225  cursor_hidden = 1;
3226  }
3227  if (remaining_time > 0.0)
3228  av_usleep((int64_t)(remaining_time * 1000000.0));
3229  remaining_time = REFRESH_RATE;
3230  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3231  video_refresh(is, &remaining_time);
3232  SDL_PumpEvents();
3233  }
3234 }
3235 
3236 static void seek_chapter(VideoState *is, int incr)
3237 {
3238  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3239  int i;
3240 
3241  if (!is->ic->nb_chapters)
3242  return;
3243 
3244  /* find the current chapter */
3245  for (i = 0; i < is->ic->nb_chapters; i++) {
3246  AVChapter *ch = is->ic->chapters[i];
3247  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3248  i--;
3249  break;
3250  }
3251  }
3252 
3253  i += incr;
3254  i = FFMAX(i, 0);
3255  if (i >= is->ic->nb_chapters)
3256  return;
3257 
3258  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3259  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3260  AV_TIME_BASE_Q), 0, 0);
3261 }
3262 
3263 /* handle an event sent by the GUI */
3264 static void event_loop(VideoState *cur_stream)
3265 {
3266  SDL_Event event;
3267  double incr, pos, frac;
3268 
3269  for (;;) {
3270  double x;
3271  refresh_loop_wait_event(cur_stream, &event);
3272  switch (event.type) {
3273  case SDL_KEYDOWN:
3274  if (exit_on_keydown) {
3275  do_exit(cur_stream);
3276  break;
3277  }
3278  switch (event.key.keysym.sym) {
3279  case SDLK_ESCAPE:
3280  case SDLK_q:
3281  do_exit(cur_stream);
3282  break;
3283  case SDLK_f:
3284  toggle_full_screen(cur_stream);
3285  cur_stream->force_refresh = 1;
3286  break;
3287  case SDLK_p:
3288  case SDLK_SPACE:
3289  toggle_pause(cur_stream);
3290  break;
3291  case SDLK_m:
3292  toggle_mute(cur_stream);
3293  break;
3294  case SDLK_KP_MULTIPLY:
3295  case SDLK_0:
3296  update_volume(cur_stream, 1, SDL_VOLUME_STEP);
3297  break;
3298  case SDLK_KP_DIVIDE:
3299  case SDLK_9:
3300  update_volume(cur_stream, -1, SDL_VOLUME_STEP);
3301  break;
3302  case SDLK_s: // S: Step to next frame
3303  step_to_next_frame(cur_stream);
3304  break;
3305  case SDLK_a:
3306  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3307  break;
3308  case SDLK_v:
3309  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3310  break;
3311  case SDLK_c:
3312  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3313  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3314  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3315  break;
3316  case SDLK_t:
3317  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3318  break;
3319  case SDLK_w:
3320 #if CONFIG_AVFILTER
3321  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3322  if (++cur_stream->vfilter_idx >= nb_vfilters)
3323  cur_stream->vfilter_idx = 0;
3324  } else {
3325  cur_stream->vfilter_idx = 0;
3326  toggle_audio_display(cur_stream);
3327  }
3328 #else
3329  toggle_audio_display(cur_stream);
3330 #endif
3331  break;
3332  case SDLK_PAGEUP:
3333  if (cur_stream->ic->nb_chapters <= 1) {
3334  incr = 600.0;
3335  goto do_seek;
3336  }
3337  seek_chapter(cur_stream, 1);
3338  break;
3339  case SDLK_PAGEDOWN:
3340  if (cur_stream->ic->nb_chapters <= 1) {
3341  incr = -600.0;
3342  goto do_seek;
3343  }
3344  seek_chapter(cur_stream, -1);
3345  break;
3346  case SDLK_LEFT:
3347  incr = -10.0;
3348  goto do_seek;
3349  case SDLK_RIGHT:
3350  incr = 10.0;
3351  goto do_seek;
3352  case SDLK_UP:
3353  incr = 60.0;
3354  goto do_seek;
3355  case SDLK_DOWN:
3356  incr = -60.0;
3357  do_seek:
3358  if (seek_by_bytes) {
3359  pos = -1;
3360  if (pos < 0 && cur_stream->video_stream >= 0)
3361  pos = frame_queue_last_pos(&cur_stream->pictq);
3362  if (pos < 0 && cur_stream->audio_stream >= 0)
3363  pos = frame_queue_last_pos(&cur_stream->sampq);
3364  if (pos < 0)
3365  pos = avio_tell(cur_stream->ic->pb);
3366  if (cur_stream->ic->bit_rate)
3367  incr *= cur_stream->ic->bit_rate / 8.0;
3368  else
3369  incr *= 180000.0;
3370  pos += incr;
3371  stream_seek(cur_stream, pos, incr, 1);
3372  } else {
3373  pos = get_master_clock(cur_stream);
3374  if (isnan(pos))
3375  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3376  pos += incr;
3377  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3378  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3379  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3380  }
3381  break;
3382  default:
3383  break;
3384  }
3385  break;
3386  case SDL_MOUSEBUTTONDOWN:
3387  if (exit_on_mousedown) {
3388  do_exit(cur_stream);
3389  break;
3390  }
3391  if (event.button.button == SDL_BUTTON_LEFT) {
3392  static int64_t last_mouse_left_click = 0;
3393  if (av_gettime_relative() - last_mouse_left_click <= 500000) {
3394  toggle_full_screen(cur_stream);
3395  cur_stream->force_refresh = 1;
3396  last_mouse_left_click = 0;
3397  } else {
3398  last_mouse_left_click = av_gettime_relative();
3399  }
3400  }
3401  case SDL_MOUSEMOTION:
3402  if (cursor_hidden) {
3403  SDL_ShowCursor(1);
3404  cursor_hidden = 0;
3405  }
3406  cursor_last_shown = av_gettime_relative();
3407  if (event.type == SDL_MOUSEBUTTONDOWN) {
3408  if (event.button.button != SDL_BUTTON_RIGHT)
3409  break;
3410  x = event.button.x;
3411  } else {
3412  if (!(event.motion.state & SDL_BUTTON_RMASK))
3413  break;
3414  x = event.motion.x;
3415  }
3416  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3417  uint64_t size = avio_size(cur_stream->ic->pb);
3418  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3419  } else {
3420  int64_t ts;
3421  int ns, hh, mm, ss;
3422  int tns, thh, tmm, tss;
3423  tns = cur_stream->ic->duration / 1000000LL;
3424  thh = tns / 3600;
3425  tmm = (tns % 3600) / 60;
3426  tss = (tns % 60);
3427  frac = x / cur_stream->width;
3428  ns = frac * tns;
3429  hh = ns / 3600;
3430  mm = (ns % 3600) / 60;
3431  ss = (ns % 60);
3432  av_log(NULL, AV_LOG_INFO,
3433  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3434  hh, mm, ss, thh, tmm, tss);
3435  ts = frac * cur_stream->ic->duration;
3436  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3437  ts += cur_stream->ic->start_time;
3438  stream_seek(cur_stream, ts, 0, 0);
3439  }
3440  break;
3441  case SDL_WINDOWEVENT:
3442  switch (event.window.event) {
3443  case SDL_WINDOWEVENT_RESIZED:
3444  screen_width = cur_stream->width = event.window.data1;
3445  screen_height = cur_stream->height = event.window.data2;
3446  if (cur_stream->vis_texture) {
3447  SDL_DestroyTexture(cur_stream->vis_texture);
3448  cur_stream->vis_texture = NULL;
3449  }
3450  case SDL_WINDOWEVENT_EXPOSED:
3451  cur_stream->force_refresh = 1;
3452  }
3453  break;
3454  case SDL_QUIT:
3455  case FF_QUIT_EVENT:
3456  do_exit(cur_stream);
3457  break;
3458  default:
3459  break;
3460  }
3461  }
3462 }
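
/* Editor's note -- worked example, not part of ffplay.c.  The right-click seek above maps
 * the horizontal click position to a fraction of the file.  With a 1280-pixel-wide window,
 * a click at x = 320 gives frac = 0.25; for a 2-hour input (duration = 7200 * AV_TIME_BASE)
 * the target becomes ts = 0.25 * 7200 s = 30 min (plus ic->start_time when it is set).
 * In the byte-seek branch the same fraction is applied to avio_size() instead, i.e.
 * pos = size * x / width.
 */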
3463 
3464 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3465 {
3466  av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3467  return opt_default(NULL, "video_size", arg);
3468 }
3469 
3470 static int opt_width(void *optctx, const char *opt, const char *arg)
3471 {
3472  screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3473  return 0;
3474 }
3475 
3476 static int opt_height(void *optctx, const char *opt, const char *arg)
3477 {
3478  screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3479  return 0;
3480 }
3481 
3482 static int opt_format(void *optctx, const char *opt, const char *arg)
3483 {
3484  file_iformat = av_find_input_format(arg);
3485  if (!file_iformat) {
3486  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3487  return AVERROR(EINVAL);
3488  }
3489  return 0;
3490 }
3491 
3492 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3493 {
3494  av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3495  return opt_default(NULL, "pixel_format", arg);
3496 }
3497 
3498 static int opt_sync(void *optctx, const char *opt, const char *arg)
3499 {
3500  if (!strcmp(arg, "audio"))
3501  av_sync_type = AV_SYNC_AUDIO_MASTER;
3502  else if (!strcmp(arg, "video"))
3503  av_sync_type = AV_SYNC_VIDEO_MASTER;
3504  else if (!strcmp(arg, "ext"))
3505  av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3506  else {
3507  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3508  exit(1);
3509  }
3510  return 0;
3511 }
3512 
3513 static int opt_seek(void *optctx, const char *opt, const char *arg)
3514 {
3515  start_time = parse_time_or_die(opt, arg, 1);
3516  return 0;
3517 }
3518 
3519 static int opt_duration(void *optctx, const char *opt, const char *arg)
3520 {
3521  duration = parse_time_or_die(opt, arg, 1);
3522  return 0;
3523 }
3524 
3525 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3526 {
3527  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3528  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3529  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3530  parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3531  return 0;
3532 }
3533 
3534 static void opt_input_file(void *optctx, const char *filename)
3535 {
3536  if (input_filename) {
3537  av_log(NULL, AV_LOG_FATAL,
3538  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3539  filename, input_filename);
3540  exit(1);
3541  }
3542  if (!strcmp(filename, "-"))
3543  filename = "pipe:";
3544  input_filename = filename;
3545 }
3546 
3547 static int opt_codec(void *optctx, const char *opt, const char *arg)
3548 {
3549  const char *spec = strchr(opt, ':');
3550  if (!spec) {
3551  av_log(NULL, AV_LOG_ERROR,
3552  "No media specifier was specified in '%s' in option '%s'\n",
3553  arg, opt);
3554  return AVERROR(EINVAL);
3555  }
3556  spec++;
3557  switch (spec[0]) {
3558  case 'a' : audio_codec_name = arg; break;
3559  case 's' : subtitle_codec_name = arg; break;
3560  case 'v' : video_codec_name = arg; break;
3561  default:
3562  av_log(NULL, AV_LOG_ERROR,
3563  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3564  return AVERROR(EINVAL);
3565  }
3566  return 0;
3567 }
3568 
3569 static int dummy;
3570 
3571 static const OptionDef options[] = {
3572  CMDUTILS_COMMON_OPTIONS
3573  { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3574  { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3575  { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3576  { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3577  { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3578  { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3579  { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3580  { "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3581  { "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3582  { "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3583  { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3584  { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3585  { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3586  { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3587  { "noborder", OPT_BOOL, { &borderless }, "borderless window" },
3588  { "volume", OPT_INT | HAS_ARG, { &startup_volume}, "set startup volume 0=min 100=max", "volume" },
3589  { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3590  { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3591  { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3592  { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3593  { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3594  { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3595  { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3596  { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3597  { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3598  { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3599  { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3600  { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3601  { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3602  { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3603  { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3604 #if CONFIG_AVFILTER
3605  { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3606  { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3607 #endif
3608  { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3609  { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3610  { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3611  { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3612  { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3613  { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3614  { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3615  { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3616  { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
3617  { "find_stream_info", OPT_BOOL | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
3618  "read and decode the streams to fill missing information with heuristics" },
3619  { NULL, },
3620 };
3621 
3622 static void show_usage(void)
3623 {
3624  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3625  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3626  av_log(NULL, AV_LOG_INFO, "\n");
3627 }
3628 
3629 void show_help_default(const char *opt, const char *arg)
3630 {
3631  av_log_set_callback(log_callback_help);
3632  show_usage();
3633  show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3634  show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3635  printf("\n");
3636  show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3637  show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3638 #if !CONFIG_AVFILTER
3639  show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3640 #else
3641  show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3642 #endif
3643  printf("\nWhile playing:\n"
3644  "q, ESC quit\n"
3645  "f toggle full screen\n"
3646  "p, SPC pause\n"
3647  "m toggle mute\n"
3648  "9, 0 decrease and increase volume respectively\n"
3649  "/, * decrease and increase volume respectively\n"
3650  "a cycle audio channel in the current program\n"
3651  "v cycle video channel\n"
3652  "t cycle subtitle channel in the current program\n"
3653  "c cycle program\n"
3654  "w cycle video filters or show modes\n"
3655  "s activate frame-step mode\n"
3656  "left/right seek backward/forward 10 seconds\n"
3657  "down/up seek backward/forward 1 minute\n"
3658  "page down/page up seek backward/forward 10 minutes\n"
3659  "right mouse click seek to percentage in file corresponding to fraction of width\n"
3660  "left double-click toggle full screen\n"
3661  );
3662 }
3663 
3664 static int lockmgr(void **mtx, enum AVLockOp op)
3665 {
3666  switch(op) {
3667  case AV_LOCK_CREATE:
3668  *mtx = SDL_CreateMutex();
3669  if(!*mtx) {
3670  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
3671  return 1;
3672  }
3673  return 0;
3674  case AV_LOCK_OBTAIN:
3675  return !!SDL_LockMutex(*mtx);
3676  case AV_LOCK_RELEASE:
3677  return !!SDL_UnlockMutex(*mtx);
3678  case AV_LOCK_DESTROY:
3679  SDL_DestroyMutex(*mtx);
3680  return 0;
3681  }
3682  return 1;
3683 }
3684 
3685 /* Called from the main */
3686 int main(int argc, char **argv)
3687 {
3688  int flags;
3689  VideoState *is;
3690 
3691  init_dynload();
3692 
3693  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3694  parse_loglevel(argc, argv, options);
3695 
3696  /* register all codecs, demux and protocols */
3697 #if CONFIG_AVDEVICE
3698  avdevice_register_all();
3699 #endif
3700 #if CONFIG_AVFILTER
3701  avfilter_register_all();
3702 #endif
3703  av_register_all();
3704  avformat_network_init();
3705 
3706  init_opts();
3707 
3708  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3709  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3710 
3711  show_banner(argc, argv, options);
3712 
3713  parse_options(NULL, argc, argv, options, opt_input_file);
3714 
3715  if (!input_filename) {
3716  show_usage();
3717  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3718  av_log(NULL, AV_LOG_FATAL,
3719  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3720  exit(1);
3721  }
3722 
3723  if (display_disable) {
3724  video_disable = 1;
3725  }
3726  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3727  if (audio_disable)
3728  flags &= ~SDL_INIT_AUDIO;
3729  else {
3730  /* Try to work around an occasional ALSA buffer underflow issue when the
3731  * period size is NPOT due to ALSA resampling by forcing the buffer size. */
3732  if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
3733  SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1);
3734  }
3735  if (display_disable)
3736  flags &= ~SDL_INIT_VIDEO;
3737  if (SDL_Init (flags)) {
3738  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3739  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3740  exit(1);
3741  }
3742 
3743  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3744  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3745 
3746  if (av_lockmgr_register(lockmgr)) {
3747  av_log(NULL, AV_LOG_FATAL, "Could not initialize lock manager!\n");
3748  do_exit(NULL);
3749  }
3750 
3751  av_init_packet(&flush_pkt);
3752  flush_pkt.data = (uint8_t *)&flush_pkt;
3753 
3754  is = stream_open(input_filename, file_iformat);
3755  if (!is) {
3756  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3757  do_exit(NULL);
3758  }
3759 
3760  event_loop(is);
3761 
3762  /* never returns */
3763 
3764  return 0;
3765 }
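
One detail worth calling out in main() is the flush_pkt setup: the packet carries no payload, but its data field is pointed at the packet itself, giving the packet queues a unique address they can compare against to recognise a flush request (used on seeks) without any allocation. A minimal sketch of that sentinel idea, with illustrative names:

    #include <libavcodec/avcodec.h>

    static AVPacket ex_flush_marker;

    static void ex_init_flush_marker(void)
    {
        av_init_packet(&ex_flush_marker);
        /* no real data; the address of the packet itself acts as the sentinel */
        ex_flush_marker.data = (uint8_t *)&ex_flush_marker;
    }

    static int ex_is_flush_marker(const AVPacket *pkt)
    {
        return pkt->data == ex_flush_marker.data;
    }

A queue consumer that sees ex_is_flush_marker(pkt) return true resets its decoder state (ffplay calls avcodec_flush_buffers()) instead of feeding the packet to the decoder.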