57 #include <SDL_thread.h>
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
68 #define EXTERNAL_CLOCK_MIN_FRAMES 2
69 #define EXTERNAL_CLOCK_MAX_FRAMES 10
72 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
74 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
77 #define SDL_VOLUME_STEP (0.75)
80 #define AV_SYNC_THRESHOLD_MIN 0.04
82 #define AV_SYNC_THRESHOLD_MAX 0.1
84 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
86 #define AV_NOSYNC_THRESHOLD 10.0
89 #define SAMPLE_CORRECTION_PERCENT_MAX 10
92 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
93 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
94 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
97 #define AUDIO_DIFF_AVG_NB 20
100 #define REFRESH_RATE 0.01
104 #define SAMPLE_ARRAY_SIZE (8 * 65536)
106 #define CURSOR_HIDE_DELAY 1000000
108 #define USE_ONEPASS_SUBTITLE_RENDER 1
129 #define VIDEO_PICTURE_QUEUE_SIZE 3
130 #define SUBPICTURE_QUEUE_SIZE 16
131 #define SAMPLE_QUEUE_SIZE 9
132 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
351 static const char **vfilters_list = NULL;
352 static int nb_vfilters = 0;
353 static char *afilters = NULL;
365 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
399 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
402 vfilters_list[nb_vfilters - 1] = arg;
412 if (channel_count1 == 1 && channel_count2 == 1)
415 return channel_count1 != channel_count2 || fmt1 != fmt2;
422 return channel_layout;
452 SDL_CondSignal(q->cond);
460 SDL_LockMutex(q->mutex);
462 SDL_UnlockMutex(q->mutex);
484 q->mutex = SDL_CreateMutex();
489 q->cond = SDL_CreateCond();
502 SDL_LockMutex(q->mutex);
513 SDL_UnlockMutex(q->mutex);
519 SDL_DestroyMutex(q->mutex);
520 SDL_DestroyCond(q->cond);
525 SDL_LockMutex(q->mutex);
529 SDL_CondSignal(q->cond);
531 SDL_UnlockMutex(q->mutex);
536 SDL_LockMutex(q->mutex);
539 SDL_UnlockMutex(q->mutex);
548 SDL_LockMutex(q->mutex);
577 SDL_UnlockMutex(q->mutex);
669 av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
694 if (!(f->mutex = SDL_CreateMutex())) {
698 if (!(f->cond = SDL_CreateCond())) {
704 f->keep_last = !!keep_last;
705 for (i = 0; i < f->max_size; i++)
714 for (i = 0; i < f->max_size; i++) {
719 SDL_DestroyMutex(f->mutex);
720 SDL_DestroyCond(f->cond);
725 SDL_LockMutex(f->mutex);
726 SDL_CondSignal(f->cond);
727 SDL_UnlockMutex(f->mutex);
732 return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
737 return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
742 return &f->queue[f->rindex];
748 SDL_LockMutex(f->mutex);
749 while (f->size >= f->max_size &&
750 !f->pktq->abort_request) {
751 SDL_CondWait(f->cond, f->mutex);
753 SDL_UnlockMutex(f->mutex);
755 if (f->pktq->abort_request)
758 return &f->queue[f->windex];
764 SDL_LockMutex(f->mutex);
765 while (f->size - f->rindex_shown <= 0 &&
766 !f->pktq->abort_request) {
767 SDL_CondWait(f->cond, f->mutex);
769 SDL_UnlockMutex(f->mutex);
771 if (f->pktq->abort_request)
774 return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
779 if (++f->windex == f->max_size)
781 SDL_LockMutex(f->mutex);
783 SDL_CondSignal(f->cond);
784 SDL_UnlockMutex(f->mutex);
789 if (f->keep_last && !f->rindex_shown) {
794 if (++f->rindex == f->max_size)
796 SDL_LockMutex(f->mutex);
798 SDL_CondSignal(f->cond);
799 SDL_UnlockMutex(f->mutex);
805 return f->size - f->rindex_shown;
812 if (f->rindex_shown && fp->serial == f->pktq->serial)
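frame_queue_peek(), peek_next(), peek_last() and the push/next helpers above all index one fixed-size ring buffer: rindex is the read slot, rindex_shown becomes 1 once the frame at rindex has been handed to the display (keep_last mode), and windex is the write slot. A rough sketch of that bookkeeping, with names mirroring FrameQueue but not the real struct:

/* Ring-buffer bookkeeping sketch (illustrative, keep_last behaviour only). */
typedef struct Ring {
    int rindex, windex, size, max_size;
    int rindex_shown;                 /* 1 once queue[rindex] has been displayed */
} Ring;

static int ring_nb_remaining(const Ring *r) { return r->size - r->rindex_shown; }

static void ring_push(Ring *r)        /* writer has filled queue[windex] */
{
    if (++r->windex == r->max_size)
        r->windex = 0;
    r->size++;
}

static void ring_next(Ring *r)        /* reader is done with the current frame */
{
    if (!r->rindex_shown) {           /* keep the last shown frame around */
        r->rindex_shown = 1;
        return;
    }
    if (++r->rindex == r->max_size)
        r->rindex = 0;
    r->size--;
}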
838 static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
842 if (!*texture || SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
846 SDL_DestroyTexture(*texture);
847 if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
849 if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
852 if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
855 SDL_UnlockTexture(*texture);
863 int scr_xleft, int scr_ytop, int scr_width, int scr_height,
864 int pic_width, int pic_height, AVRational pic_sar)
877 if (width > scr_width) {
881 x = (scr_width - width) / 2;
882 y = (scr_height - height) / 2;
883 rect->x = scr_xleft + x;
884 rect->y = scr_ytop + y;
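calculate_display_rect() scales the picture by its sample aspect ratio, fits it inside the scr_width x scr_height area, and centres the result, which is what lines 877-884 above implement. The idea, sketched without ffplay's exact rounding:

/* Aspect-preserving letterbox fit; simplified sketch, not the verbatim function. */
static void fit_rect(SDL_Rect *rect, int scr_w, int scr_h,
                     int pic_w, int pic_h, AVRational sar)
{
    double ar = (double)pic_w / pic_h * (sar.num > 0 ? av_q2d(sar) : 1.0);
    int height = scr_h;
    int width  = lrint(height * ar) & ~1;   /* keep dimensions even */
    if (width > scr_w) {
        width  = scr_w;
        height = lrint(width / ar) & ~1;
    }
    rect->x = (scr_w - width)  / 2;
    rect->y = (scr_h - height) / 2;
    rect->w = FFMAX(width,  1);
    rect->h = FFMAX(height, 1);
}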
892 *sdl_blendmode = SDL_BLENDMODE_NONE;
893 *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN;
898 *sdl_blendmode = SDL_BLENDMODE_BLEND;
910 SDL_BlendMode sdl_blendmode;
912 if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
914 switch (sdl_pix_fmt) {
915 case SDL_PIXELFORMAT_UNKNOWN:
920 if (*img_convert_ctx != NULL) {
923 if (!SDL_LockTexture(*tex, NULL, (void **)pixels, pitch)) {
926 SDL_UnlockTexture(*tex);
933 case SDL_PIXELFORMAT_IYUV:
934 if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
938 } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
948 if (frame->linesize[0] < 0) {
960 #if SDL_VERSION_ATLEAST(2,0,8)
961 SDL_YUV_CONVERSION_MODE mode = SDL_YUV_CONVERSION_AUTOMATIC;
964 mode = SDL_YUV_CONVERSION_JPEG;
966 mode = SDL_YUV_CONVERSION_BT709;
968 mode = SDL_YUV_CONVERSION_BT601;
970 SDL_SetYUVConversionMode(mode);
981 if (is->subtitle_st) {
985 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
990 if (!sp->width || !sp->height) {
994 if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
997 for (i = 0; i < sp->sub.num_rects; i++) {
1000 sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
1001 sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
1002 sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x);
1003 sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
1009 if (!is->sub_convert_ctx) {
1013 if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) {
1014 sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
1015 0, sub_rect->h, pixels, pitch);
1016 SDL_UnlockTexture(is->sub_texture);
1039 #if USE_ONEPASS_SUBTITLE_RENDER
1043 double xratio = (double)rect.w / (double)sp->width;
1044 double yratio = (double)rect.h / (double)sp->height;
1045 for (i = 0; i < sp->sub.num_rects; i++) {
1046 SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
1047 SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
1048 .y = rect.y + sub_rect->y * yratio,
1049 .w = sub_rect->w * xratio,
1050 .h = sub_rect->h * yratio};
1051 SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
1059 return a < 0 ? a%b + b : a%b;
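This ternary (compute_mod) is the usual positive-modulo idiom: C's % may return a negative result for a negative dividend, so the remainder is shifted back into [0, b). For example, with a = -3 and b = 16, a % b is -3 and the expression yields 13.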
1064 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1067 int rdft_bits, nb_freq;
1069 for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1071 nb_freq = 1 << (rdft_bits - 1);
1077 int data_used = s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1079 delay = s->audio_write_buf_size;
1086 delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1089 delay += 2 * data_used;
1090 if (delay < data_used)
1094 if (s->show_mode == SHOW_MODE_WAVES) {
1098 int a = s->sample_array[idx];
1103 if (h < score && (b ^ c) < 0) {
1110 s->last_i_start = i_start;
1112 i_start = s->last_i_start;
1115 if (s->show_mode == SHOW_MODE_WAVES) {
1116 SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
1119 h = s->height / nb_display_channels;
1122 for (ch = 0; ch < nb_display_channels; ch++) {
1124 y1 = s->ytop + ch * h + (h / 2);
1125 for (x = 0; x < s->width; x++) {
1126 y = (s->sample_array[i] * h2) >> 15;
1140 SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
1142 for (ch = 1; ch < nb_display_channels; ch++) {
1143 y = s->ytop + ch * h;
1147 if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
1150 nb_display_channels = FFMIN(nb_display_channels, 2);
1151 if (rdft_bits != s->rdft_bits) {
1155 s->rdft_bits = rdft_bits;
1158 if (!s->rdft || !s->rdft_data){
1160 s->show_mode = SHOW_MODE_WAVES;
1163 SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
1166 for (ch = 0; ch < nb_display_channels; ch++) {
1167 data[ch] = s->rdft_data + 2 * nb_freq * ch;
1169 for (x = 0; x < 2 * nb_freq; x++) {
1170 double w = (x-nb_freq) * (1.0 / nb_freq);
1171 data[ch][x] = s->sample_array[i] * (1.0 - w * w);
1180 if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
1183 for (y = 0; y < s->height; y++) {
1184 double w = 1 / sqrt(nb_freq);
1185 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
1186 int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][2 * y + 0], data[1][2 * y + 1]))
1191 *pixels = (a << 16) + (b << 8) + ((a+b) >> 1);
1193 SDL_UnlockTexture(s->vis_texture);
1199 if (s->xpos >= s->width)
1209 if (stream_index < 0 || stream_index >= ic->nb_streams)
1220 is->audio_buf1_size = 0;
1246 is->audio_stream = -1;
1250 is->video_stream = -1;
1254 is->subtitle_stream = -1;
1264 is->abort_request = 1;
1265 SDL_WaitThread(is->read_tid, NULL);
1268 if (is->audio_stream >= 0)
1270 if (is->video_stream >= 0)
1272 if (is->subtitle_stream >= 0)
1285 SDL_DestroyCond(is->continue_read_thread);
1289 if (is->vis_texture)
1290 SDL_DestroyTexture(is->vis_texture);
1291 if (is->vid_texture)
1292 SDL_DestroyTexture(is->vid_texture);
1293 if (is->sub_texture)
1294 SDL_DestroyTexture(is->sub_texture);
1306 SDL_DestroyWindow(window);
1329 if (max_width == INT_MAX && max_height == INT_MAX)
1350 SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP);
1365 SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1367 if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1369 else if (is->video_st)
1376 if (*c->queue_serial != c->serial)
1382 return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1389 c->last_updated = time;
1390 c->pts_drift = c->pts - time;
1410 c->queue_serial = queue_serial;
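Lines 1382-1410 implement ffplay's drifting clock: set_clock_at() records pts_drift = pts - now, and get_clock() reads it back as pts_drift + now, minus (now - last_updated) * (1 - speed) to account for a playback speed other than 1.0. In isolation that read-out is roughly:

/* Sketch of the clock read-out, using the same field meanings as Clock above. */
static double clock_value(double pts_drift, double last_updated, double speed)
{
    double now = av_gettime_relative() / 1000000.0;
    return pts_drift + now - (now - last_updated) * (1.0 - speed);
}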
1465 double speed = is->extclk.speed;
1474 if (!is->seek_req) {
1481 SDL_CondSignal(is->continue_read_thread);
1490 if (is->read_pause_return != AVERROR(ENOSYS)) {
1491 is->vidclk.paused = 0;
1496 is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1507 is->muted = !is->muted;
1512 double volume_level = is->audio_volume ? (20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)) : -1000.0;
1513 int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (volume_level + sign * step) / 20.0));
1514 is->audio_volume = av_clip(is->audio_volume == new_volume ? (is->audio_volume + sign) : new_volume, 0, SDL_MIX_MAXVOLUME);
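update_volume() (lines 1512-1514) works in decibels: the current SDL volume in 0..SDL_MIX_MAXVOLUME is converted with 20*log10(v/max), moved by sign*step dB (SDL_VOLUME_STEP is 0.75 dB per key press), and converted back with max*10^(dB/20); the av_clip() together with the "+ sign" fallback guarantees the integer volume changes by at least one step. For example, starting from full volume 128, one 0.75 dB decrease gives lrint(128 * 10^(-0.75/20)) = 117.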
1527 double sync_threshold, diff = 0;
1540 if (diff <= -sync_threshold)
1543 delay = delay + diff;
1544 else if (diff >= sync_threshold)
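compute_target_delay() (lines 1527-1544) compares the video clock with the master clock: diff = video_clock - master_clock. When the video lags by more than the sync threshold the per-frame delay is shortened (possibly to zero); when it runs ahead the delay is stretched or doubled. The threshold itself is the nominal frame delay clamped into [AV_SYNC_THRESHOLD_MIN, AV_SYNC_THRESHOLD_MAX]. A condensed sketch of that decision, not the verbatim function:

static double target_delay(double delay, double diff, double max_frame_duration)
{
    double threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
    if (!isnan(diff) && fabs(diff) < max_frame_duration) {
        if (diff <= -threshold)                                   /* video is late */
            delay = FFMAX(0, delay + diff);
        else if (diff >= threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
            delay = delay + diff;                                 /* far ahead: absorb the gap */
        else if (diff >= threshold)
            delay = 2 * delay;                                    /* slightly ahead: double the wait */
    }
    return delay;
}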
1558 if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1586 if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1588 is->last_vis_time = time;
1590 *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1598 double last_duration, duration, delay;
1605 if (vp->serial != is->videoq.serial) {
1621 if (time < is->frame_timer + delay) {
1622 *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1626 is->frame_timer += delay;
1628 is->frame_timer = time;
1630 SDL_LockMutex(is->pictq.mutex);
1633 SDL_UnlockMutex(is->pictq.mutex);
1639 is->frame_drops_late++;
1645 if (is->subtitle_st) {
1654 if (sp->serial != is->subtitleq.serial
1655 || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1660 for (i = 0; i < sp->sub.num_rects; i++) {
1665 if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
1666 for (j = 0; j < sub_rect->h; j++, pixels += pitch)
1667 memset(pixels, 0, sub_rect->w << 2);
1668 SDL_UnlockTexture(is->sub_texture);
1680 is->force_refresh = 1;
1682 if (is->step && !is->paused)
1687 if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
1690 is->force_refresh = 0;
1692 static int64_t last_time;
1694 int aqsize, vqsize, sqsize;
1698 if (!last_time || (cur_time - last_time) >= 30000) {
1703 aqsize = is->audioq.size;
1705 vqsize = is->videoq.size;
1706 if (is->subtitle_st)
1707 sqsize = is->subtitleq.size;
1709 if (is->audio_st && is->video_st)
1711 else if (is->video_st)
1713 else if (is->audio_st)
1716 "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1718 (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1720 is->frame_drops_early + is->frame_drops_late,
1724 is->video_st ? is->viddec.avctx->pts_correction_num_faulty_dts : 0,
1725 is->video_st ? is->viddec.avctx->pts_correction_num_faulty_pts : 0);
1727 last_time = cur_time;
1736 #if defined(DEBUG_SYNC)
1737 printf("frame_type=%c pts=%0.3f\n",
1782 diff - is->frame_last_filter_delay < 0 &&
1783 is->viddec.pkt_serial == is->vidclk.serial &&
1784 is->videoq.nb_packets) {
1785 is->frame_drops_early++;
1813 outputs->filter_ctx = source_ctx;
1818 inputs->filter_ctx = sink_ctx;
1843 char sws_flags_str[512] = "";
1844 char buffersrc_args[256];
1850 int nb_pix_fmts = 0;
1864 if (!strcmp(e->key, "sws_flags")) {
1865 av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1869 if (strlen(sws_flags_str))
1870 sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1874 snprintf(buffersrc_args, sizeof(buffersrc_args),
1875 "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1877 is->video_st->time_base.num, is->video_st->time_base.den,
1880 av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1884 "ffplay_buffer", buffersrc_args, NULL,
1890 "ffplay_buffersink", NULL, NULL, graph);
1897 last_filter = filt_out;
1901 #define INSERT_FILT(name, arg) do { \
1902 AVFilterContext *filt_ctx; \
1904 ret = avfilter_graph_create_filter(&filt_ctx, \
1905 avfilter_get_by_name(name), \
1906 "ffplay_" name, arg, NULL, graph); \
1910 ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1914 last_filter = filt_ctx; \
1920 if (fabs(theta - 90) < 1.0) {
1921 INSERT_FILT("transpose", "clock");
1922 } else if (fabs(theta - 180) < 1.0) {
1923 INSERT_FILT("hflip", NULL);
1924 INSERT_FILT("vflip", NULL);
1925 } else if (fabs(theta - 270) < 1.0) {
1926 INSERT_FILT("transpose", "cclock");
1927 } else if (fabs(theta) > 1.0) {
1928 char rotate_buf[64];
1929 snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1930 INSERT_FILT("rotate", rotate_buf);
1937 is->in_video_filter = filt_src;
1938 is->out_video_filter = filt_out;
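configure_video_filters() builds the chain source -> (optional user filters / rotation filters) -> buffersink; the INSERT_FILT macro creates one filter with avfilter_graph_create_filter() and links it in front of last_filter. A stripped-down example of constructing such a chain with the public libavfilter API (argument strings are illustrative, error handling omitted):

AVFilterGraph   *graph = avfilter_graph_alloc();
AVFilterContext *src = NULL, *flip = NULL, *sink = NULL;

avfilter_graph_create_filter(&src, avfilter_get_by_name("buffer"), "in",
                             "video_size=1280x720:pix_fmt=0:time_base=1/25:pixel_aspect=1/1",
                             NULL, graph);
avfilter_graph_create_filter(&flip, avfilter_get_by_name("hflip"), "flip", NULL, NULL, graph);
avfilter_graph_create_filter(&sink, avfilter_get_by_name("buffersink"), "out", NULL, NULL, graph);

avfilter_link(src,  0, flip, 0);
avfilter_link(flip, 0, sink, 0);
avfilter_graph_config(graph, NULL);   /* validate links and negotiate formats */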
1944 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1951 char aresample_swr_opts[512] = "";
1953 char asrc_args[256];
1963 if (strlen(aresample_swr_opts))
1964 aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
1965 av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
1968 "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
1970 is->audio_filter_src.channels,
1971 1, is->audio_filter_src.freq);
1972 if (is->audio_filter_src.channel_layout)
1974 ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
1978 asrc_args, NULL, is->agraph);
1994 if (force_output_format) {
2012 is->in_audio_filter = filt_asrc;
2013 is->out_audio_filter = filt_asink;
2028 int last_serial = -1;
2029 int64_t dec_channel_layout;
2052 is->audio_filter_src.channel_layout != dec_channel_layout ||
2053 is->audio_filter_src.freq != frame->sample_rate ||
2054 is->auddec.pkt_serial != last_serial;
2057 char buf1[1024], buf2[1024];
2061 "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2065 is->audio_filter_src.fmt = frame->format;
2066 is->audio_filter_src.channels = frame->channels;
2067 is->audio_filter_src.channel_layout = dec_channel_layout;
2068 is->audio_filter_src.freq = frame->sample_rate;
2069 last_serial = is->auddec.pkt_serial;
2071 if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2086 af->serial = is->auddec.pkt_serial;
2093 if (is->audioq.serial != is->auddec.pkt_serial)
2097 is->auddec.finished = is->auddec.pkt_serial;
2136 int last_serial = -1;
2137 int last_vfilter_idx = 0;
2151 if ( last_w != frame->width
2152 || last_h != frame->height
2153 || last_format != frame->format
2154 || last_serial != is->viddec.pkt_serial
2155 || last_vfilter_idx != is->vfilter_idx) {
2157 "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2169 if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2172 event.user.data1 = is;
2173 SDL_PushEvent(&event);
2176 filt_in = is->in_video_filter;
2177 filt_out = is->out_video_filter;
2178 last_w = frame->width;
2179 last_h = frame->height;
2180 last_format = frame->format;
2181 last_serial = is->viddec.pkt_serial;
2182 last_vfilter_idx = is->vfilter_idx;
2196 is->viddec.finished = is->viddec.pkt_serial;
2203 is->frame_last_filter_delay = 0;
2211 if (is->videoq.serial != is->viddec.pkt_serial)
2243 if (got_subtitle && sp->sub.format == 0) {
2247 sp->serial = is->subdec.pkt_serial;
2248 sp->width = is->subdec.avctx->width;
2249 sp->height = is->subdec.avctx->height;
2254 } else if (got_subtitle) {
2271 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2273 is->sample_array_index += len;
2275 is->sample_array_index = 0;
2284 int wanted_nb_samples = nb_samples;
2288 double diff, avg_diff;
2289 int min_nb_samples, max_nb_samples;
2294 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2297 is->audio_diff_avg_count++;
2300 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2302 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2303 wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2306 wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2309 diff, avg_diff, wanted_nb_samples - nb_samples,
2310 is->audio_clock, is->audio_diff_threshold);
2315 is->audio_diff_avg_count = 0;
2316 is->audio_diff_cum = 0;
2320 return wanted_nb_samples;
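synchronize_audio() (lines 2284-2320) keeps an exponentially weighted average of the audio-vs-master clock difference (audio_diff_avg_coef weights the history) and, once enough measurements have accumulated and the average exceeds audio_diff_threshold, requests diff*freq extra or fewer samples. The request is clamped so the correction never exceeds SAMPLE_CORRECTION_PERCENT_MAX percent of the frame, keeping the pitch change inaudible. The clamp boils down to this sketch:

/* Sample-count correction clamp, sketched to mirror the av_clip() above. */
static int clamp_wanted_samples(int nb_samples, double diff, int freq)
{
    int wanted = nb_samples + (int)(diff * freq);
    int lo = nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100;
    int hi = nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100;
    return av_clip(wanted, lo, hi);
}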
2332 int data_size, resampled_data_size;
2333 int64_t dec_channel_layout;
2335 int wanted_nb_samples;
2352 } while (af->serial != is->audioq.serial);
2358 dec_channel_layout =
2364 dec_channel_layout != is->audio_src.channel_layout ||
2369 is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2374 "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2380 is->audio_src.channel_layout = dec_channel_layout;
2389 int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2404 if (!is->audio_buf1)
2411 if (len2 == out_count) {
2416 is->audio_buf = is->audio_buf1;
2420 resampled_data_size = data_size;
2423 audio_clock0 = is->audio_clock;
2428 is->audio_clock = NAN;
2429 is->audio_clock_serial = af->serial;
2432 static double last_clock;
2433 printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2434 is->audio_clock - last_clock,
2435 is->audio_clock, audio_clock0);
2436 last_clock = is->audio_clock;
2439 return resampled_data_size;
2446 int audio_size, len1;
2451 if (is->audio_buf_index >= is->audio_buf_size) {
2453 if (audio_size < 0) {
2458 if (is->show_mode != SHOW_MODE_VIDEO)
2460 is->audio_buf_size = audio_size;
2462 is->audio_buf_index = 0;
2464 len1 = is->audio_buf_size - is->audio_buf_index;
2467 if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
2468 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2470 memset(stream, 0, len1);
2471 if (!is->muted && is->audio_buf)
2472 SDL_MixAudioFormat(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, is->audio_volume);
2476 is->audio_buf_index += len1;
2478 is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2480 if (!isnan(is->audio_clock)) {
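sdl_audio_callback() must deliver exactly len bytes each time SDL asks: it refills is->audio_buf from the decoder when the previous buffer is exhausted, memcpy()s straight through at full volume, and otherwise zeroes the destination and lets SDL_MixAudioFormat() apply the volume. The overall shape of such a pull-style callback, greatly simplified (Player, decode_more() and the buffer fields are illustrative):

static void audio_callback(void *opaque, Uint8 *stream, int len)
{
    Player *p = opaque;
    while (len > 0) {
        if (p->buf_index >= p->buf_size) {          /* buffer drained: decode more audio */
            p->buf_size  = decode_more(p, p->buf, sizeof(p->buf));
            p->buf_index = 0;
        }
        int chunk = FFMIN(p->buf_size - p->buf_index, len);
        SDL_memset(stream, 0, chunk);               /* silence, then mix at the wanted volume */
        SDL_MixAudioFormat(stream, p->buf + p->buf_index, AUDIO_S16SYS, chunk, p->volume);
        stream += chunk;
        len -= chunk;
        p->buf_index += chunk;
    }
}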
2486 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2488 SDL_AudioSpec wanted_spec, spec;
2490 static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2491 static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2492 int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2494 env = SDL_getenv("SDL_AUDIO_CHANNELS");
2496 wanted_nb_channels = atoi(env);
2504 wanted_spec.channels = wanted_nb_channels;
2505 wanted_spec.freq = wanted_sample_rate;
2506 if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2510 while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2511 next_sample_rate_idx--;
2512 wanted_spec.format = AUDIO_S16SYS;
2513 wanted_spec.silence = 0;
2516 wanted_spec.userdata = opaque;
2517 while (!(audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE))) {
2519 wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2520 wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2521 if (!wanted_spec.channels) {
2522 wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2523 wanted_spec.channels = wanted_nb_channels;
2524 if (!wanted_spec.freq) {
2526 "No more combinations to try, audio open failed\n");
2532 if (spec.format != AUDIO_S16SYS) {
2534 "SDL advised audio format %d is not supported!\n", spec.format);
2537 if (spec.channels != wanted_spec.channels) {
2539 if (!wanted_channel_layout) {
2541 "SDL advised channel count %d is not supported!\n", spec.channels);
2547 audio_hw_params->freq = spec.freq;
2549 audio_hw_params->channels = spec.channels;
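Between the fragments shown, audio_open() also sizes the callback buffer from the two constants defined near the top of the file: at least SDL_AUDIO_MIN_BUFFER_SIZE samples, but small enough that SDL calls back no more than SDL_AUDIO_MAX_CALLBACKS_PER_SEC times per second. Reproduced from memory, so treat it as a sketch rather than the exact source line:

wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE,
                            2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));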
2565 const char *forced_codec_name = NULL;
2569 int64_t channel_layout;
2571 int stream_lowres = lowres;
2573 if (stream_index < 0 || stream_index >= ic->nb_streams)
2592 if (forced_codec_name)
2596 "No codec could be found with name '%s'\n", forced_codec_name);
2609 avctx->lowres = stream_lowres;
2639 is->audio_filter_src.channels = avctx->channels;
2642 if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2644 sink = is->out_audio_filter;
2658 is->audio_hw_buf_size = ret;
2659 is->audio_src = is->audio_tgt;
2660 is->audio_buf_size = 0;
2661 is->audio_buf_index = 0;
2665 is->audio_diff_avg_count = 0;
2668 is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2670 is->audio_stream = stream_index;
2671 is->audio_st = ic->streams[stream_index];
2675 is->auddec.start_pts = is->audio_st->start_time;
2676 is->auddec.start_pts_tb = is->audio_st->time_base;
2683 is->video_stream = stream_index;
2684 is->video_st = ic->streams[stream_index];
2689 is->queue_attachments_req = 1;
2692 is->subtitle_stream = stream_index;
2693 is->subtitle_st = ic->streams[stream_index];
2715 return is->abort_request;
2719 return stream_id < 0 ||
2727 if( !strcmp(s->iformat->name, "rtp")
2728 || !strcmp(s->iformat->name, "rtsp")
2729 || !strcmp(s->iformat->name, "sdp")
2733 if(s->pb && ( !strncmp(s->url, "rtp:", 4)
2734 || !strncmp(s->url, "udp:", 4)
2749 int64_t stream_start_time;
2750 int pkt_in_play_range = 0;
2752 SDL_mutex *wait_mutex = SDL_CreateMutex();
2753 int scan_all_pmts_set = 0;
2762 memset(st_index, -1, sizeof(st_index));
2775 scan_all_pmts_set = 1;
2783 if (scan_all_pmts_set)
2804 for (i = 0; i < orig_nb_streams; i++)
2810 "%s: could not find codec parameters\n", is->filename);
2858 st_index[i] = INT_MAX;
2886 if (codecpar->width)
2899 if (is->show_mode == SHOW_MODE_NONE)
2900 is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2906 if (is->video_stream < 0 && is->audio_stream < 0) {
2913 if (infinite_buffer < 0 && is->realtime)
2917 if (is->abort_request)
2919 if (is->paused != is->last_paused) {
2920 is->last_paused = is->paused;
2926 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2937 int64_t seek_target = is->seek_pos;
2938 int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2939 int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2946 "%s: error while seeking\n", is->ic->url);
2948 if (is->audio_stream >= 0) {
2952 if (is->subtitle_stream >= 0) {
2956 if (is->video_stream >= 0) {
2967 is->queue_attachments_req = 1;
2972 if (is->queue_attachments_req) {
2980 is->queue_attachments_req = 0;
2990 SDL_LockMutex(wait_mutex);
2991 SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2992 SDL_UnlockMutex(wait_mutex);
3008 if (is->video_stream >= 0)
3010 if (is->audio_stream >= 0)
3012 if (is->subtitle_stream >= 0)
3018 SDL_LockMutex(wait_mutex);
3019 SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3020 SDL_UnlockMutex(wait_mutex);
3029 (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3054 event.user.data1 = is;
3055 SDL_PushEvent(&event);
3057 SDL_DestroyMutex(wait_mutex);
3068 is->last_video_stream = is->video_stream = -1;
3069 is->last_audio_stream = is->audio_stream = -1;
3070 is->last_subtitle_stream = is->subtitle_stream = -1;
3091 if (!(is->continue_read_thread = SDL_CreateCond())) {
3099 is->audio_clock_serial = -1;
3110 if (!is->read_tid) {
3122 int start_index, stream_index;
3129 start_index = is->last_video_stream;
3130 old_index = is->video_stream;
3132 start_index = is->last_audio_stream;
3133 old_index = is->audio_stream;
3135 start_index = is->last_subtitle_stream;
3136 old_index = is->subtitle_stream;
3138 stream_index = start_index;
3144 for (start_index = 0; start_index < nb_streams; start_index++)
3149 stream_index = start_index;
3159 is->last_subtitle_stream = -1;
3162 if (start_index == -1)
3166 if (stream_index == start_index)
3168 st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3186 if (p && stream_index != -1)
3206 int next = is->show_mode;
3208 next = (next + 1) % SHOW_MODE_NB;
3209 } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3210 if (is->show_mode != next) {
3211 is->force_refresh = 1;
3212 is->show_mode = next;
3217 double remaining_time = 0.0;
3219 while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
3224 if (remaining_time > 0.0)
3225 av_usleep((int64_t)(remaining_time * 1000000.0));
3227 if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3238 if (!is->ic->nb_chapters)
3242 for (i = 0; i < is->ic->nb_chapters; i++) {
3252 if (i >= is->ic->nb_chapters)
3264 double incr, pos, frac;
3269 switch (event.type) {
3271 if (exit_on_keydown || event.key.keysym.sym == SDLK_ESCAPE || event.key.keysym.sym == SDLK_q) {
3276 if (!cur_stream->width)
3278 switch (event.key.keysym.sym) {
3290 case SDLK_KP_MULTIPLY:
3294 case SDLK_KP_DIVIDE:
3317 if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3318 if (++cur_stream->vfilter_idx >= nb_vfilters)
3319 cur_stream->vfilter_idx = 0;
3321 cur_stream->vfilter_idx = 0;
3382 case SDL_MOUSEBUTTONDOWN:
3387 if (event.button.button == SDL_BUTTON_LEFT) {
3388 static int64_t last_mouse_left_click = 0;
3392 last_mouse_left_click = 0;
3397 case SDL_MOUSEMOTION:
3403 if (event.type == SDL_MOUSEBUTTONDOWN) {
3404 if (event.button.button != SDL_BUTTON_RIGHT)
3408 if (!(event.motion.state & SDL_BUTTON_RMASK))
3418 int tns, thh, tmm, tss;
3421 tmm = (tns % 3600) / 60;
3423 frac = x / cur_stream->width;
3426 mm = (ns % 3600) / 60;
3429 "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3430 hh, mm, ss, thh, tmm, tss);
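The mouse-seek handler turns the click position into a fraction of the window width and from there into a stream timestamp: tns/thh/tmm/tss are the total duration split into hours, minutes and seconds, and hh/mm/ss the seek target printed in the log line above. In essence (AV_NOPTS_VALUE handling omitted; stream_seek() is ffplay's own seek helper, not shown in these fragments):

double  frac = x / (double)cur_stream->width;                                  /* 0.0 .. 1.0 across the window */
int64_t ts   = cur_stream->ic->start_time + frac * cur_stream->ic->duration;  /* AV_TIME_BASE units */
stream_seek(cur_stream, ts, 0, 0);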
3437 case SDL_WINDOWEVENT:
3438 switch (event.window.event) {
3439 case SDL_WINDOWEVENT_SIZE_CHANGED:
3446 case SDL_WINDOWEVENT_EXPOSED:
3496 if (!strcmp(arg, "audio"))
3498 else if (!strcmp(arg, "video"))
3500 else if (!strcmp(arg, "ext"))
3524 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3525 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3534 "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3538 if (!strcmp(filename, "-"))
3545 const char *spec = strchr(opt, ':');
3548 "No media specifier was specified in '%s' in option '%s'\n",
3559 "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3569 {
"x",
HAS_ARG, { .func_arg =
opt_width },
"force displayed width",
"width" },
3570 {
"y",
HAS_ARG, { .func_arg =
opt_height },
"force displayed height",
"height" },
3579 {
"ss",
HAS_ARG, { .func_arg =
opt_seek },
"seek to a given position in seconds",
"pos" },
3580 {
"t",
HAS_ARG, { .func_arg =
opt_duration },
"play \"duration\" seconds of audio/video",
"duration" },
3594 {
"sync",
HAS_ARG |
OPT_EXPERT, { .func_arg =
opt_sync },
"set audio-video sync. type (type=audio/video/ext)",
"type" },
3605 {
"vf",
OPT_EXPERT |
HAS_ARG, { .func_arg = opt_add_vfilter },
"set video filters",
"filter_graph" },
3606 {
"af",
OPT_STRING |
HAS_ARG, { &afilters },
"set audio filters",
"filter_graph" },
3609 {
"showmode",
HAS_ARG, { .func_arg =
opt_show_mode},
"select show mode (0 = video, 1 = waves, 2 = RDFT)",
"mode" },
3611 {
"i",
OPT_BOOL, { &
dummy},
"read specified file",
"input_file"},
3612 {
"codec",
HAS_ARG, { .func_arg =
opt_codec},
"force decoder",
"decoder_name" },
3618 "read and decode the streams to fill missing information with heuristics" },
3639 #if !CONFIG_AVFILTER
3644 printf("\nWhile playing:\n"
3646 "f toggle full screen\n"
3649 "9, 0 decrease and increase volume respectively\n"
3650 "/, * decrease and increase volume respectively\n"
3651 "a cycle audio channel in the current program\n"
3652 "v cycle video channel\n"
3653 "t cycle subtitle channel in the current program\n"
3655 "w cycle video filters or show modes\n"
3656 "s activate frame-step mode\n"
3657 "left/right seek backward/forward 10 seconds or to custom interval if -seek_interval is set\n"
3658 "down/up seek backward/forward 1 minute\n"
3659 "page down/page up seek backward/forward 10 minutes\n"
3660 "right mouse click seek to percentage in file corresponding to fraction of width\n"
3661 "left double-click toggle full screen\n"
3695 "Use -h to get full help or, even better, run 'man %s'\n",
program_name);
3702 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3704 flags &= ~SDL_INIT_AUDIO;
3708 if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
3709 SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE", "1", 1);
3712 flags &= ~SDL_INIT_VIDEO;
3713 if (SDL_Init(flags)) {
3719 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3720 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3726 int flags = SDL_WINDOW_HIDDEN;
3728 #if SDL_VERSION_ATLEAST(2,0,5)
3729 flags |= SDL_WINDOW_ALWAYS_ON_TOP;
3731 av_log(NULL, AV_LOG_WARNING, "Your SDL version doesn't support SDL_WINDOW_ALWAYS_ON_TOP. Feature will be inactive.\n");
3734 flags |= SDL_WINDOW_BORDERLESS;
3736 flags |= SDL_WINDOW_RESIZABLE;
3738 SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
3740 renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
struct SwsContext * sws_getCachedContext(struct SwsContext *context, int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Check if context can be reused, otherwise reallocate a new one.
static void do_exit(VideoState *is)
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
static int opt_frame_size(void *optctx, const char *opt, const char *arg)
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
#define AV_LOG_WARNING
Something somehow does not look correct.
AVPixelFormat
Pixel format.
static SDL_RendererInfo renderer_info
int configure_filtergraph(FilterGraph *fg)
static int frame_queue_nb_remaining(FrameQueue *f)
static void frame_queue_next(FrameQueue *f)
enum AVMediaType codec_type
General type of the encoded data.
int nb_threads
Maximum number of threads used by filters in this graph.
uint64_t channel_layout
Audio channel layout.
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
unsigned int nb_stream_indexes
static int64_t frame_queue_last_pos(FrameQueue *f)
int sample_rate
samples per second
#define FFSWAP(type, a, b)
static int video_thread(void *arg)
static void set_default_window_size(int width, int height, AVRational sar)
#define AV_NOSYNC_THRESHOLD
unsigned int nb_chapters
Number of chapters in AVChapter array.
This struct describes the properties of an encoded stream.
#define AV_LOG_QUIET
Print no output.
static enum AVSampleFormat sample_fmts[]
static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub)
#define AVERROR_EOF
End of file.
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
AVProgram * av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
Find the programs which belong to a given stream.
static int display_disable
int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance)
Activate resampling compensation ("soft" compensation).
#define SAMPLE_ARRAY_SIZE
static void update_volume(VideoState *is, int sign, double step)
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
#define av_opt_set_int_list(obj, name, val, term, flags)
Set a binary option to an integer list.
char * av_asprintf(const char *fmt,...)
static int decoder_start(Decoder *d, int(*fn)(void *), const char *thread_name, void *arg)
const AVClass * avformat_get_class(void)
Get the AVClass for AVFormatContext.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext **img_convert_ctx)
static av_cold int end(AVCodecContext *avctx)
SDL_Texture * vis_texture
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
This structure describes decoded (raw) audio or video data.
AVStream ** streams
A list of all streams in the file.
double frame_last_filter_delay
@ AVCOL_RANGE_JPEG
the normal 2^n-1 "JPEG" YUV ranges
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
static const char * subtitle_codec_name
#define EXTERNAL_CLOCK_MIN_FRAMES
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
static void frame_queue_destory(FrameQueue *f)
#define SAMPLE_QUEUE_SIZE
const char program_name[]
program name, defined by the program for show_version().
AVDictionary * format_opts
int error
contains the error code or 0 if no error happened
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key,...
static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
#define AV_PIX_FMT_RGB32_1
double audio_diff_avg_coef
#define AV_LOG_VERBOSE
Detailed information.
#define CURSOR_HIDE_DELAY
void show_help_children(const AVClass *class, int flags)
Show help for all options with given flags in class and all its children.
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
@ AV_PIX_FMT_BGRA
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
static double compute_target_delay(double delay, VideoState *is)
static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond)
static void stream_close(VideoState *is)
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC
static void init_clock(Clock *c, int *queue_serial)
enum AVMediaType codec_type
#define AV_OPT_FLAG_FILTERING_PARAM
a generic parameter which can be set by the user for filtering
static int opt_seek(void *optctx, const char *opt, const char *arg)
int64_t avio_size(AVIOContext *s)
Get the filesize.
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
static double get_master_clock(VideoState *is)
static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial)
static int subtitle_thread(void *arg)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
static int audio_decode_frame(VideoState *is)
Decode one audio frame and return its uncompressed size.
static int subtitle_disable
struct SwrContext * swr_ctx
static int opt_sync(void *optctx, const char *opt, const char *arg)
static void step_to_next_frame(VideoState *is)
enum AVPixelFormat format
static void video_display(VideoState *is)
uint8_t max_lowres
maximum value for lowres supported by the decoder
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
double parse_number_or_die(const char *context, const char *numstr, int type, double min, double max)
Parse a string and return its corresponding value as a double.
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
#define SDL_AUDIO_MIN_BUFFER_SIZE
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
static int startup_volume
static SDL_Window * window
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
static void toggle_full_screen(VideoState *is)
ff_const59 struct AVInputFormat * iformat
The input container format.
static int packet_queue_init(PacketQueue *q)
#define AUDIO_DIFF_AVG_NB
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
static int opt_duration(void *optctx, const char *opt, const char *arg)
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
int x
top left corner of pict, undefined when pict is not set
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
#define AVERROR_OPTION_NOT_FOUND
Option not found.
static void video_image_display(VideoState *is)
static int opt_show_mode(void *optctx, const char *opt, const char *arg)
SDL_cond * empty_queue_cond
static void set_clock_speed(Clock *c, double speed)
double audio_diff_threshold
const AVClass * sws_get_class(void)
Get the AVClass for swsContext.
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
#define ss(width, name, subs,...)
int avformat_network_init(void)
Do global initialization of network libraries.
static int opt_height(void *optctx, const char *opt, const char *arg)
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
static const struct TextureFormatEntry sdl_texture_format_map[]
int64_t bit_rate
Total stream bitrate in bit/s, 0 if not available.
static int is_full_screen
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
static void set_sdl_yuv_conversion_mode(AVFrame *frame)
AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
Guess the sample aspect ratio of a frame, based on both the stream and the frame aspect ratio.
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
AVDictionary * metadata
Metadata that applies to the whole file.
static int audio_thread(void *arg)
static void set_clock(Clock *c, double pts, int serial)
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate,...
static Frame * frame_queue_peek_next(FrameQueue *f)
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
static void sync_clock_to_slave(Clock *c, Clock *slave)
av_cold int swr_init(struct SwrContext *s)
Initialize context after user parameters have been set.
static void opt_input_file(void *optctx, const char *filename)
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
static void frame_queue_signal(FrameQueue *f)
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
AVDictionary * filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id, AVFormatContext *s, AVStream *st, AVCodec *codec)
Filter out options for given codec.
struct SwsContext * img_convert_ctx
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
#define AV_OPT_FLAG_ENCODING_PARAM
a generic parameter which can be set by the user for muxing or encoding
void show_help_default(const char *opt, const char *arg)
Per-fftool specific help handler.
#define AV_CEIL_RSHIFT(a, b)
static int default_height
int flags
Flags modifying the (de)muxer behaviour.
AVRational sample_aspect_ratio
Video only.
int channels
number of audio channels, only used for audio.
#define AV_PIX_FMT_0BGR32
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
int y
top left corner of pict, undefined when pict is not set
static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, int pic_width, int pic_height, AVRational pic_sar)
static double av_q2d(AVRational a)
Convert an AVRational to a double.
#define EXTERNAL_CLOCK_SPEED_STEP
#define AV_CH_LAYOUT_STEREO_DOWNMIX
static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
static const AVFilterPad outputs[]
static enum AVPixelFormat pix_fmts[]
int av_read_play(AVFormatContext *s)
Start playing a network-based stream (e.g.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
static int opt_codec(void *optctx, const char *opt, const char *arg)
int attribute_align_arg swr_convert(struct SwrContext *s, uint8_t *out_arg[SWR_CH_MAX], int out_count, const uint8_t *in_arg[SWR_CH_MAX], int in_count)
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
static double get_clock(Clock *c)
#define EXTERNAL_CLOCK_SPEED_MIN
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
MyAVPacketList * last_pkt
static unsigned sws_flags
void parse_options(void *optctx, int argc, char **argv, const OptionDef *options, void(*parse_arg_function)(void *, const char *))
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
static SDL_Renderer * renderer
int av_usleep(unsigned usec)
Sleep for a period of time.
The libswresample context.
static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp)
struct SwrContext * swr_alloc_set_opts(struct SwrContext *s, int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate, int64_t in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate, int log_offset, void *log_ctx)
Allocate SwrContext if needed and set/reset common parameters.
#define AV_PIX_FMT_BGR32_1
void av_rdft_calc(RDFTContext *s, FFTSample *data)
static int synchronize_audio(VideoState *is, int nb_samples)
static const char * window_title
@ AVDISCARD_ALL
discard all
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
void init_dynload(void)
Initialize dynamic library loading.
AVCodecParameters * codecpar
Codec parameters associated with this stream.
int w
width of pict, undefined when pict is not set
static void seek_chapter(VideoState *is, int incr)
static int get_master_sync_type(VideoState *is)
const AVClass * avcodec_get_class(void)
Get the AVClass for AVCodecContext.
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
static VideoState * stream_open(const char *filename, AVInputFormat *iformat)
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Rational number (pair of numerator and denominator).
AVFilterContext ** filters
static void stream_cycle_channel(VideoState *is, int codec_type)
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
AVIOContext * pb
I/O context.
void av_log_set_flags(int arg)
static void frame_queue_unref_item(Frame *vp)
Frame queue[FRAME_QUEUE_SIZE]
void init_opts(void)
Initialize the cmdutils option system, in particular allocate the *_opts contexts.
static int64_t cursor_last_shown
unsigned int * stream_index
static Frame * frame_queue_peek(FrameQueue *f)
AVFilterInOut * avfilter_inout_alloc(void)
Allocate a single AVFilterInOut entry.
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
@ AV_PIX_FMT_RGB8
packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
double frame_last_returned_time
static void set_clock_at(Clock *c, double pts, int serial, double time)
static void toggle_pause(VideoState *is)
static int stream_component_open(VideoState *is, int stream_index)
uint64_t av_buffersink_get_channel_layout(const AVFilterContext *ctx)
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
#define AV_PIX_FMT_NE(be, le)
static void event_loop(VideoState *cur_stream)
int sample_rate
Audio only.
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
int16_t sample_array[SAMPLE_ARRAY_SIZE]
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
static int exit_on_mousedown
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
AVDictionary * codec_opts
static int64_t audio_callback_time
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
Read packets of a media file to get stream information.
static int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
int flags2
AV_CODEC_FLAG2_*.
enum AVPictureType pict_type
Picture type of the frame.
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the stream st contained in s is matched by the stream specifier spec.
RDFTContext * av_rdft_init(int nbits, enum RDFTransformType trans)
Set up a real FFT.
static void copy(const float *p1, float *p2, const int length)
AVFormatContext * avformat_alloc_context(void)
Allocate an AVFormatContext.
@ AVDISCARD_DEFAULT
discard useless packets like 0 size packets in avi
static Frame * frame_queue_peek_writable(FrameQueue *f)
int sample_rate
Sample rate of the audio data.
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
static int64_t start_time
enum AVSampleFormat sample_fmt
audio sample format
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
static av_const double hypot(double x, double y)
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents.
static AVRational av_make_q(int num, int den)
Create an AVRational.
static int read_thread(void *arg)
#define AV_PIX_FMT_BGR555
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
Seek to timestamp ts.
#define AV_NOPTS_VALUE
Undefined timestamp value.
SDL_Texture * sub_texture
AVRational av_guess_frame_rate(AVFormatContext *ctx, AVStream *stream, AVFrame *frame)
Guess the frame rate, based on both the container and codec information.
av_cold void swr_free(SwrContext **ss)
Free the given SwrContext and set the pointer to NULL.
SDL_Texture * vid_texture
AVDictionary ** setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts)
Setup AVCodecContext options for avformat_find_stream_info().
printf("static const uint8_t my_array[100] = {\n")
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
static const char * wanted_stream_spec[AVMEDIA_TYPE_NB]
static int infinite_buffer
const char const char void * val
double max_frame_duration
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
AVCodec * avcodec_find_decoder(enum AVCodecID id)
Find a registered decoder with a matching codec ID.
static void packet_queue_destroy(PacketQueue *q)
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
The reader does not expect b to be semantically negative here, and if the code is changed, by maybe adding a cast, a division or other operation, the signedness will almost certainly be mistaken. To avoid this confusion a new type was introduced: SUINT is the C unsigned type, but it holds a signed int; to use the same example, SUINT a ...
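A hedged sketch of the idea behind SUINT (assumption for illustration: SUINT is simply an unsigned typedef used to carry signed values, so intermediate arithmetic wraps with well-defined modular semantics instead of triggering signed-overflow undefined behavior):

    /* Assumption: SUINT is a plain unsigned typedef carrying signed values. */
    typedef unsigned SUINT;

    /* The subtraction can overflow for extreme inputs; doing it in SUINT
     * makes the intermediate result wrap (well defined) while the value
     * still "means" a signed int. */
    static int diff_wrapping(int a, int b)
    {
        SUINT d = (SUINT)a - (SUINT)b;
        return (int)d;   /* conversion back is implementation-defined, not UB */
    }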
uint64_t channel_layout
Channel layout of the audio data.
static void toggle_mute(VideoState *is)
static void decoder_abort(Decoder *d, FrameQueue *fq)
static void video_refresh(void *opaque, double *remaining_time)
#define ns(max_value, name, subs,...)
static float seek_interval
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
static void frame_queue_push(FrameQueue *f)
static SDL_AudioDeviceID audio_dev
static void sigterm_handler(int sig)
#define AV_LOG_INFO
Standard information.
static void packet_queue_abort(PacketQueue *q)
static const char * video_codec_name
int channels
number of audio channels
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
static void packet_queue_flush(PacketQueue *q)
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
@ AVCOL_SPC_SMPTE240M
functionally identical to above
int queue_attachments_req
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
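A hedged sketch of the send/receive decoding loop these calls imply (frame consumption is left as a stub; real code must also flush the decoder by sending a NULL packet at end of stream):

    #include <libavcodec/avcodec.h>

    /* Hedged sketch: feed one packet and drain every frame it produces. */
    static int decode_packet(AVCodecContext *avctx, const AVPacket *pkt, AVFrame *frame)
    {
        int ret = avcodec_send_packet(avctx, pkt);   /* pkt == NULL flushes */
        if (ret < 0)
            return ret;

        for (;;) {
            ret = avcodec_receive_frame(avctx, frame);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                return 0;                            /* need more input / drained */
            if (ret < 0)
                return ret;                          /* decoding error */
            /* ... use frame here ... */
            av_frame_unref(frame);
        }
    }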
ff_const59 AVInputFormat * av_find_input_format(const char *short_name)
Find AVInputFormat based on the short name of the input format.
int nb_samples
number of audio samples (per channel) described by this frame
#define VIDEO_PICTURE_QUEUE_SIZE
#define i(width, name, range_min, range_max)
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
enum VideoState::ShowMode show_mode
struct AudioParams audio_src
const int program_birth_year
program birth year, defined by the program for show_banner()
int avformat_open_input(AVFormatContext **ps, const char *url, ff_const59 AVInputFormat *fmt, AVDictionary **options)
Open an input stream and read the header.
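A hedged sketch of the usual demuxer-opening sequence built from the calls listed here (error handling compressed; the caller owns the returned context):

    #include <libavformat/avformat.h>

    /* Hedged sketch: open a URL, probe stream info, pick the "best" video
     * stream; returns the stream index or a negative AVERROR code. */
    static int open_and_pick_video(const char *url, AVFormatContext **pic)
    {
        AVFormatContext *ic = NULL;
        int ret = avformat_open_input(&ic, url, NULL, NULL);
        if (ret < 0)
            return ret;
        ret = avformat_find_stream_info(ic, NULL);
        if (ret >= 0)
            ret = av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
        if (ret < 0) {
            avformat_close_input(&ic);
            return ret;
        }
        *pic = ic;
        return ret;   /* >= 0: index of the selected video stream */
    }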
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters, AVFilterInOut **inputs, AVFilterInOut **outputs, void *log_ctx)
Add a graph described by a string to a graph.
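A hedged sketch of parsing and configuring a self-contained graph with these two calls (a closed description such as "testsrc=duration=1,nullsink" needs no external endpoints; the string is only an example):

    #include <libavfilter/avfilter.h>

    /* Hedged sketch: build a fully self-contained graph from a string. */
    static int build_graph(const char *desc)   /* e.g. "testsrc=duration=1,nullsink" */
    {
        AVFilterGraph *graph = avfilter_graph_alloc();
        AVFilterInOut *inputs = NULL, *outputs = NULL;
        int ret = graph ? 0 : AVERROR(ENOMEM);

        if (ret >= 0)
            ret = avfilter_graph_parse_ptr(graph, desc, &inputs, &outputs, NULL);
        if (ret >= 0)
            ret = avfilter_graph_config(graph, NULL);   /* validate links and formats */

        avfilter_inout_free(&inputs);
        avfilter_inout_free(&outputs);
        avfilter_graph_free(&graph);
        return ret;
    }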
static int compute_mod(int a, int b)
int avcodec_parameters_to_context(AVCodecContext *codec, const AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
#define AV_TIME_BASE
Internal time base represented as integer.
uint8_t ** extended_data
pointers to the data planes/channels.
static AVInputFormat * file_iformat
#define av_malloc_array(a, b)
static int video_open(VideoState *is)
static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
static int opt_format(void *optctx, const char *opt, const char *arg)
AVSampleFormat
Audio sample formats.
#define AV_PIX_FMT_RGB555
#define AV_OPT_FLAG_DECODING_PARAM
a generic parameter which can be set by the user for demuxing or decoding
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
static void update_sample_display(VideoState *is, short *samples, int samples_size)
@ AV_SAMPLE_FMT_S16
signed 16 bits
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
New fields can be added to the end with minor version bumps.
int av_read_pause(AVFormatContext *s)
Pause a network-based stream (e.g. RTSP stream).
#define AV_PIX_FMT_BGR565
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
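For instance, 1024 samples of stereo interleaved signed 16-bit audio need 1024 * 2 channels * 2 bytes = 4096 bytes; a hedged sketch:

    #include <libavutil/samplefmt.h>

    static int stereo_s16_bytes(void)
    {
        int linesize;
        /* 1024 samples x 2 channels x 2 bytes = 4096 (align=1: no padding) */
        return av_samples_get_buffer_size(&linesize, 2, 1024, AV_SAMPLE_FMT_S16, 1);
    }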
const char * name
Pad name.
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
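For example, converting a 90 kHz transport-stream timestamp to microseconds is a * 1000000 / 90000 with rounding to nearest; a hedged sketch:

    #include <libavutil/mathematics.h>

    /* 135000 ticks at 90 kHz -> 1.5 s -> 1500000 microseconds. */
    static int64_t ts_90khz_to_us(int64_t pts_90k)
    {
        return av_rescale(pts_90k, 1000000, 90000);
    }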
MyAVPacketList * first_pkt
static Frame * frame_queue_peek_readable(FrameQueue *f)
#define AV_PIX_FMT_RGB565
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
int disposition
AV_DISPOSITION_* bit field.
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
#define EXTERNAL_CLOCK_MAX_FRAMES
int h
height of pict, undefined when pict is not set
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal decoder state / flush internal buffers.
these buffered frames must be flushed immediately if a new input produces new output; the filter must not call request_frame to get more. It must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed. If the filter should request a frame on one of its inputs, it should do so repeatedly until at least one frame has been pushed. Return a frame, or at least make progress towards producing one.
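A hedged, minimal sketch of the request_frame pattern described above, for a hypothetical single-input filter. MyContext is invented for the example, and ff_filter_frame()/ff_request_frame() are libavfilter-internal helpers assumed to be available to a filter built inside the tree:

    #include <libavfilter/avfilter.h>

    /* Internal prototypes, normally provided by libavfilter's internal headers. */
    int ff_filter_frame(AVFilterLink *link, AVFrame *frame);
    int ff_request_frame(AVFilterLink *link);

    typedef struct MyContext { AVFrame *queued; } MyContext;   /* hypothetical */

    static int my_request_frame(AVFilterLink *outlink)
    {
        MyContext *s = outlink->src->priv;

        if (s->queued) {                    /* a frame is already queued: push it */
            AVFrame *frame = s->queued;
            s->queued = NULL;
            return ff_filter_frame(outlink, frame);
        }
        /* otherwise forward the request to our input until progress is made */
        return ff_request_frame(outlink->src->inputs[0]);
    }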
#define AV_PIX_FMT_0RGB32
static AVStream * video_stream
static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
static int filter_nbthreads
void log_callback_help(void *ptr, int level, const char *fmt, va_list vl)
Trivial log callback.
static int find_stream_info
static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
static AVInputFormat * iformat
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
@ AV_PIX_FMT_UYVY422
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
#define FF_ARRAY_ELEMS(a)
static int opt_width(void *optctx, const char *opt, const char *arg)
int main(int argc, char **argv)
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
static void show_usage(void)
AVCodec * avcodec_find_decoder_by_name(const char *name)
Find a registered decoder with the specified name.
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
main external API structure.
#define CMDUTILS_COMMON_OPTIONS
static void packet_queue_start(PacketQueue *q)
static const char * audio_codec_name
double get_rotation(AVStream *st)
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
#define AV_SYNC_FRAMEDUP_THRESHOLD
static enum ShowMode show_mode
void sws_freeContext(struct SwsContext *swsContext)
Free the swscaler context swsContext.
static const OptionDef options[]
static void fill_rectangle(int x, int y, int w, int h)
static AVPacket flush_pkt
unsigned int audio_buf1_size
#define AV_SYNC_THRESHOLD_MAX
int av_buffersink_get_channels(const AVFilterContext *ctx)
static void decoder_destroy(Decoder *d)
enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt)
Get the packed alternative form of the given sample format.
int eof_reached
true if was unable to read due to error or eof
static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue)
Filter frames: the word “frame” indicates either a video frame or a group of audio samples.
int64_t duration
Duration of the stream, in AV_TIME_BASE fractional seconds.
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
#define GROW_ARRAY(array, nb_elems)
#define SUBPICTURE_QUEUE_SIZE
static const char * input_filename
static void stream_toggle_pause(VideoState *is)
SDL_cond * continue_read_thread
int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags)
Convenience wrapper for av_dict_set that converts the value to a string and stores it.
static void toggle_audio_display(VideoState *is)
enum AVMediaType codec_type
int opt_default(void *optctx, const char *opt, const char *arg)
Fallback for options that are not explicitly handled, these will be parsed through AVOptions.
char * av_strdup(const char *s)
Duplicate a string.
AVS_VideoFrame int int int int new_height
static int get_video_frame(VideoState *is, AVFrame *frame)
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
struct SwsContext * sub_convert_ctx
static av_always_inline int diff(const uint32_t a, const uint32_t b)
This structure stores compressed data.
static void refresh_loop_wait_event(VideoState *is, SDL_Event *event)
static const uint16_t channel_layouts[7]
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
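A hedged sketch of the common pattern around these dictionary helpers: set decoding options, pass the dictionary to an open call, then report and free whatever was not consumed:

    #include <stdio.h>
    #include <libavutil/dict.h>

    /* Hedged sketch: build an options dictionary and release it afterwards. */
    static void dict_demo(void)
    {
        AVDictionary *opts = NULL;
        AVDictionaryEntry *e = NULL;

        av_dict_set(&opts, "threads", "auto", 0);
        av_dict_set_int(&opts, "lowres", 1, 0);

        /* ... pass &opts to avcodec_open2()/avformat_open_input() here ... */

        while ((e = av_dict_get(opts, "", e, AV_DICT_IGNORE_SUFFIX)))
            printf("option '%s' was not consumed\n", e->key);
        av_dict_free(&opts);
    }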
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
static void stream_component_close(VideoState *is, int stream_index)
static int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1, enum AVSampleFormat fmt2, int64_t channel_count2)
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds.
unsigned int audio_buf_size
#define flags(name, subs,...)
void av_rdft_end(RDFTContext *s)
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
struct MyAVPacketList * next
The exact code depends on how similar the blocks are and how related they are to the block
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds.
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base.
static Frame * frame_queue_peek_last(FrameQueue *f)
void show_help_options(const OptionDef *options, const char *msg, int req_flags, int rej_flags, int alt_flags)
Print help for all options matching specified flags.
static int decoder_reorder_pts
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
struct AudioParams audio_tgt
static AVStream * audio_stream
const AVClass * avfilter_get_class(void)
A linked-list of the inputs/outputs of the filter chain.
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this, just let it be. Each option entry lists a default, minimum, maximum and flags; the name is the option name, keep it simple and lowercase, and descriptions are short.
static void video_audio_display(VideoState *s)
#define AV_SYNC_THRESHOLD_MIN
static void check_external_clock_speed(VideoState *is)
uint32_t start_display_time
#define SAMPLE_CORRECTION_PERCENT_MAX
#define EXTERNAL_CLOCK_SPEED_MAX
static int is_realtime(AVFormatContext *s)
static void * av_x_if_null(const void *p, const void *x)
Return x default pointer in case p is NULL.
static int decode_interrupt_cb(void *ctx)
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type, int wanted_stream_nb, int related_stream, AVCodec **decoder_ret, int flags)
Find the "best" stream in the file.
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
int avio_feof(AVIOContext *s)
Similar to feof() but also returns nonzero on read errors.
static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
#define AV_PIX_FMT_RGB444
static int exit_on_keydown