27 #include "config_components.h"
56 #include <SDL_thread.h>
65 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define EXTERNAL_CLOCK_MIN_FRAMES 2
68 #define EXTERNAL_CLOCK_MAX_FRAMES 10
71 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
73 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
76 #define SDL_VOLUME_STEP (0.75)
79 #define AV_SYNC_THRESHOLD_MIN 0.04
81 #define AV_SYNC_THRESHOLD_MAX 0.1
83 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
85 #define AV_NOSYNC_THRESHOLD 10.0
88 #define SAMPLE_CORRECTION_PERCENT_MAX 10
91 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
92 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
93 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
96 #define AUDIO_DIFF_AVG_NB 20
99 #define REFRESH_RATE 0.01
103 #define SAMPLE_ARRAY_SIZE (8 * 65536)
105 #define CURSOR_HIDE_DELAY 1000000
107 #define USE_ONEPASS_SUBTITLE_RENDER 1
125 #define VIDEO_PICTURE_QUEUE_SIZE 3
126 #define SUBPICTURE_QUEUE_SIZE 16
127 #define SAMPLE_QUEUE_SIZE 9
128 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
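The queue-size defines above feed into FRAME_QUEUE_SIZE, which is simply the largest of the three so one array length can back any of the frame queues. A minimal standalone check of that expansion (FFMAX is redefined here only so the snippet compiles on its own):

#include <stdio.h>

#define FFMAX(a, b) ((a) > (b) ? (a) : (b))   /* same shape as FFmpeg's FFMAX */
#define VIDEO_PICTURE_QUEUE_SIZE 3
#define SUBPICTURE_QUEUE_SIZE    16
#define SAMPLE_QUEUE_SIZE        9
#define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))

int main(void)
{
    /* prints 16: the subtitle queue is the deepest of the three */
    printf("FRAME_QUEUE_SIZE = %d\n", FRAME_QUEUE_SIZE);
    return 0;
}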
362 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
414 if (channel_count1 == 1 && channel_count2 == 1)
417 return channel_count1 != channel_count2 || fmt1 != fmt2;
439 SDL_CondSignal(q->cond);
455 SDL_LockMutex(q->mutex);
457 SDL_UnlockMutex(q->mutex);
478 q->mutex = SDL_CreateMutex();
483 q->cond = SDL_CreateCond();
496 SDL_LockMutex(q->mutex);
503 SDL_UnlockMutex(q->mutex);
510 SDL_DestroyMutex(q->mutex);
511 SDL_DestroyCond(q->cond);
516 SDL_LockMutex(q->mutex);
520 SDL_CondSignal(q->cond);
522 SDL_UnlockMutex(q->mutex);
527 SDL_LockMutex(q->mutex);
530 SDL_UnlockMutex(q->mutex);
539 SDL_LockMutex(q->mutex);
564 SDL_UnlockMutex(q->mutex);
653 if (got_frame && !d->pkt->data) {
671 av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
695 if (!(f->mutex = SDL_CreateMutex())) {
699 if (!(f->cond = SDL_CreateCond())) {
705 f->keep_last = !!keep_last;
706 for (i = 0; i < f->max_size; i++)
715 for (i = 0; i < f->max_size; i++) {
720 SDL_DestroyMutex(f->mutex);
721 SDL_DestroyCond(f->cond);
726 SDL_LockMutex(f->mutex);
727 SDL_CondSignal(f->cond);
728 SDL_UnlockMutex(f->mutex);
733 return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
738 return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
743 return &f->queue[f->rindex];
749 SDL_LockMutex(f->mutex);
750 while (f->size >= f->max_size &&
751 !f->pktq->abort_request) {
752 SDL_CondWait(f->cond, f->mutex);
754 SDL_UnlockMutex(f->mutex);
756 if (f->pktq->abort_request)
759 return &f->queue[f->windex];
765 SDL_LockMutex(f->mutex);
766 while (f->size - f->rindex_shown <= 0 &&
767 !f->pktq->abort_request) {
768 SDL_CondWait(f->cond, f->mutex);
770 SDL_UnlockMutex(f->mutex);
772 if (f->pktq->abort_request)
775 return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
780 if (++f->windex == f->max_size)
782 SDL_LockMutex(f->mutex);
784 SDL_CondSignal(f->cond);
785 SDL_UnlockMutex(f->mutex);
790 if (f->keep_last && !f->rindex_shown) {
795 if (++f->rindex == f->max_size)
797 SDL_LockMutex(f->mutex);
799 SDL_CondSignal(f->cond);
800 SDL_UnlockMutex(f->mutex);
806 return f->size - f->rindex_shown;
812 Frame *fp = &f->queue[f->rindex];
813 if (f->rindex_shown && fp->serial == f->pktq->serial)
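The FrameQueue fragments above implement a bounded ring buffer protected by one mutex/condition pair; rindex_shown records whether the frame at rindex has already been displayed, which is how the keep_last behaviour keeps the last shown frame available. A minimal sketch of the same indexing with hypothetical names, without the SDL locking, to show how peek, peek_next and next interact with rindex_shown:

#include <stdio.h>

#define MAX_SIZE 16

typedef struct {
    int size, rindex, windex, rindex_shown, keep_last;
    int slot[MAX_SIZE];
} RingQueue;

/* frame currently (or about to be) displayed */
static int *rq_peek(RingQueue *q)      { return &q->slot[(q->rindex + q->rindex_shown) % MAX_SIZE]; }
/* the frame after that, used for duration estimation */
static int *rq_peek_next(RingQueue *q) { return &q->slot[(q->rindex + q->rindex_shown + 1) % MAX_SIZE]; }

static void rq_push(RingQueue *q, int v)
{
    q->slot[q->windex] = v;
    if (++q->windex == MAX_SIZE)
        q->windex = 0;
    q->size++;
}

static void rq_next(RingQueue *q)
{
    if (q->keep_last && !q->rindex_shown) {
        q->rindex_shown = 1;          /* first call only marks the frame as shown */
        return;
    }
    if (++q->rindex == MAX_SIZE)
        q->rindex = 0;
    q->size--;
}

int main(void)
{
    RingQueue q = { .keep_last = 1 };
    rq_push(&q, 10); rq_push(&q, 20);
    printf("peek=%d next=%d remaining=%d\n", *rq_peek(&q), *rq_peek_next(&q), q.size - q.rindex_shown);
    rq_next(&q);   /* keep_last: only flips rindex_shown, frame 10 stays queued */
    printf("peek=%d remaining=%d\n", *rq_peek(&q), q.size - q.rindex_shown);
    return 0;
}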
839 static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
843 if (!*texture || SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
847 SDL_DestroyTexture(*texture);
848 if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
850 if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
853 if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
855 memset(pixels, 0, pitch * new_height);
856 SDL_UnlockTexture(*texture);
858 av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n", new_width, new_height, SDL_GetPixelFormatName(new_format));
864 int scr_xleft, int scr_ytop, int scr_width, int scr_height,
865 int pic_width, int pic_height, AVRational pic_sar)
878 if (width > scr_width) {
882 x = (scr_width - width) / 2;
883 y = (scr_height - height) / 2;
884 rect->x = scr_xleft + x;
885 rect->y = scr_ytop + y;
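The arithmetic visible at lines 878-885 is the tail of a fit-and-center computation: the picture is scaled to the largest size that fits the target rectangle while keeping the sample-aspect-adjusted ratio, then centred. A self-contained sketch of that idea (hypothetical helper; the real calculate_display_rect also rounds widths to even values and works with AVRational arithmetic):

#include <stdio.h>

/* Fit a pic_w x pic_h picture (with sample aspect ratio sar) into scr_w x scr_h,
 * centring the result; mirrors the logic around lines 878-885 above. */
static void fit_and_center(int scr_w, int scr_h, int pic_w, int pic_h, double sar,
                           int *x, int *y, int *w, int *h)
{
    double aspect = (sar > 0 ? sar : 1.0) * pic_w / pic_h;

    *h = scr_h;
    *w = (int)(*h * aspect + 0.5);
    if (*w > scr_w) {                 /* too wide: clamp width, recompute height */
        *w = scr_w;
        *h = (int)(*w / aspect + 0.5);
    }
    *x = (scr_w - *w) / 2;
    *y = (scr_h - *h) / 2;
}

int main(void)
{
    int x, y, w, h;
    fit_and_center(1920, 1080, 1280, 720, 1.0, &x, &y, &w, &h);
    printf("%dx%d at (%d,%d)\n", w, h, x, y);        /* 1920x1080 at (0,0) */
    fit_and_center(1920, 1080, 720, 576, 16.0 / 15.0, &x, &y, &w, &h);
    printf("%dx%d at (%d,%d)\n", w, h, x, y);        /* 1440x1080 at (240,0) */
    return 0;
}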
893 *sdl_blendmode = SDL_BLENDMODE_NONE;
894 *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN;
899 *sdl_blendmode = SDL_BLENDMODE_BLEND;
912 SDL_BlendMode sdl_blendmode;
914 if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
916 switch (sdl_pix_fmt) {
917 case SDL_PIXELFORMAT_IYUV:
918 if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
922 } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
932 if (frame->linesize[0] < 0) {
955 #if SDL_VERSION_ATLEAST(2,0,8)
956 SDL_YUV_CONVERSION_MODE mode = SDL_YUV_CONVERSION_AUTOMATIC;
959 mode = SDL_YUV_CONVERSION_JPEG;
961 mode = SDL_YUV_CONVERSION_BT709;
963 mode = SDL_YUV_CONVERSION_BT601;
965 SDL_SetYUVConversionMode(mode);
972 SDL_Rect *rect = &is->render_params.target_rect;
973 SDL_BlendMode blendMode;
975 if (!SDL_GetTextureBlendMode(is->vid_texture, &blendMode) && blendMode == SDL_BLENDMODE_BLEND) {
976 switch (is->render_params.video_background_type) {
978 SDL_SetRenderDrawColor(renderer, 237, 237, 237, 255);
980 SDL_SetRenderDrawColor(renderer, 222, 222, 222, 255);
981 for (int x = 0; x < rect->w; x += tile_size * 2)
983 for (int y = 0; y < rect->h; y += tile_size * 2)
985 SDL_SetRenderDrawColor(renderer, 237, 237, 237, 255);
986 for (int y = 0; y < rect->h; y += tile_size * 2) {
988 for (int x = 0; x < rect->w; x += tile_size * 2)
993 const uint8_t *c = is->render_params.video_background_color;
994 SDL_SetRenderDrawColor(renderer, c[0], c[1], c[2], c[3]);
999 SDL_SetTextureBlendMode(is->vid_texture, SDL_BLENDMODE_NONE);
1009 SDL_Rect *rect = &is->render_params.target_rect;
1018 if (is->subtitle_st) {
1046 if (!is->sub_convert_ctx) {
1050 if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) {
1051 sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
1052 0, sub_rect->h, pixels, pitch);
1053 SDL_UnlockTexture(is->sub_texture);
1078 #if USE_ONEPASS_SUBTITLE_RENDER
1085 SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
1086 SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
1087 .y = rect.y + sub_rect->y * yratio,
1088 .w = sub_rect->w * xratio,
1089 .h = sub_rect->h * yratio};
1090 SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
1098 return a < 0 ? a%b + b : a%b;
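compute_mod at line 1098 is a floor-style modulo: unlike C's % operator it never returns a negative result, which the waveform code relies on when stepping backwards through sample_array. A tiny standalone illustration:

#include <stdio.h>

static int compute_mod(int a, int b)
{
    return a < 0 ? a % b + b : a % b;   /* same expression as line 1098 */
}

int main(void)
{
    printf("%d\n", -3 % 8);              /* -3 : C's remainder keeps the sign */
    printf("%d\n", compute_mod(-3, 8));  /*  5 : wrapped into [0, b)          */
    return 0;
}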
1103 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1106 int rdft_bits, nb_freq;
1108 for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1110 nb_freq = 1 << (rdft_bits - 1);
1113 channels = s->audio_tgt.ch_layout.nb_channels;
1116 int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1118 delay = s->audio_write_buf_size;
1125 delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1128 delay += 2 * data_used;
1129 if (delay < data_used)
1133 if (s->show_mode == SHOW_MODE_WAVES) {
1137 int a = s->sample_array[idx];
1142 if (h < score && (b ^ c) < 0) {
1149 s->last_i_start = i_start;
1151 i_start = s->last_i_start;
1154 if (s->show_mode == SHOW_MODE_WAVES) {
1155 SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
1158 h = s->height / nb_display_channels;
1161 for (ch = 0; ch < nb_display_channels; ch++) {
1163 y1 = s->ytop + ch * h + (h / 2);
1164 for (x = 0; x < s->width; x++) {
1165 y = (s->sample_array[i] * h2) >> 15;
1179 SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
1181 for (ch = 1; ch < nb_display_channels; ch++) {
1182 y = s->ytop + ch * h;
1187 if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
1190 if (s->xpos >= s->width)
1192 nb_display_channels= FFMIN(nb_display_channels, 2);
1193 if (rdft_bits != s->rdft_bits) {
1194 const float rdft_scale = 1.0;
1198 s->rdft_bits = rdft_bits;
1202 0, 1 << rdft_bits, &rdft_scale, 0);
1204 if (err < 0 || !s->rdft_data) {
1206 s->show_mode = SHOW_MODE_WAVES;
1210 SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
1213 for (ch = 0; ch < nb_display_channels; ch++) {
1214 data_in[ch] = s->real_data + 2 * nb_freq * ch;
1215 data[ch] = s->rdft_data + nb_freq * ch;
1217 for (x = 0; x < 2 * nb_freq; x++) {
1218 double w = (x-nb_freq) * (1.0 / nb_freq);
1219 data_in[ch][x] = s->sample_array[i] * (1.0 - w * w);
1224 s->rdft_fn(s->rdft, data[ch], data_in[ch], sizeof(float));
1225 data[ch][0].im = data[ch][nb_freq].re;
1226 data[ch][nb_freq].re = 0;
1230 if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
1232 pixels += pitch * s->height;
1233 for (y = 0; y < s->height; y++) {
1234 double w = 1 / sqrt(nb_freq);
1235 int a = sqrt(w * sqrt(data[0][y].re * data[0][y].re + data[0][y].im * data[0][y].im));
1236 int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][y].re, data[1][y].im))
1241 *pixels = (a << 16) + (b << 8) + ((a+b) >> 1);
1243 SDL_UnlockTexture(s->vis_texture);
1257 if (stream_index < 0 || stream_index >= ic->nb_streams)
1268 is->audio_buf1_size = 0;
1295 is->audio_stream = -1;
1299 is->video_stream = -1;
1303 is->subtitle_stream = -1;
1313 is->abort_request = 1;
1314 SDL_WaitThread(is->read_tid, NULL);
1317 if (is->audio_stream >= 0)
1319 if (is->video_stream >= 0)
1321 if (is->subtitle_stream >= 0)
1334 SDL_DestroyCond(is->continue_read_thread);
1337 if (is->vis_texture)
1338 SDL_DestroyTexture(is->vis_texture);
1339 if (is->vid_texture)
1340 SDL_DestroyTexture(is->vid_texture);
1341 if (is->sub_texture)
1342 SDL_DestroyTexture(is->sub_texture);
1356 SDL_DestroyWindow(window);
1383 if (max_width == INT_MAX && max_height == INT_MAX)
1404 SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP);
1419 SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1421 if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1423 else if (is->video_st)
1430 if (*c->queue_serial != c->serial)
1436 return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
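Line 1436 is the core of ffplay's clock objects: a clock stores pts_drift = pts - system_time at the moment it is set, and reconstructs the current pts from the wall clock on demand, with the (time - last_updated) * (1.0 - speed) term accounting for playback speeds other than 1.0. A minimal sketch of the pattern with hypothetical names:

#include <stdio.h>

typedef struct {
    double pts_drift;     /* clock pts minus the system time when it was set */
    double last_updated;  /* system time of the last set call                */
    double speed;         /* playback speed, 1.0 = realtime                  */
} MiniClock;

static void mini_set_clock_at(MiniClock *c, double pts, double time)
{
    c->pts_drift    = pts - time;
    c->last_updated = time;
}

/* Same formula as line 1436 above. */
static double mini_get_clock(const MiniClock *c, double time)
{
    return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
}

int main(void)
{
    MiniClock c = { .speed = 1.0 };
    mini_set_clock_at(&c, 10.0, 100.0);          /* pts 10 s at wall time 100 s */
    printf("%.2f\n", mini_get_clock(&c, 102.5)); /* 12.50: advances with wall time */
    c.speed = 0.5;                               /* half speed from now on */
    mini_set_clock_at(&c, mini_get_clock(&c, 102.5), 102.5);
    printf("%.2f\n", mini_get_clock(&c, 104.5)); /* 13.50: only 1.0 s of media in 2.0 s */
    return 0;
}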
1443 c->last_updated = time;
1444 c->pts_drift = c->pts - time;
1464 c->queue_serial = queue_serial;
1519 double speed = is->extclk.speed;
1528 if (!is->seek_req) {
1535 SDL_CondSignal(is->continue_read_thread);
1544 if (is->read_pause_return != AVERROR(ENOSYS)) {
1545 is->vidclk.paused = 0;
1550 is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1561 is->muted = !is->muted;
1566 double volume_level = is->audio_volume ? (20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)) : -1000.0;
1567 int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (volume_level + sign * step) / 20.0));
1568 is->audio_volume = av_clip(is->audio_volume == new_volume ? (is->audio_volume + sign) : new_volume, 0, SDL_MIX_MAXVOLUME);
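Lines 1566-1568 step the volume logarithmically: the current SDL volume is converted to decibels, SDL_VOLUME_STEP (0.75 dB, see the define near the top) is added or subtracted, and the result is converted back and clipped to [0, SDL_MIX_MAXVOLUME]. The same arithmetic in isolation (hedged sketch; SDL_MIX_MAXVOLUME is 128 in SDL2, av_clip replaced by plain comparisons):

#include <math.h>
#include <stdio.h>

#define MIX_MAXVOLUME 128          /* SDL_MIX_MAXVOLUME in SDL2 */
#define VOLUME_STEP   0.75         /* dB per keypress, as in the define above */

static int step_volume(int volume, int sign)
{
    /* current level in dB relative to full scale; silence maps to -1000 dB */
    double level = volume ? 20.0 * log10(volume / (double)MIX_MAXVOLUME) : -1000.0;
    int new_volume = (int)lrint(MIX_MAXVOLUME * pow(10.0, (level + sign * VOLUME_STEP) / 20.0));
    /* if rounding made no progress, nudge by one raw step; then clip */
    if (new_volume == volume)
        new_volume = volume + sign;
    if (new_volume < 0) new_volume = 0;
    if (new_volume > MIX_MAXVOLUME) new_volume = MIX_MAXVOLUME;
    return new_volume;
}

int main(void)
{
    int v = MIX_MAXVOLUME;
    for (int i = 0; i < 4; i++) {
        v = step_volume(v, -1);        /* four "volume down" presses */
        printf("volume = %d\n", v);
    }
    return 0;
}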
1581 double sync_threshold, diff = 0;
1594 if (diff <= -sync_threshold)
1597 delay = delay + diff;
1598 else if (diff >= sync_threshold)
1612 if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
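The comparisons at lines 1594-1598 adjust the nominal frame delay by the video-versus-master clock difference, but only when that difference exceeds a tolerance derived from the AV_SYNC_THRESHOLD_MIN/MAX defines above. A hedged sketch of the decision (the full compute_target_delay also treats the frame-duplication threshold separately and ignores differences beyond AV_NOSYNC_THRESHOLD):

#include <math.h>
#include <stdio.h>

#define SYNC_THRESHOLD_MIN   0.04   /* values copied from the defines above */
#define SYNC_THRESHOLD_MAX   0.1
#define NOSYNC_THRESHOLD     10.0

/* delay: nominal duration of the current frame; diff: video clock - master clock */
static double target_delay(double delay, double diff)
{
    /* tolerance scales with the frame duration, clamped to [MIN, MAX] */
    double sync_threshold = fmax(SYNC_THRESHOLD_MIN, fmin(SYNC_THRESHOLD_MAX, delay));

    if (!isnan(diff) && fabs(diff) < NOSYNC_THRESHOLD) {
        if (diff <= -sync_threshold)            /* video is late: shorten the wait */
            delay = fmax(0, delay + diff);
        else if (diff >= sync_threshold)        /* video is early: lengthen the wait */
            delay = delay + diff;
    }
    return delay;
}

int main(void)
{
    printf("%.3f\n", target_delay(0.040, -0.120)); /* late  -> show next frame now   */
    printf("%.3f\n", target_delay(0.040,  0.120)); /* early -> wait roughly the diff */
    printf("%.3f\n", target_delay(0.040,  0.010)); /* within tolerance -> unchanged  */
    return 0;
}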
1641 if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1643 is->last_vis_time = time;
1645 *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1653 double last_duration, duration, delay;
1660 if (vp->serial != is->videoq.serial) {
1676 if (time < is->frame_timer + delay) {
1677 *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1681 is->frame_timer += delay;
1683 is->frame_timer = time;
1685 SDL_LockMutex(is->pictq.mutex);
1688 SDL_UnlockMutex(is->pictq.mutex);
1694 is->frame_drops_late++;
1700 if (is->subtitle_st) {
1709 if (sp->serial != is->subtitleq.serial
1720 if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
1721 for (j = 0; j < sub_rect->h; j++, pixels += pitch)
1722 memset(pixels, 0, sub_rect->w << 2);
1723 SDL_UnlockTexture(is->sub_texture);
1735 is->force_refresh = 1;
1737 if (is->step && !is->paused)
1742 if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
1745 is->force_refresh = 0;
1750 int aqsize, vqsize, sqsize;
1754 if (!last_time || (cur_time - last_time) >= 30000) {
1759 aqsize = is->audioq.size;
1761 vqsize = is->videoq.size;
1762 if (is->subtitle_st)
1763 sqsize = is->subtitleq.size;
1765 if (is->audio_st && is->video_st)
1767 else if (is->video_st)
1769 else if (is->audio_st)
1774 "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB \r",
1776 (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : "   ")),
1778 is->frame_drops_early + is->frame_drops_late,
1784 fprintf(stderr, "%s", buf.str);
1791 last_time = cur_time;
1800 #if defined(DEBUG_SYNC)
1801 printf("frame_type=%c pts=%0.3f\n",
1846 diff - is->frame_last_filter_delay < 0 &&
1847 is->viddec.pkt_serial == is->vidclk.serial &&
1848 is->videoq.nb_packets) {
1849 is->frame_drops_early++;
1876 outputs->filter_ctx = source_ctx;
1881 inputs->filter_ctx = sink_ctx;
1906 char sws_flags_str[512] = "";
1912 int nb_pix_fmts = 0;
1929 if (!strcmp(e->key, "sws_flags")) {
1930 av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1934 if (strlen(sws_flags_str))
1935 sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1966 "ffplay_buffersink");
1990 last_filter = filt_out;
1994 #define INSERT_FILT(name, arg) do { \
1995 AVFilterContext *filt_ctx; \
1997 ret = avfilter_graph_create_filter(&filt_ctx, \
1998 avfilter_get_by_name(name), \
1999 "ffplay_" name, arg, NULL, graph); \
2003 ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
2007 last_filter = filt_ctx; \
2016 if (!displaymatrix) {
2018 is->video_st->codecpar->nb_coded_side_data,
2025 if (fabs(theta - 90) < 1.0) {
2026 INSERT_FILT("transpose", displaymatrix[3] > 0 ? "cclock_flip" : "clock");
2027 } else if (fabs(theta - 180) < 1.0) {
2028 if (displaymatrix[0] < 0)
2030 if (displaymatrix[4] < 0)
2032 } else if (fabs(theta - 270) < 1.0) {
2033 INSERT_FILT("transpose", displaymatrix[3] < 0 ? "clock_flip" : "cclock");
2034 } else if (fabs(theta) > 1.0) {
2035 char rotate_buf[64];
2036 snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
2039 if (displaymatrix && displaymatrix[4] < 0)
2047 is->in_video_filter = filt_src;
2048 is->out_video_filter = filt_out;
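configure_video_filters reads the stream's display matrix and, through the INSERT_FILT macro defined at line 1994, inserts transpose, hflip/vflip or rotate filters so frames are shown upright: roughly 90 and 270 degrees map to transpose variants chosen from the matrix sign, roughly 180 degrees to flips, and any other angle to the rotate filter with the angle in radians. A tiny sketch of just that angle-to-filter mapping (hypothetical helper, deliberately simplified; the real code checks the individual matrix entries for the 180-degree case):

#include <math.h>
#include <stdio.h>

/* Map a rotation angle in degrees (as returned by get_rotation()) to the
 * filter string that would be inserted; flip says the matrix also mirrors. */
static const char *rotation_filter(double theta, int flip)
{
    if (fabs(theta - 90) < 1.0)
        return flip ? "transpose=cclock_flip" : "transpose=clock";
    if (fabs(theta - 180) < 1.0)
        return "hflip,vflip";               /* real code inserts each flip only if needed */
    if (fabs(theta - 270) < 1.0)
        return flip ? "transpose=clock_flip" : "transpose=cclock";
    if (fabs(theta) > 1.0)
        return "rotate=<theta>*PI/180";
    return "";                              /* already upright */
}

int main(void)
{
    printf("%s\n", rotation_filter(90, 0));
    printf("%s\n", rotation_filter(180, 0));
    printf("%s\n", rotation_filter(42, 0));
    return 0;
}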
2058 char aresample_swr_opts[512] = "";
2061 char asrc_args[256];
2073 if (strlen(aresample_swr_opts))
2074 aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
2075 av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
2080 "sample_rate=%d:sample_fmt=%s:time_base=%d/%d:channel_layout=%s",
2082 1, is->audio_filter_src.freq, bp.str);
2086 asrc_args, NULL, is->agraph);
2091 "ffplay_abuffersink");
2100 if (force_output_format) {
2116 is->in_audio_filter = filt_asrc;
2117 is->out_audio_filter = filt_asink;
2132 int last_serial = -1;
2150 frame->format, frame->ch_layout.nb_channels) ||
2152 is->audio_filter_src.freq != frame->sample_rate ||
2153 is->auddec.pkt_serial != last_serial;
2156 char buf1[1024], buf2[1024];
2160 "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2161 is->audio_filter_src.freq, is->audio_filter_src.ch_layout.nb_channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2164 is->audio_filter_src.fmt = frame->format;
2168 is->audio_filter_src.freq = frame->sample_rate;
2169 last_serial = is->auddec.pkt_serial;
2186 af->serial = is->auddec.pkt_serial;
2192 if (is->audioq.serial != is->auddec.pkt_serial)
2196 is->auddec.finished = is->auddec.pkt_serial;
2231 int last_serial = -1;
2232 int last_vfilter_idx = 0;
2244 if ( last_w != frame->width
2245 || last_h != frame->height
2246 || last_format != frame->format
2247 || last_serial != is->viddec.pkt_serial
2248 || last_vfilter_idx != is->vfilter_idx) {
2250 "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2265 event.user.data1 = is;
2266 SDL_PushEvent(&event);
2269 filt_in = is->in_video_filter;
2270 filt_out = is->out_video_filter;
2271 last_w = frame->width;
2272 last_h = frame->height;
2273 last_format = frame->format;
2274 last_serial = is->viddec.pkt_serial;
2275 last_vfilter_idx = is->vfilter_idx;
2291 is->viddec.finished = is->viddec.pkt_serial;
2300 is->frame_last_filter_delay = 0;
2306 if (is->videoq.serial != is->viddec.pkt_serial)
2335 if (got_subtitle && sp->sub.format == 0) {
2339 sp->serial = is->subdec.pkt_serial;
2340 sp->width = is->subdec.avctx->width;
2341 sp->height = is->subdec.avctx->height;
2346 } else if (got_subtitle) {
2363 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2365 is->sample_array_index += len;
2367 is->sample_array_index = 0;
2376 int wanted_nb_samples = nb_samples;
2380 double diff, avg_diff;
2381 int min_nb_samples, max_nb_samples;
2386 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2389 is->audio_diff_avg_count++;
2392 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2394 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2395 wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2398 wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2401 diff, avg_diff, wanted_nb_samples - nb_samples,
2402 is->audio_clock, is->audio_diff_threshold);
2407 is->audio_diff_avg_count = 0;
2408 is->audio_diff_cum = 0;
2412 return wanted_nb_samples;
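synchronize_audio (lines 2376-2412) only corrects the sample count once the audio clock has drifted consistently: each measured difference is folded into an exponential average (audio_diff_cum weighted by audio_diff_avg_coef), and only when the average exceeds audio_diff_threshold is the requested sample count adjusted, clipped to within SAMPLE_CORRECTION_PERCENT_MAX percent so the correction stays inaudible. A compact sketch of that estimator with hypothetical names:

#include <math.h>
#include <stdio.h>

#define CORRECTION_PERCENT_MAX 10     /* same bound as the define above */

typedef struct {
    double coef;       /* smoothing coefficient, e.g. exp(log(0.01) / 20) */
    double cum;        /* running weighted sum of clock differences      */
    double threshold;  /* seconds of drift tolerated before resampling   */
    int    freq;       /* audio sample rate                              */
} DriftEstimator;

static int wanted_samples(DriftEstimator *e, double diff, int nb_samples)
{
    int wanted = nb_samples;

    e->cum = diff + e->coef * e->cum;                 /* same update as line 2386 */
    double avg = e->cum * (1.0 - e->coef);            /* geometric-series normalisation */

    if (fabs(avg) >= e->threshold) {
        wanted = nb_samples + (int)(diff * e->freq);
        int min = nb_samples * (100 - CORRECTION_PERCENT_MAX) / 100;
        int max = nb_samples * (100 + CORRECTION_PERCENT_MAX) / 100;
        wanted = wanted < min ? min : wanted > max ? max : wanted;
    }
    return wanted;
}

int main(void)
{
    DriftEstimator e = { .coef = exp(log(0.01) / 20), .threshold = 0.02, .freq = 48000 };
    /* audio running 30 ms ahead of the master clock, frame after frame:
     * the first few calls leave the count alone, then the clipped correction kicks in */
    for (int i = 0; i < 5; i++)
        printf("wanted = %d\n", wanted_samples(&e, 0.030, 1024));
    return 0;
}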
2424 int data_size, resampled_data_size;
2426 int wanted_nb_samples;
2443 } while (af->serial != is->audioq.serial);
2458 &is->audio_tgt.ch_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2463 "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2477 uint8_t **out = &is->audio_buf1;
2493 if (!is->audio_buf1)
2500 if (len2 == out_count) {
2505 is->audio_buf = is->audio_buf1;
2509 resampled_data_size = data_size;
2512 audio_clock0 = is->audio_clock;
2517 is->audio_clock = NAN;
2518 is->audio_clock_serial = af->serial;
2521 static double last_clock;
2522 printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2523 is->audio_clock - last_clock,
2524 is->audio_clock, audio_clock0);
2525 last_clock = is->audio_clock;
2528 return resampled_data_size;
2535 int audio_size, len1;
2540 if (is->audio_buf_index >= is->audio_buf_size) {
2542 if (audio_size < 0) {
2547 if (is->show_mode != SHOW_MODE_VIDEO)
2549 is->audio_buf_size = audio_size;
2551 is->audio_buf_index = 0;
2553 len1 = is->audio_buf_size - is->audio_buf_index;
2556 if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
2557 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2559 memset(stream, 0, len1);
2560 if (!is->muted && is->audio_buf)
2561 SDL_MixAudioFormat(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, is->audio_volume);
2565 is->audio_buf_index += len1;
2567 is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2569 if (!isnan(is->audio_clock)) {
2577 SDL_AudioSpec wanted_spec, spec;
2579 static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2580 static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2581 int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2582 int wanted_nb_channels = wanted_channel_layout->nb_channels;
2584 env = SDL_getenv("SDL_AUDIO_CHANNELS");
2586 wanted_nb_channels = atoi(env);
2594 wanted_nb_channels = wanted_channel_layout->nb_channels;
2595 wanted_spec.channels = wanted_nb_channels;
2596 wanted_spec.freq = wanted_sample_rate;
2597 if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2601 while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2602 next_sample_rate_idx--;
2603 wanted_spec.format = AUDIO_S16SYS;
2604 wanted_spec.silence = 0;
2607 wanted_spec.userdata = opaque;
2608 while (!(audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE))) {
2610 wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2611 wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2612 if (!wanted_spec.channels) {
2613 wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2614 wanted_spec.channels = wanted_nb_channels;
2615 if (!wanted_spec.freq) {
2617 "No more combinations to try, audio open failed\n");
2623 if (spec.format != AUDIO_S16SYS) {
2625 "SDL advised audio format %d is not supported!\n", spec.format);
2628 if (spec.channels != wanted_spec.channels) {
2633 "SDL advised channel count %d is not supported!\n", spec.channels);
2639 audio_hw_params->freq = spec.freq;
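The loop around line 2608 negotiates the audio device: if SDL_OpenAudioDevice refuses the requested layout, ffplay retries with progressively fewer channels and then with the next lower standard sample rate, failing only when both tables are exhausted. A sketch of just the fallback order, without SDL, reusing the tables from lines 2579-2580 (the open call is simulated as always failing):

#include <stdio.h>

static const int next_nb_channels[]  = {0, 0, 1, 6, 2, 6, 4, 6};
static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};

int main(void)
{
    int channels = 8, freq = 48000;                  /* hypothetical request */
    int rate_idx = sizeof(next_sample_rates) / sizeof(next_sample_rates[0]) - 1;

    while (rate_idx && next_sample_rates[rate_idx] >= freq)
        rate_idx--;                                  /* first fallback rate below the request */

    /* pretend every open attempt fails, to print the fallback order */
    while (1) {
        printf("try %d ch @ %d Hz\n", channels, freq);
        channels = next_nb_channels[channels < 7 ? channels : 7];
        if (!channels) {
            freq = next_sample_rates[rate_idx--];
            channels = 8;                            /* restore the wanted channel count */
            if (!freq) {
                printf("no more combinations to try\n");
                break;
            }
        }
    }
    return 0;
}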
2693 const char *forced_codec_name = NULL;
2698 int stream_lowres = lowres;
2700 if (stream_index < 0 || stream_index >= ic->nb_streams)
2719 if (forced_codec_name)
2723 "No codec could be found with name '%s'\n", forced_codec_name);
2736 avctx->lowres = stream_lowres;
2780 sink = is->out_audio_filter;
2790 is->audio_hw_buf_size = ret;
2791 is->audio_src = is->audio_tgt;
2792 is->audio_buf_size = 0;
2793 is->audio_buf_index = 0;
2797 is->audio_diff_avg_count = 0;
2800 is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2802 is->audio_stream = stream_index;
2803 is->audio_st = ic->streams[stream_index];
2808 is->auddec.start_pts = is->audio_st->start_time;
2809 is->auddec.start_pts_tb = is->audio_st->time_base;
2816 is->video_stream = stream_index;
2817 is->video_st = ic->streams[stream_index];
2823 is->queue_attachments_req = 1;
2826 is->subtitle_stream = stream_index;
2827 is->subtitle_st = ic->streams[stream_index];
2851 return is->abort_request;
2855 return stream_id < 0 ||
2863 if( !strcmp(s->iformat->name, "rtp")
2864 || !strcmp(s->iformat->name, "rtsp")
2865 || !strcmp(s->iformat->name, "sdp")
2869 if(s->pb && ( !strncmp(s->url, "rtp:", 4)
2870 || !strncmp(s->url, "udp:", 4)
2886 char metadata_description[96];
2887 int pkt_in_play_range = 0;
2889 SDL_mutex *wait_mutex = SDL_CreateMutex();
2890 int scan_all_pmts_set = 0;
2899 memset(st_index, -1, sizeof(st_index));
2918 scan_all_pmts_set = 1;
2926 if (scan_all_pmts_set)
2945 "Error setting up avformat_find_stream_info() options\n");
2952 for (i = 0; i < orig_nb_streams; i++)
2958 "%s: could not find codec parameters\n", is->filename);
2995 fprintf(stderr, "\x1b[2K\r");
3013 st_index[i] = INT_MAX;
3041 if (codecpar->width)
3054 if (is->show_mode == SHOW_MODE_NONE)
3055 is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
3061 if (is->video_stream < 0 && is->audio_stream < 0) {
3068 if (infinite_buffer < 0 && is->realtime)
3072 if (is->abort_request)
3074 if (is->paused != is->last_paused) {
3075 is->last_paused = is->paused;
3081 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
3093 int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
3094 int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
3101 "%s: error while seeking\n", is->ic->url);
3103 if (is->audio_stream >= 0)
3105 if (is->subtitle_stream >= 0)
3107 if (is->video_stream >= 0)
3116 is->queue_attachments_req = 1;
3121 if (is->queue_attachments_req) {
3128 is->queue_attachments_req = 0;
3138 SDL_LockMutex(wait_mutex);
3139 SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3140 SDL_UnlockMutex(wait_mutex);
3156 if (is->video_stream >= 0)
3158 if (is->audio_stream >= 0)
3160 if (is->subtitle_stream >= 0)
3170 SDL_LockMutex(wait_mutex);
3171 SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3172 SDL_UnlockMutex(wait_mutex);
3180 fprintf(stderr, "\x1b[2K\r");
3182 sizeof(metadata_description),
3183 "\r New metadata for stream %d",
3194 (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3220 event.user.data1 = is;
3221 SDL_PushEvent(&event);
3223 SDL_DestroyMutex(wait_mutex);
3235 is->last_video_stream = is->video_stream = -1;
3236 is->last_audio_stream = is->audio_stream = -1;
3237 is->last_subtitle_stream = is->subtitle_stream = -1;
3258 if (!(is->continue_read_thread = SDL_CreateCond())) {
3266 is->audio_clock_serial = -1;
3287 if (!is->read_tid) {
3299 int start_index, stream_index;
3306 start_index = is->last_video_stream;
3307 old_index = is->video_stream;
3309 start_index = is->last_audio_stream;
3310 old_index = is->audio_stream;
3312 start_index = is->last_subtitle_stream;
3313 old_index = is->subtitle_stream;
3315 stream_index = start_index;
3321 for (start_index = 0; start_index < nb_streams; start_index++)
3322 if (p->stream_index[start_index] == stream_index)
3326 stream_index = start_index;
3336 is->last_subtitle_stream = -1;
3339 if (start_index == -1)
3343 if (stream_index == start_index)
3345 st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3363 if (p && stream_index != -1)
3364 stream_index = p->stream_index[stream_index];
3383 int next = is->show_mode;
3385 next = (next + 1) % SHOW_MODE_NB;
3386 } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3387 if (is->show_mode != next) {
3388 is->force_refresh = 1;
3389 is->show_mode = next;
3394 double remaining_time = 0.0;
3396 while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
3401 if (remaining_time > 0.0)
3404 if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3415 if (!is->ic->nb_chapters)
3419 for (i = 0; i < is->ic->nb_chapters; i++) {
3429 if (i >= is->ic->nb_chapters)
3441 double incr, pos, frac;
3446 switch (event.type) {
3448 if (exit_on_keydown || event.key.keysym.sym == SDLK_ESCAPE || event.key.keysym.sym == SDLK_q) {
3453 if (!cur_stream->width)
3455 switch (event.key.keysym.sym) {
3467 case SDLK_KP_MULTIPLY:
3471 case SDLK_KP_DIVIDE:
3555 case SDL_MOUSEBUTTONDOWN:
3560 if (event.button.button == SDL_BUTTON_LEFT) {
3561 static int64_t last_mouse_left_click = 0;
3565 last_mouse_left_click = 0;
3570 case SDL_MOUSEMOTION:
3576 if (event.type == SDL_MOUSEBUTTONDOWN) {
3577 if (event.button.button != SDL_BUTTON_RIGHT)
3581 if (!(event.motion.state & SDL_BUTTON_RMASK))
3591 int tns, thh, tmm, tss;
3594 tmm = (tns % 3600) / 60;
3596 frac = x / cur_stream->width;
3599 mm = (ns % 3600) / 60;
3602 "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3603 hh, mm, ss, thh, tmm, tss);
3610 case SDL_WINDOWEVENT:
3611 switch (event.window.event) {
3612 case SDL_WINDOWEVENT_SIZE_CHANGED:
3621 case SDL_WINDOWEVENT_EXPOSED:
3669 if (!strcmp(arg, "audio"))
3671 else if (!strcmp(arg, "video"))
3673 else if (!strcmp(arg, "ext"))
3685 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3686 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT : SHOW_MODE_NONE;
3702 "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3706 if (!strcmp(filename, "-"))
3717 const char *spec = strchr(opt, ':');
3721 "No media specifier was specified in '%s' in option '%s'\n",
3733 "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3790 "read and decode the streams to fill missing information with heuristics" },
3816 printf("\nWhile playing:\n"
3818 "f toggle full screen\n"
3821 "9, 0 decrease and increase volume respectively\n"
3822 "/, * decrease and increase volume respectively\n"
3823 "a cycle audio channel in the current program\n"
3824 "v cycle video channel\n"
3825 "t cycle subtitle channel in the current program\n"
3827 "w cycle video filters or show modes\n"
3828 "s activate frame-step mode\n"
3829 "left/right seek backward/forward 10 seconds or to custom interval if -seek_interval is set\n"
3830 "down/up seek backward/forward 1 minute\n"
3831 "page down/page up seek backward/forward 10 minutes\n"
3832 "right mouse click seek to percentage in file corresponding to fraction of width\n"
3833 "left double-click toggle full screen\n"
3867 "Use -h to get full help or, even better, run 'man %s'\n",
program_name);
3874 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3876 flags &= ~SDL_INIT_AUDIO;
3880 if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
3881 SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE", "1", 1);
3884 flags &= ~SDL_INIT_VIDEO;
3885 if (SDL_Init (flags)) {
3891 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3892 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3895 int flags = SDL_WINDOW_HIDDEN;
3897 #if SDL_VERSION_ATLEAST(2,0,5)
3898 flags |= SDL_WINDOW_ALWAYS_ON_TOP;
3900 av_log(NULL, AV_LOG_WARNING, "Your SDL version doesn't support SDL_WINDOW_ALWAYS_ON_TOP. Feature will be inactive.\n");
3903 flags |= SDL_WINDOW_BORDERLESS;
3905 flags |= SDL_WINDOW_RESIZABLE;
3907 #ifdef SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR
3908 SDL_SetHint(SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR, "0");
3917 #if SDL_VERSION_ATLEAST(2, 0, 6)
3918 flags |= SDL_WINDOW_VULKAN;
3926 SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
3949 renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
static void do_exit(VideoState *is)
enum AVColorSpace color_space
Video only, the YUV colorspace and range.
@ AV_PKT_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
#define VIDEO_BACKGROUND_TILE_SIZE
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
void show_help_options(const OptionDef *options, const char *msg, int req_flags, int rej_flags)
Print help for all options matching specified flags.
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
#define AV_LOG_WARNING
Something somehow does not look correct.
int av_buffersink_get_ch_layout(const AVFilterContext *ctx, AVChannelLayout *out)
AVPixelFormat
Pixel format.
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
__device__ int printf(const char *,...)
static SDL_RendererInfo renderer_info
double get_rotation(const int32_t *displaymatrix)
@ AVALPHA_MODE_STRAIGHT
Alpha channel is independent of color values.
static int frame_queue_nb_remaining(FrameQueue *f)
static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
static void frame_queue_next(FrameQueue *f)
enum AVMediaType codec_type
General type of the encoded data.
int nb_threads
Maximum number of threads used by filters in this graph.
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
int check_avoptions(AVDictionary *m)
static int64_t frame_queue_last_pos(FrameQueue *f)
int sample_rate
samples per second
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
static int video_thread(void *arg)
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
The official guide to swscale for confused that is
static void set_default_window_size(int width, int height, AVRational sar)
#define AV_NOSYNC_THRESHOLD
unsigned int nb_chapters
Number of chapters in AVChapter array.
This struct describes the properties of an encoded stream.
#define AV_LOG_QUIET
Print no output.
static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub)
#define AVERROR_EOF
End of file.
int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen, void *log_ctx)
Put the RGBA values that correspond to color_string in rgba_color.
uint8_t * data
The data buffer.
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
AVProgram * av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
Find the programs which belong to a given stream.
static int display_disable
int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance)
Activate resampling compensation ("soft" compensation).
#define SAMPLE_ARRAY_SIZE
static void update_volume(VideoState *is, int sign, double step)
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
char * av_asprintf(const char *fmt,...)
static int decoder_start(Decoder *d, int(*fn)(void *), const char *thread_name, void *arg)
const AVClass * avformat_get_class(void)
Get the AVClass for AVFormatContext.
void sws_freeContext(SwsContext *swsContext)
Free the swscaler context swsContext.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
SDL_Texture * vis_texture
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
This structure describes decoded (raw) audio or video data.
struct AudioParams audio_filter_src
AVStream ** streams
A list of all streams in the file.
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
double frame_last_filter_delay
This structure stores auxiliary information for decoding, presenting, or otherwise processing the cod...
@ AVCOL_RANGE_JPEG
Full range content.
static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
static const char * subtitle_codec_name
int parse_number(const char *context, const char *numstr, enum OptionType type, double min, double max, double *dst)
Parse a string and return its corresponding value as a double.
#define EXTERNAL_CLOCK_MIN_FRAMES
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
int vk_renderer_create(VkRenderer *renderer, SDL_Window *window, AVDictionary *opt)
#define SAMPLE_QUEUE_SIZE
const char program_name[]
program name, defined by the program for show_version().
AVDictionary * format_opts
int error
contains the error code or 0 if no error happened
#define AV_PIX_FMT_RGB32_1
double audio_diff_avg_coef
enum AVHWDeviceType av_hwdevice_find_type_by_name(const char *name)
Look up an AVHWDeviceType by name.
#define AV_LOG_VERBOSE
Detailed information.
#define CURSOR_HIDE_DELAY
void show_help_children(const AVClass *class, int flags)
Show help for all options with given flags in class and all its children.
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
@ AV_PIX_FMT_BGRA
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
enum AVChannelOrder order
Channel order used in this layout.
static double compute_target_delay(double delay, VideoState *is)
static int opt_input_file(void *optctx, const char *filename)
static void stream_close(VideoState *is)
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC
void vk_renderer_destroy(VkRenderer *renderer)
int nb_channels
Number of channels in this layout.
static void init_clock(Clock *c, int *queue_serial)
enum AVMediaType codec_type
int64_t avio_size(AVIOContext *s)
Get the filesize.
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
static double get_master_clock(VideoState *is)
static const AVInputFormat * file_iformat
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
av_cold int av_tx_init(AVTXContext **ctx, av_tx_fn *tx, enum AVTXType type, int inv, int len, const void *scale, uint64_t flags)
Initialize a transform context with the given configuration (i)MDCTs with an odd length are currently...
static int subtitle_thread(void *arg)
int av_channel_layout_describe_bprint(const AVChannelLayout *channel_layout, AVBPrint *bp)
bprint variant of av_channel_layout_describe().
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
static int audio_decode_frame(VideoState *is)
Decode one audio frame and return its uncompressed size.
static int subtitle_disable
struct SwrContext * swr_ctx
static int opt_sync(void *optctx, const char *opt, const char *arg)
static void step_to_next_frame(VideoState *is)
static int upload_texture(SDL_Texture **tex, AVFrame *frame)
enum AVPixelFormat format
static char * video_background
static void video_display(VideoState *is)
uint8_t max_lowres
maximum value for lowres supported by the decoder
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
#define SDL_AUDIO_MIN_BUFFER_SIZE
static int startup_volume
static SDL_Window * window
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
A convenience wrapper that allocates and initializes a filter in a single step.
AVFilterContext * avfilter_graph_alloc_filter(AVFilterGraph *graph, const AVFilter *filter, const char *name)
Create a new filter instance in a filter graph.
static void toggle_full_screen(VideoState *is)
static int packet_queue_init(PacketQueue *q)
AVBufferRef * opaque_ref
AVBufferRef for free use by the API user.
#define AUDIO_DIFF_AVG_NB
AVChannelLayout ch_layout
Audio channel layout.
AVRational sample_aspect_ratio
Video only, the sample (pixel) aspect ratio.
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
int x
top left corner of pict, undefined when pict is not set
SwsContext * sws_getCachedContext(SwsContext *context, int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Check if context can be reused, otherwise reallocate a new one.
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
#define AV_BPRINT_SIZE_AUTOMATIC
static void video_image_display(VideoState *is)
static double val(void *priv, double ch)
AVChannelLayout ch_layout
Channel layout of the audio data.
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
static int opt_show_mode(void *optctx, const char *opt, const char *arg)
SDL_cond * empty_queue_cond
static void set_clock_speed(Clock *c, double speed)
double audio_diff_threshold
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
#define ss(width, name, subs,...)
int avformat_network_init(void)
Do global initialization of network libraries.
int attribute_align_arg swr_convert(struct SwrContext *s, uint8_t *const *out_arg, int out_count, const uint8_t *const *in_arg, int in_count)
Convert audio.
static int opt_height(void *optctx, const char *opt, const char *arg)
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
static const struct TextureFormatEntry sdl_texture_format_map[]
int64_t bit_rate
Total stream bitrate in bit/s, 0 if not available.
static int is_full_screen
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, const AVPacket *avpkt)
Decode a subtitle message.
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
static void set_sdl_yuv_conversion_mode(AVFrame *frame)
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
AVDictionary * metadata
Metadata that applies to the whole file.
VkRenderer * vk_get_renderer(void)
#define FF_ARRAY_ELEMS(a)
static int audio_thread(void *arg)
static void set_clock(Clock *c, double pts, int serial)
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate,...
static Frame * frame_queue_peek_next(FrameQueue *f)
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
static void sync_clock_to_slave(Clock *c, Clock *slave)
void(* av_tx_fn)(AVTXContext *s, void *out, void *in, ptrdiff_t stride)
Function pointer to a function to perform the transform.
av_cold int swr_init(struct SwrContext *s)
Initialize context after user parameters have been set.
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
int avformat_open_input(AVFormatContext **ps, const char *url, const AVInputFormat *fmt, AVDictionary **options)
Open an input stream and read the header.
static void frame_queue_signal(FrameQueue *f)
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
int av_channel_layout_describe(const AVChannelLayout *channel_layout, char *buf, size_t buf_size)
Get a human-readable string describing the channel layout properties.
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
void show_help_default(const char *opt, const char *arg)
Per-fftool specific help handler.
#define AV_CEIL_RSHIFT(a, b)
static int default_height
int flags
Flags modifying the (de)muxer behaviour.
AVRational sample_aspect_ratio
Video only.
const struct AVInputFormat * iformat
The input container format.
#define AV_PIX_FMT_0BGR32
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
int y
top left corner of pict, undefined when pict is not set
static AVStream * video_stream
static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, int pic_width, int pic_height, AVRational pic_sar)
static double av_q2d(AVRational a)
Convert an AVRational to a double.
#define EXTERNAL_CLOCK_SPEED_STEP
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Alias for avcodec_receive_frame_flags(avctx, frame, 0).
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
static enum AVPixelFormat pix_fmts[]
int av_read_play(AVFormatContext *s)
Start playing a network-based stream (e.g.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
static int opt_codec(void *optctx, const char *opt, const char *arg)
AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
Guess the sample aspect ratio of a frame, based on both the stream and the frame aspect ratio.
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
int vk_renderer_get_hw_dev(VkRenderer *renderer, AVBufferRef **dev)
int64_t pts
Same as packet pts, in AV_TIME_BASE.
static double get_clock(Clock *c)
int av_opt_set_array(void *obj, const char *name, int search_flags, unsigned int start_elem, unsigned int nb_elems, enum AVOptionType val_type, const void *val)
Add, replace, or remove elements for an array option.
#define EXTERNAL_CLOCK_SPEED_MIN
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
static SDL_Renderer * renderer
int av_usleep(unsigned usec)
Sleep for a period of time.
The libswresample context.
static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp)
#define AV_PIX_FMT_BGR32_1
static int synchronize_audio(VideoState *is, int nb_samples)
static const char * window_title
enum AVAlphaMode alpha_mode
Video only, the alpha mode.
@ AVDISCARD_ALL
discard all
int av_log_get_level(void)
Get the current log level.
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
void init_dynload(void)
Initialize dynamic library loading.
AVCodecParameters * codecpar
Codec parameters associated with this stream.
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
int avcodec_parameters_to_context(AVCodecContext *codec, const struct AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
int w
width of pict, undefined when pict is not set
static void seek_chapter(VideoState *is, int incr)
static int get_master_sync_type(VideoState *is)
const AVClass * avcodec_get_class(void)
Get the AVClass for AVCodecContext.
static __device__ float fabs(float a)
static void frame_queue_destroy(FrameQueue *f)
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
const AVCodec * avcodec_find_decoder_by_name(const char *name)
Find a registered decoder with the specified name.
New swscale design to change SwsGraph is what coordinates multiple passes These can include cascaded scaling error diffusion and so on Or we could have separate passes for the vertical and horizontal scaling In between each SwsPass lies a fully allocated image buffer Graph passes may have different levels of e g we can have a single threaded error diffusion pass following a multi threaded scaling pass SwsGraph is internally recreated whenever the image format
#define AV_DICT_MULTIKEY
Allow to store several equal keys in the dictionary.
RenderParams render_params
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Rational number (pair of numerator and denominator).
AVChannelLayout ch_layout
static int audio_open(void *opaque, AVChannelLayout *wanted_channel_layout, int wanted_sample_rate, struct AudioParams *audio_hw_params)
AVFilterContext ** filters
int filter_codec_opts(const AVDictionary *opts, enum AVCodecID codec_id, AVFormatContext *s, AVStream *st, const AVCodec *codec, AVDictionary **dst, AVDictionary **opts_used)
Filter out options for given codec.
static void stream_cycle_channel(VideoState *is, int codec_type)
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
AVIOContext * pb
I/O context.
void av_log_set_flags(int arg)
static void frame_queue_unref_item(Frame *vp)
Frame queue[FRAME_QUEUE_SIZE]
static int64_t cursor_last_shown
static Frame * frame_queue_peek(FrameQueue *f)
AVFilterInOut * avfilter_inout_alloc(void)
Allocate a single AVFilterInOut entry.
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
@ AV_PIX_FMT_RGB8
packed RGB 3:3:2, 8bpp, (msb)3R 3G 2B(lsb)
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
double frame_last_returned_time
static void set_clock_at(Clock *c, double pts, int serial, double time)
static void toggle_pause(VideoState *is)
static int stream_component_open(VideoState *is, int stream_index)
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
@ AV_OPT_TYPE_CHLAYOUT
Underlying C type is AVChannelLayout.
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
AVChannelLayout ch_layout
Audio only.
AVComplexFloat * rdft_data
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
#define AV_PIX_FMT_NE(be, le)
static void event_loop(VideoState *cur_stream)
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
int sample_rate
Audio only.
static void draw_video_background(VideoState *is)
const AVPacketSideData * av_packet_side_data_get(const AVPacketSideData *sd, int nb_sd, enum AVPacketSideDataType type)
Get side information from a side data array.
const AVCodec * avcodec_find_decoder(enum AVCodecID id)
Find a registered decoder with a matching codec ID.
int16_t sample_array[SAMPLE_ARRAY_SIZE]
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
static int exit_on_mousedown
AVRational frame_rate
Video only, the frame rate of the input video.
AVAlphaMode
Correlation between the alpha channel and color values.
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
const AVInputFormat * iformat
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
AVDictionary * codec_opts
static int64_t audio_callback_time
#define AV_OPT_FLAG_FILTERING_PARAM
A generic parameter which can be set by the user for filtering.
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
Read packets of a media file to get stream information.
#define INSERT_FILT(name, arg)
int swr_alloc_set_opts2(struct SwrContext **ps, const AVChannelLayout *out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate, const AVChannelLayout *in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate, int log_offset, void *log_ctx)
Allocate SwrContext if needed and set/reset common parameters.
int flags2
AV_CODEC_FLAG2_*.
enum AVPictureType pict_type
Picture type of the frame.
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
AVFilterContext * in_audio_filter
int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the stream st contained in s is matched by the stream specifier spec.
AVFormatContext * avformat_alloc_context(void)
Allocate an AVFormatContext.
@ AVDISCARD_DEFAULT
discard useless packets like 0 size packets in avi
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
An AVChannelLayout holds information about the channel layout of audio data.
static Frame * frame_queue_peek_writable(FrameQueue *f)
AVBufferSrcParameters * av_buffersrc_parameters_alloc(void)
Allocate a new AVBufferSrcParameters instance.
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
static const char ** vfilters_list
int sample_rate
Sample rate of the audio data.
static int create_hwaccel(AVBufferRef **device_ctx)
static int decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond)
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
AVBufferRef * hw_frames_ctx
Video with a hwaccel pixel format only.
static enum AVColorSpace sdl_supported_color_spaces[]
static int64_t start_time
static AVStream * audio_stream
enum AVSampleFormat sample_fmt
audio sample format
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are expressed.
static av_const double hypot(double x, double y)
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents.
static AVRational av_make_q(int num, int den)
Create an AVRational.
static int read_thread(void *arg)
#define AV_PIX_FMT_BGR555
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
Seek to timestamp ts.
#define AV_NOPTS_VALUE
Undefined timestamp value.
SDL_Texture * sub_texture
int event_flags
Flags indicating events happening on the stream, a combination of AVSTREAM_EVENT_FLAG_*.
av_cold void swr_free(SwrContext **ss)
Free the given SwrContext and set the pointer to NULL.
SDL_Texture * vid_texture
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
static const char * wanted_stream_spec[AVMEDIA_TYPE_NB]
static int infinite_buffer
uint32_t end_display_time
double max_frame_duration
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
static void packet_queue_destroy(PacketQueue *q)
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
AVRational time_base
The timebase to be used for the timestamps on the input frames.
static void update_video_pts(VideoState *is, double pts, int serial)
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
static void toggle_mute(VideoState *is)
static void decoder_abort(Decoder *d, FrameQueue *fq)
static void video_refresh(void *opaque, double *remaining_time)
@ AV_CHANNEL_ORDER_NATIVE
The native channel order, i.e.
static float seek_interval
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
static void frame_queue_push(FrameQueue *f)
static SDL_AudioDeviceID audio_dev
av_cold void av_tx_uninit(AVTXContext **ctx)
Frees a context and sets *ctx to NULL, does nothing when *ctx == NULL.
static void sigterm_handler(int sig)
int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1)
Check whether two channel layouts are semantically the same, i.e.
#define AV_LOG_INFO
Standard information.
static void packet_queue_abort(PacketQueue *q)
static const char * video_codec_name
static const AVInputFormat * iformat
static void packet_queue_flush(PacketQueue *q)
void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels)
Get the default channel layout for a given number of channels.
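A small illustrative sketch combining av_channel_layout_default() with av_channel_layout_copy(), av_channel_layout_compare() and av_channel_layout_uninit(), all of which appear elsewhere in this index; the function name is hypothetical.

#include <libavutil/channel_layout.h>

static int channel_layout_demo(void)
{
    AVChannelLayout def = { 0 }, copy = { 0 };
    int same, ret;

    av_channel_layout_default(&def, 2);        /* default layout for 2 channels: stereo */
    ret = av_channel_layout_copy(&copy, &def); /* deep copy into copy */
    if (ret < 0)
        return ret;
    same = !av_channel_layout_compare(&def, &copy); /* 0 means "semantically the same" */

    av_channel_layout_uninit(&def);
    av_channel_layout_uninit(&copy);
    return same ? 0 : -1;
}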
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
int queue_attachments_req
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
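A hedged sketch of the standard send/receive decoding loop that avcodec_send_packet() is part of; avctx, pkt and frame are assumed to have been set up by the caller.

#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>

/* Feed one packet to the decoder and drain all frames it produces.
 * Returns 0 on success or a negative AVERROR code. */
static int decode_packet(AVCodecContext *avctx, const AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(avctx, pkt);   /* pkt == NULL enters draining mode */
    if (ret < 0)
        return ret;

    for (;;) {
        ret = avcodec_receive_frame(avctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;                            /* need more input, or fully drained */
        if (ret < 0)
            return ret;
        /* ... use frame ... */
        av_frame_unref(frame);
    }
}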
int nb_samples
number of audio samples (per channel) described by this frame
int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param)
Initialize the buffersrc or abuffersrc filter with the provided parameters.
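A hedged sketch of the av_buffersrc_parameters_alloc()/av_buffersrc_parameters_set() pairing, e.g. to hand a hardware frames context to an already-created buffer source; filt_src and hw_frames_ctx are caller-provided assumptions.

#include <libavfilter/buffersrc.h>
#include <libavutil/buffer.h>
#include <libavutil/error.h>
#include <libavutil/mem.h>

/* Attach a (possibly NULL) hw frames context to an existing "buffer" source filter. */
static int set_buffersrc_hw_frames(AVFilterContext *filt_src, AVBufferRef *hw_frames_ctx)
{
    AVBufferSrcParameters *par = av_buffersrc_parameters_alloc();
    int ret;

    if (!par)
        return AVERROR(ENOMEM);
    par->hw_frames_ctx = hw_frames_ctx;          /* the call copies what it needs;
                                                    ownership stays with the caller */
    ret = av_buffersrc_parameters_set(filt_src, par);
    av_freep(&par);
    return ret;
}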
int av_hwdevice_ctx_create_derived(AVBufferRef **dst_ref_ptr, enum AVHWDeviceType type, AVBufferRef *src_ref, int flags)
Create a new device of the specified type from an existing device.
#define VIDEO_PICTURE_QUEUE_SIZE
#define i(width, name, range_min, range_max)
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
enum VideoState::ShowMode show_mode
int width
Video only, the display dimensions of the input frames.
struct AudioParams audio_src
const int program_birth_year
program birth year, defined by the program for show_banner()
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters, AVFilterInOut **inputs, AVFilterInOut **outputs, void *log_ctx)
Add a graph described by a string to a graph.
int setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *local_codec_opts, AVDictionary ***dst)
Setup AVCodecContext options for avformat_find_stream_info().
static int compute_mod(int a, int b)
#define AV_TIME_BASE
Internal time base represented as integer.
uint8_t ** extended_data
pointers to the data planes/channels.
#define av_malloc_array(a, b)
static int video_open(VideoState *is)
AVColorSpace
YUV colorspace type.
static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
static int opt_format(void *optctx, const char *opt, const char *arg)
AVSampleFormat
Audio sample formats.
int parse_options(void *optctx, int argc, char **argv, const OptionDef *options, int(*parse_arg_function)(void *, const char *))
#define AV_PIX_FMT_RGB555
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
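A short illustrative sketch of moving one AVFrame's references into another with av_frame_move_ref(), clearing the destination first with av_frame_unref(); the function name is hypothetical.

#include <libavutil/frame.h>

/* Move ownership of everything in src into dst without copying data;
 * src is reset to the clean state av_frame_alloc() leaves it in. */
static void hand_over_frame(AVFrame *dst, AVFrame *src)
{
    av_frame_unref(dst);          /* drop whatever dst held before */
    av_frame_move_ref(dst, src);  /* only references move, no data copy */
}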
int vk_renderer_resize(VkRenderer *renderer, int width, int height)
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
static void update_sample_display(VideoState *is, short *samples, int samples_size)
@ AV_SAMPLE_FMT_S16
signed 16 bits
New fields can be added to the end with minor version bumps.
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/decoder.
int av_read_pause(AVFormatContext *s)
Pause a network-based stream (e.g. RTSP stream).
#define AV_PIX_FMT_BGR565
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
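A small sketch of computing an audio buffer size with av_samples_get_buffer_size(); the 1024-sample, stereo, S16 parameters are example values only.

#include <libavutil/samplefmt.h>

/* Bytes needed for 1024 interleaved stereo S16 samples, no alignment padding. */
static int stereo_s16_block_size(void)
{
    int size = av_samples_get_buffer_size(NULL, 2, 1024, AV_SAMPLE_FMT_S16, 1);
    /* equivalently: 1024 * 2 channels * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) = 4096 */
    return size;  /* negative AVERROR on invalid parameters */
}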
const char * name
Pad name.
static VkRenderer * vk_renderer
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
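An illustrative sketch of rescaling a stream timestamp to microseconds (AV_TIME_BASE units) with av_rescale(); the 1/90000 time base is an assumed example, and av_rescale_q() mentioned in the comment is a libavutil helper not listed here.

#include <stdint.h>
#include <libavutil/mathematics.h>
#include <libavutil/rational.h>

/* Convert a pts expressed in a 1/90000 stream time base to microseconds. */
static int64_t pts_to_microseconds(int64_t pts)
{
    AVRational tb = { 1, 90000 };
    /* a * b / c with rounding to nearest: pts * (tb.num * 1e6) / tb.den */
    int64_t us = av_rescale(pts, 1000000LL * tb.num, tb.den);
    /* same result via time bases: av_rescale_q(pts, tb, (AVRational){1, 1000000}) */
    return us;
}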
static enum AVAlphaMode sdl_supported_alpha_modes[]
static Frame * frame_queue_peek_readable(FrameQueue *f)
static const AVFilterPad outputs[]
#define AV_PIX_FMT_RGB565
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
int disposition
Stream disposition - a combination of AV_DISPOSITION_* flags.
AVBufferRef * av_buffer_allocz(size_t size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
#define EXTERNAL_CLOCK_MAX_FRAMES
AVRational av_guess_frame_rate(AVFormatContext *format, AVStream *st, AVFrame *frame)
Guess the frame rate, based on both the container and codec information.
int h
height of pict, undefined when pict is not set
static VideoState * stream_open(const char *filename, const AVInputFormat *iformat)
#define FFSWAP(type, a, b)
@ AVALPHA_MODE_UNSPECIFIED
Unknown alpha handling, or no alpha channel.
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal codec state / flush internal buffers.
These buffered frames must be flushed immediately if a new input produces new output. When a frame arrives, the filter must not call request_frame() to get more; it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame() method or to the application. If a filter has several inputs, it must be ready for frames arriving randomly on any input, so any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame() on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed; otherwise the filter should request a frame on one of its inputs, repeatedly until at least one frame has been pushed, or at least make progress towards producing a frame.
#define AV_PIX_FMT_0RGB32
static int filter_nbthreads
void log_callback_help(void *ptr, int level, const char *fmt, va_list vl)
Trivial log callback.
int av_hwdevice_ctx_create(AVBufferRef **pdevice_ref, enum AVHWDeviceType type, const char *device, AVDictionary *opts, int flags)
Open a device of the specified type and create an AVHWDeviceContext for it.
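A hedged sketch of opening a hardware device context with av_hwdevice_ctx_create(); VAAPI is only an example type, and the resulting reference would typically be stored in AVCodecContext.hw_device_ctx.

#include <libavutil/buffer.h>
#include <libavutil/hwcontext.h>

/* Try to open a VAAPI device; on success *out owns one reference to the
 * AVHWDeviceContext and must eventually be released with av_buffer_unref(). */
static int open_vaapi_device(AVBufferRef **out)
{
    return av_hwdevice_ctx_create(out, AV_HWDEVICE_TYPE_VAAPI,
                                  NULL /* default device */, NULL, 0);
}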
static int find_stream_info
static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
static const char * hwaccel
void av_bprintf(AVBPrint *buf, const char *fmt,...)
AVFilterContext * out_video_filter
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
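A minimal sketch of the element-based AVFifo API: allocate with av_fifo_alloc2() (here with AV_FIFO_FLAG_AUTO_GROW, listed further below), write and read whole elements, then free with av_fifo_freep2(); the values are illustrative.

#include <libavutil/error.h>
#include <libavutil/fifo.h>

static int fifo_demo(void)
{
    /* room for 8 ints initially; grows automatically on write */
    AVFifo *f = av_fifo_alloc2(8, sizeof(int), AV_FIFO_FLAG_AUTO_GROW);
    int in = 42, out = 0, ret;

    if (!f)
        return AVERROR(ENOMEM);
    ret = av_fifo_write(f, &in, 1);   /* counts are in elements, not bytes */
    if (ret >= 0)
        ret = av_fifo_read(f, &out, 1);
    av_fifo_freep2(&f);               /* frees the FIFO and sets f to NULL */
    return ret;
}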
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the two would otherwise interfere and lead to "Last message repeated x times" messages below (f)printf messages.
void remove_avoptions(AVDictionary **a, AVDictionary *b)
@ AV_PIX_FMT_UYVY422
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
#define CMDUTILS_COMMON_OPTIONS
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
@ AV_TX_FLOAT_RDFT
Real to complex and complex to real DFTs.
static int opt_width(void *optctx, const char *opt, const char *arg)
int main(int argc, char **argv)
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
static void show_usage(void)
enum AVColorRange color_range
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
main external API structure.
static void packet_queue_start(PacketQueue *q)
static const char * audio_codec_name
This structure contains the parameters describing the frames that will be passed to this filter.
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
int vk_renderer_display(VkRenderer *renderer, AVFrame *frame, RenderParams *render_params)
#define AV_SYNC_FRAMEDUP_THRESHOLD
static enum ShowMode show_mode
int format
video: the pixel format, value corresponds to enum AVPixelFormat; audio: the sample format, value corresponds to enum AVSampleFormat.
int attribute_align_arg sws_scale(SwsContext *sws, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
Initialize a filter with the supplied dictionary of options.
AVFilterContext * in_video_filter
@ AV_OPT_TYPE_INT
Underlying C type is int.
static void fill_rectangle(int x, int y, int w, int h)
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
unsigned int audio_buf1_size
int av_dict_parse_string(AVDictionary **pm, const char *str, const char *key_val_sep, const char *pairs_sep, int flags)
Parse the key/value pairs list and add the parsed entries to a dictionary.
#define AV_SYNC_THRESHOLD_MAX
static void decoder_destroy(Decoder *d)
enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt)
Get the packed alternative form of the given sample format.
int eof_reached
true if reading failed due to an error or EOF
static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue)
In filter terminology, the word “frame” indicates either a video frame or a group of audio samples.
AVFilterContext * out_audio_filter
const AVInputFormat * av_find_input_format(const char *short_name)
Find AVInputFormat based on the short name of the input format.
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
@ AV_OPT_TYPE_PIXEL_FMT
Underlying C type is enum AVPixelFormat.
int64_t duration
Duration of the stream, in AV_TIME_BASE fractional seconds.
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
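A small sketch tying together av_make_q(), av_cmp_q() (both listed above) and av_mul_q() for exact time-base arithmetic; the values are illustrative.

#include <libavutil/rational.h>

static int rational_demo(void)
{
    AVRational tb   = av_make_q(1, 25);       /* 1/25 s per tick */
    AVRational rate = av_make_q(25, 1);       /* 25 fps */
    AVRational one  = av_mul_q(tb, rate);     /* (1/25) * (25/1) == 1/1 */

    return av_cmp_q(one, av_make_q(1, 1));    /* 0: the two rationals are equal */
}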
#define GROW_ARRAY(array, nb_elems)
#define SUBPICTURE_QUEUE_SIZE
static const char * input_filename
int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
Make a copy of a channel layout.
static void stream_toggle_pause(VideoState *is)
SDL_cond * continue_read_thread
static char * vulkan_params
int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags)
Convenience wrapper for av_dict_set() that converts the value to a string and stores it.
static void toggle_audio_display(VideoState *is)
enum AVMediaType codec_type
char * av_strdup(const char *s)
Duplicate a string.
#define AV_OPT_FLAG_DECODING_PARAM
A generic parameter which can be set by the user for demuxing or decoding.
A reference to a data buffer.
static int get_video_frame(VideoState *is, AVFrame *frame)
Structure to hold side data for an AVFrame.
static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph, AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
struct SwsContext * sub_convert_ctx
This structure stores compressed data.
static void refresh_loop_wait_event(VideoState *is, SDL_Event *event)
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
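An illustrative sketch combining av_dict_set(), av_dict_set_int(), av_dict_iterate() and av_dict_free(), all listed in this index; the option names are arbitrary examples, not options this program sets.

#include <stdio.h>
#include <libavutil/dict.h>

static void dict_demo(void)
{
    AVDictionary *opts = NULL;
    const AVDictionaryEntry *e = NULL;

    av_dict_set(&opts, "video_size", "1280x720", 0);  /* flags 0: copy key and value */
    av_dict_set_int(&opts, "probesize", 5000000, 0);  /* stored as a string internally */

    while ((e = av_dict_iterate(opts, e)))            /* NULL prev starts the walk */
        printf("%s=%s\n", e->key, e->value);

    av_dict_free(&opts);                              /* frees all entries, resets to NULL */
}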
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
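A hedged sketch of the grow-only buffer pattern av_fast_malloc() supports (the same pattern the audio_buf1/audio_buf1_size pair in this file relies on); the Scratch struct and names are hypothetical.

#include <stdint.h>
#include <string.h>
#include <libavutil/mem.h>

/* Keep a reusable scratch buffer around; it is only reallocated when it must grow. */
struct Scratch {
    uint8_t     *data;
    unsigned int size;    /* current capacity, updated by av_fast_malloc() */
};

static int scratch_store(struct Scratch *s, const uint8_t *src, size_t len)
{
    av_fast_malloc(&s->data, &s->size, len);  /* takes a pointer to the buffer pointer */
    if (!s->data)
        return -1;                            /* allocation failed; size was reset to 0 */
    memcpy(s->data, src, len);
    return 0;
}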
static void stream_component_close(VideoState *is, int stream_index)
int64_t pos
byte position in stream, -1 if unknown
static int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1, enum AVSampleFormat fmt2, int64_t channel_count2)
void dump_dictionary(void *ctx, const AVDictionary *m, const char *name, const char *indent, int log_level)
This does the same as the corresponding function in libavformat/dump.c and should probably be kept in sync with it.
unsigned int audio_buf_size
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which typically indicates the size in bytes of each picture line.
The exact code depends on how similar the blocks are and how related they are to the block
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds.
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
#define ns(max_value, name, subs,...)
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base.
static Frame * frame_queue_peek_last(FrameQueue *f)
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int by_bytes)
static int decoder_reorder_pts
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
struct AudioParams audio_tgt
AVRational time_base
time base in which the start/end timestamps are specified
Main external API structure.
const AVClass * avfilter_get_class(void)
A linked-list of the inputs/outputs of the filter chain.
static void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error code.
It's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this field, just let it be. In the option table (name, default, minimum, maximum, flags), the name is the option name, keep it simple and lowercase, and the descriptions should be short.
static void video_audio_display(VideoState *s)
#define AV_SYNC_THRESHOLD_MIN
static void check_external_clock_speed(VideoState *is)
const AVDictionaryEntry * av_dict_iterate(const AVDictionary *m, const AVDictionaryEntry *prev)
Iterate over a dictionary.
uint32_t start_display_time
#define SAMPLE_CORRECTION_PERCENT_MAX
#define EXTERNAL_CLOCK_SPEED_MAX
static int packet_queue_put_nullpacket(PacketQueue *q, AVPacket *pkt, int stream_index)
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
static int is_realtime(AVFormatContext *s)
static void * av_x_if_null(const void *p, const void *x)
Return x as a default pointer in case p is NULL, otherwise return p.
static int decode_interrupt_cb(void *ctx)
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
FF_VISIBILITY_POP_HIDDEN av_cold void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
int avio_feof(AVIOContext *s)
Similar to feof() but also returns nonzero on read errors.
static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
#define AV_PIX_FMT_RGB444
static int exit_on_keydown