[FFmpeg-cvslog] fftools/ffplay: depend on avfilter
Anton Khirnov
git at videolan.org
Mon Mar 20 11:50:23 EET 2023
ffmpeg | branch: master | Anton Khirnov <anton at khirnov.net> | Fri Mar 10 09:39:06 2023 +0100| [c29e5ab5c1d0fc161018d02b0308104a88e110ad] | committer: Anton Khirnov
fftools/ffplay: depend on avfilter
Making lavfi optional adds a lot of complexity for very questionable
gain.
> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=c29e5ab5c1d0fc161018d02b0308104a88e110ad
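For context: the SDL_PIXELFORMAT_UNKNOWN branch removed from upload_texture() below was the swscale fallback used when ffplay was built without libavfilter; with avfilter now a hard dependency, pixel-format conversion is always left to the filtergraph, whose buffersink is constrained to SDL-supported formats. The following is a minimal, self-contained sketch of that pattern. It is not code from ffplay; make_graph() and the single YUV420P output format are illustrative, and it assumes a scaler filter is compiled in so the graph can auto-insert the conversion.

#include <limits.h>
#include <stdio.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>

/* Build a trivial graph: buffer (decoded frames in) -> buffersink
 * (frames out, constrained to YUV420P).  During avfilter_graph_config()
 * libavfilter inserts whatever conversion is needed to satisfy the
 * sink's format list. */
static int make_graph(AVFilterGraph **pgraph, AVFilterContext **psrc,
                      AVFilterContext **psink,
                      int w, int h, enum AVPixelFormat fmt, AVRational tb)
{
    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P,
                                                   AV_PIX_FMT_NONE };
    AVFilterGraph   *graph = avfilter_graph_alloc();
    AVFilterContext *src = NULL, *sink = NULL;
    char args[256];
    int ret;

    if (!graph)
        return AVERROR(ENOMEM);

    /* describe the frames we are going to push into the graph */
    snprintf(args, sizeof(args),
             "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d",
             w, h, fmt, tb.num, tb.den);
    ret = avfilter_graph_create_filter(&src, avfilter_get_by_name("buffer"),
                                       "in", args, NULL, graph);
    if (ret < 0)
        goto fail;

    ret = avfilter_graph_create_filter(&sink, avfilter_get_by_name("buffersink"),
                                       "out", NULL, NULL, graph);
    if (ret < 0)
        goto fail;

    /* restrict the sink to formats the caller can display */
    ret = av_opt_set_int_list(sink, "pix_fmts", pix_fmts,
                              AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
    if (ret < 0)
        goto fail;

    if ((ret = avfilter_link(src, 0, sink, 0)) < 0 ||
        (ret = avfilter_graph_config(graph, NULL)) < 0)
        goto fail;

    *pgraph = graph; *psrc = src; *psink = sink;
    return 0;
fail:
    avfilter_graph_free(&graph);
    return ret;
}

/* Per decoded frame:
 *     av_buffersrc_add_frame(src, decoded_frame);
 *     while (av_buffersink_get_frame(sink, out_frame) >= 0) {
 *         ... upload out_frame (now YUV420P) to an SDL texture ...
 *         av_frame_unref(out_frame);
 *     }
 */

Because the graph performs the conversion during format negotiation, the explicit sws_getCachedContext()/sws_scale() path in upload_texture() no longer has a caller, which is why this patch can drop it along with img_convert_ctx.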
---
configure | 2 +-
fftools/ffplay.c | 80 +++++---------------------------------------------------
2 files changed, 7 insertions(+), 75 deletions(-)
diff --git a/configure b/configure
index 0370e25577..8980cec7ee 100755
--- a/configure
+++ b/configure
@@ -3836,7 +3836,7 @@ ffmpeg_select="aformat_filter anull_filter atrim_filter format_filter
hflip_filter null_filter
transpose_filter trim_filter vflip_filter"
ffmpeg_suggest="ole32 psapi shell32"
-ffplay_deps="avcodec avformat swscale swresample sdl2"
+ffplay_deps="avcodec avformat avfilter swscale swresample sdl2"
ffplay_select="rdft crop_filter transpose_filter hflip_filter vflip_filter rotate_filter"
ffplay_suggest="shell32"
ffprobe_deps="avcodec avformat"
diff --git a/fftools/ffplay.c b/fftools/ffplay.c
index f09ff59ccc..0a96511475 100644
--- a/fftools/ffplay.c
+++ b/fftools/ffplay.c
@@ -50,11 +50,9 @@
#include "libavcodec/avfft.h"
#include "libswresample/swresample.h"
-#if CONFIG_AVFILTER
-# include "libavfilter/avfilter.h"
-# include "libavfilter/buffersink.h"
-# include "libavfilter/buffersrc.h"
-#endif
+#include "libavfilter/avfilter.h"
+#include "libavfilter/buffersink.h"
+#include "libavfilter/buffersrc.h"
#include <SDL.h>
#include <SDL_thread.h>
@@ -109,8 +107,6 @@ const int program_birth_year = 2003;
#define USE_ONEPASS_SUBTITLE_RENDER 1
-static unsigned sws_flags = SWS_BICUBIC;
-
typedef struct MyAVPacketList {
AVPacket *pkt;
int serial;
@@ -250,9 +246,7 @@ typedef struct VideoState {
int audio_volume;
int muted;
struct AudioParams audio_src;
-#if CONFIG_AVFILTER
struct AudioParams audio_filter_src;
-#endif
struct AudioParams audio_tgt;
struct SwrContext *swr_ctx;
int frame_drops_early;
@@ -284,7 +278,6 @@ typedef struct VideoState {
AVStream *video_st;
PacketQueue videoq;
double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
- struct SwsContext *img_convert_ctx;
struct SwsContext *sub_convert_ctx;
int eof;
@@ -292,14 +285,12 @@ typedef struct VideoState {
int width, height, xleft, ytop;
int step;
-#if CONFIG_AVFILTER
int vfilter_idx;
AVFilterContext *in_video_filter; // the first filter in the video chain
AVFilterContext *out_video_filter; // the last filter in the video chain
AVFilterContext *in_audio_filter; // the first filter in the audio chain
AVFilterContext *out_audio_filter; // the last filter in the audio chain
AVFilterGraph *agraph; // audio filter graph
-#endif
int last_video_stream, last_audio_stream, last_subtitle_stream;
@@ -347,11 +338,9 @@ static const char *video_codec_name;
double rdftspeed = 0.02;
static int64_t cursor_last_shown;
static int cursor_hidden = 0;
-#if CONFIG_AVFILTER
static const char **vfilters_list = NULL;
static int nb_vfilters = 0;
static char *afilters = NULL;
-#endif
static int autorotate = 1;
static int find_stream_info = 1;
static int filter_nbthreads = 0;
@@ -393,14 +382,12 @@ static const struct TextureFormatEntry {
{ AV_PIX_FMT_NONE, SDL_PIXELFORMAT_UNKNOWN },
};
-#if CONFIG_AVFILTER
static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
{
GROW_ARRAY(vfilters_list, nb_vfilters);
vfilters_list[nb_vfilters - 1] = arg;
return 0;
}
-#endif
static inline
int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
@@ -891,7 +878,8 @@ static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_B
}
}
-static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext **img_convert_ctx) {
+static int upload_texture(SDL_Texture **tex, AVFrame *frame)
+{
int ret = 0;
Uint32 sdl_pix_fmt;
SDL_BlendMode sdl_blendmode;
@@ -899,24 +887,6 @@ static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext *
if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
return -1;
switch (sdl_pix_fmt) {
- case SDL_PIXELFORMAT_UNKNOWN:
- /* This should only happen if we are not using avfilter... */
- *img_convert_ctx = sws_getCachedContext(*img_convert_ctx,
- frame->width, frame->height, frame->format, frame->width, frame->height,
- AV_PIX_FMT_BGRA, sws_flags, NULL, NULL, NULL);
- if (*img_convert_ctx != NULL) {
- uint8_t *pixels[4];
- int pitch[4];
- if (!SDL_LockTexture(*tex, NULL, (void **)pixels, pitch)) {
- sws_scale(*img_convert_ctx, (const uint8_t * const *)frame->data, frame->linesize,
- 0, frame->height, pixels, pitch);
- SDL_UnlockTexture(*tex);
- }
- } else {
- av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
- ret = -1;
- }
- break;
case SDL_PIXELFORMAT_IYUV:
if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0],
@@ -1014,7 +984,7 @@ static void video_image_display(VideoState *is)
set_sdl_yuv_conversion_mode(vp->frame);
if (!vp->uploaded) {
- if (upload_texture(&is->vid_texture, vp->frame, &is->img_convert_ctx) < 0) {
+ if (upload_texture(&is->vid_texture, vp->frame) < 0) {
set_sdl_yuv_conversion_mode(NULL);
return;
}
@@ -1272,7 +1242,6 @@ static void stream_close(VideoState *is)
frame_queue_destory(&is->sampq);
frame_queue_destory(&is->subpq);
SDL_DestroyCond(is->continue_read_thread);
- sws_freeContext(is->img_convert_ctx);
sws_freeContext(is->sub_convert_ctx);
av_free(is->filename);
if (is->vis_texture)
@@ -1294,9 +1263,7 @@ static void do_exit(VideoState *is)
if (window)
SDL_DestroyWindow(window);
uninit_opts();
-#if CONFIG_AVFILTER
av_freep(&vfilters_list);
-#endif
avformat_network_deinit();
if (show_status)
printf("\n");
@@ -1794,7 +1761,6 @@ static int get_video_frame(VideoState *is, AVFrame *frame)
return got_picture;
}
-#if CONFIG_AVFILTER
static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
{
@@ -2022,17 +1988,14 @@ end:
return ret;
}
-#endif /* CONFIG_AVFILTER */
static int audio_thread(void *arg)
{
VideoState *is = arg;
AVFrame *frame = av_frame_alloc();
Frame *af;
-#if CONFIG_AVFILTER
int last_serial = -1;
int reconfigure;
-#endif
int got_frame = 0;
AVRational tb;
int ret = 0;
@@ -2047,7 +2010,6 @@ static int audio_thread(void *arg)
if (got_frame) {
tb = (AVRational){1, frame->sample_rate};
-#if CONFIG_AVFILTER
reconfigure =
cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.ch_layout.nb_channels,
frame->format, frame->ch_layout.nb_channels) ||
@@ -2080,7 +2042,6 @@ static int audio_thread(void *arg)
while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
tb = av_buffersink_get_time_base(is->out_audio_filter);
-#endif
if (!(af = frame_queue_peek_writable(&is->sampq)))
goto the_end;
@@ -2092,19 +2053,15 @@ static int audio_thread(void *arg)
av_frame_move_ref(af->frame, frame);
frame_queue_push(&is->sampq);
-#if CONFIG_AVFILTER
if (is->audioq.serial != is->auddec.pkt_serial)
break;
}
if (ret == AVERROR_EOF)
is->auddec.finished = is->auddec.pkt_serial;
-#endif
}
} while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
the_end:
-#if CONFIG_AVFILTER
avfilter_graph_free(&is->agraph);
-#endif
av_frame_free(&frame);
return ret;
}
@@ -2130,7 +2087,6 @@ static int video_thread(void *arg)
AVRational tb = is->video_st->time_base;
AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
-#if CONFIG_AVFILTER
AVFilterGraph *graph = NULL;
AVFilterContext *filt_out = NULL, *filt_in = NULL;
int last_w = 0;
@@ -2138,7 +2094,6 @@ static int video_thread(void *arg)
enum AVPixelFormat last_format = -2;
int last_serial = -1;
int last_vfilter_idx = 0;
-#endif
if (!frame)
return AVERROR(ENOMEM);
@@ -2150,7 +2105,6 @@ static int video_thread(void *arg)
if (!ret)
continue;
-#if CONFIG_AVFILTER
if ( last_w != frame->width
|| last_h != frame->height
|| last_format != frame->format
@@ -2205,24 +2159,19 @@ static int video_thread(void *arg)
if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
is->frame_last_filter_delay = 0;
tb = av_buffersink_get_time_base(filt_out);
-#endif
duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
ret = queue_picture(is, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial);
av_frame_unref(frame);
-#if CONFIG_AVFILTER
if (is->videoq.serial != is->viddec.pkt_serial)
break;
}
-#endif
if (ret < 0)
goto the_end;
}
the_end:
-#if CONFIG_AVFILTER
avfilter_graph_free(&graph);
-#endif
av_frame_free(&frame);
return 0;
}
@@ -2631,7 +2580,6 @@ static int stream_component_open(VideoState *is, int stream_index)
ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
switch (avctx->codec_type) {
case AVMEDIA_TYPE_AUDIO:
-#if CONFIG_AVFILTER
{
AVFilterContext *sink;
@@ -2648,12 +2596,6 @@ static int stream_component_open(VideoState *is, int stream_index)
if (ret < 0)
goto fail;
}
-#else
- sample_rate = avctx->sample_rate;
- ret = av_channel_layout_copy(&ch_layout, &avctx->ch_layout);
- if (ret < 0)
- goto fail;
-#endif
/* prepare audio output */
if ((ret = audio_open(is, &ch_layout, sample_rate, &is->audio_tgt)) < 0)
@@ -3327,7 +3269,6 @@ static void event_loop(VideoState *cur_stream)
stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
break;
case SDLK_w:
-#if CONFIG_AVFILTER
if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
if (++cur_stream->vfilter_idx >= nb_vfilters)
cur_stream->vfilter_idx = 0;
@@ -3335,9 +3276,6 @@ static void event_loop(VideoState *cur_stream)
cur_stream->vfilter_idx = 0;
toggle_audio_display(cur_stream);
}
-#else
- toggle_audio_display(cur_stream);
-#endif
break;
case SDLK_PAGEUP:
if (cur_stream->ic->nb_chapters <= 1) {
@@ -3601,10 +3539,8 @@ static const OptionDef options[] = {
{ "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
{ "left", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_left }, "set the x position for the left of the window", "x pos" },
{ "top", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_top }, "set the y position for the top of the window", "y pos" },
-#if CONFIG_AVFILTER
{ "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
{ "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
-#endif
{ "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
{ "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
{ "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
@@ -3635,11 +3571,7 @@ void show_help_default(const char *opt, const char *arg)
printf("\n");
show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
-#if !CONFIG_AVFILTER
- show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
-#else
show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
-#endif
printf("\nWhile playing:\n"
"q, ESC quit\n"
"f toggle full screen\n"