[FFmpeg-cvslog] avfilter: do not use AVFrame accessor

Muhammad Faiz git at videolan.org
Sun Apr 23 11:13:31 EEST 2017


ffmpeg | branch: master | Muhammad Faiz <mfcc64 at gmail.com> | Sat Apr 22 15:57:18 2017 +0700| [6af050d7d0c3c73f3d62115152db82ebd2dc5d57] | committer: Muhammad Faiz

avfilter: do not use AVFrame accessor

Reviewed-by: wm4 <nfxjfg at googlemail.com>
Signed-off-by: Muhammad Faiz <mfcc64 at gmail.com>

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=6af050d7d0c3c73f3d62115152db82ebd2dc5d57
---

 libavfilter/af_afade.c             |  6 +++---
 libavfilter/af_amerge.c            |  2 +-
 libavfilter/af_apad.c              |  2 +-
 libavfilter/af_aphaser.c           |  2 +-
 libavfilter/af_aresample.c         |  2 +-
 libavfilter/af_ashowinfo.c         |  6 +++---
 libavfilter/af_astats.c            |  2 +-
 libavfilter/af_biquads.c           |  2 +-
 libavfilter/af_channelmap.c        |  2 +-
 libavfilter/af_channelsplit.c      |  2 +-
 libavfilter/af_dynaudnorm.c        |  6 +++---
 libavfilter/af_join.c              |  2 +-
 libavfilter/af_pan.c               |  2 +-
 libavfilter/af_volume.c            |  2 +-
 libavfilter/af_volumedetect.c      |  2 +-
 libavfilter/asrc_flite.c           |  4 ++--
 libavfilter/avf_aphasemeter.c      |  2 +-
 libavfilter/avf_showcqt.c          |  4 ++--
 libavfilter/avf_showspectrum.c     |  2 +-
 libavfilter/avfilter.c             |  8 ++++----
 libavfilter/buffersrc.c            |  4 ++--
 libavfilter/f_drawgraph.c          |  2 +-
 libavfilter/f_loop.c               |  4 ++--
 libavfilter/f_metadata.c           |  2 +-
 libavfilter/f_select.c             |  6 +++---
 libavfilter/fifo.c                 |  2 +-
 libavfilter/framepool.c            |  2 +-
 libavfilter/setpts.c               |  2 +-
 libavfilter/src_movie.c            |  2 +-
 libavfilter/vf_bbox.c              |  2 +-
 libavfilter/vf_blackdetect.c       |  4 ++--
 libavfilter/vf_blackframe.c        |  2 +-
 libavfilter/vf_colormatrix.c       | 12 ++++++------
 libavfilter/vf_crop.c              |  4 ++--
 libavfilter/vf_cropdetect.c        |  2 +-
 libavfilter/vf_deflicker.c         |  2 +-
 libavfilter/vf_deinterlace_vaapi.c |  2 +-
 libavfilter/vf_drawtext.c          |  2 +-
 libavfilter/vf_eq.c                |  2 +-
 libavfilter/vf_idet.c              |  2 +-
 libavfilter/vf_ocr.c               |  2 +-
 libavfilter/vf_overlay.c           |  2 +-
 libavfilter/vf_psnr.c              |  2 +-
 libavfilter/vf_readeia608.c        |  4 ++--
 libavfilter/vf_readvitc.c          |  4 ++--
 libavfilter/vf_scale.c             |  8 ++++----
 libavfilter/vf_showinfo.c          |  2 +-
 libavfilter/vf_ssim.c              |  2 +-
 libavfilter/vf_swaprect.c          |  2 +-
 libavfilter/vf_vectorscope.c       |  2 +-
 libavfilter/vf_waveform.c          |  2 +-
 libavfilter/vsrc_testsrc.c         |  4 ++--
 52 files changed, 80 insertions(+), 80 deletions(-)
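
For readers skimming the patch: every hunk below applies the same mechanical rewrite, replacing a deprecated AVFrame accessor call with direct access to the corresponding public struct field. As a rough illustration of the three accessor flavours involved — getter, setter, and pointer accessor — here is a minimal sketch (a hypothetical function, not code from this commit):

    #include "libavutil/frame.h"

    static void accessor_to_field(AVFrame *frame)
    {
        /* getter: av_frame_get_channels(frame) becomes a plain field read */
        int channels = frame->channels;

        /* setter: av_frame_set_colorspace(frame, ...) becomes a plain field write */
        frame->colorspace = AVCOL_SPC_BT709;

        /* pointer accessor: avpriv_frame_get_metadatap(frame) becomes &frame->metadata */
        AVDictionary **metadata = &frame->metadata;

        (void)channels;
        (void)metadata;
    }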

diff --git a/libavfilter/af_afade.c b/libavfilter/af_afade.c
index 9acadc51c5..3a6266f0cd 100644
--- a/libavfilter/af_afade.c
+++ b/libavfilter/af_afade.c
@@ -291,7 +291,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
     if ((!s->type && (cur_sample + nb_samples < s->start_sample)) ||
         ( s->type && (s->start_sample + s->nb_samples < cur_sample))) {
         av_samples_set_silence(out_buf->extended_data, 0, nb_samples,
-                               av_frame_get_channels(out_buf), out_buf->format);
+                               out_buf->channels, out_buf->format);
     } else {
         int64_t start;
 
@@ -301,7 +301,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
             start = s->start_sample + s->nb_samples - cur_sample;
 
         s->fade_samples(out_buf->extended_data, buf->extended_data,
-                        nb_samples, av_frame_get_channels(buf),
+                        nb_samples, buf->channels,
                         s->type ? -1 : 1, start,
                         s->nb_samples, s->curve);
     }
@@ -498,7 +498,7 @@ static int acrossfade_filter_frame(AVFilterLink *inlink, AVFrame *in)
 
             s->crossfade_samples(out->extended_data, cf[0]->extended_data,
                                  cf[1]->extended_data,
-                                 s->nb_samples, av_frame_get_channels(in),
+                                 s->nb_samples, in->channels,
                                  s->curve, s->curve2);
             out->pts = s->pts;
             s->pts += av_rescale_q(s->nb_samples,
diff --git a/libavfilter/af_amerge.c b/libavfilter/af_amerge.c
index 8ea01e206b..3cf36b3038 100644
--- a/libavfilter/af_amerge.c
+++ b/libavfilter/af_amerge.c
@@ -280,7 +280,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
 
     outbuf->nb_samples     = nb_samples;
     outbuf->channel_layout = outlink->channel_layout;
-    av_frame_set_channels(outbuf, outlink->channels);
+    outbuf->channels       = outlink->channels;
 
     while (nb_samples) {
         ns = nb_samples;
diff --git a/libavfilter/af_apad.c b/libavfilter/af_apad.c
index 0a2d4206a9..8171f2a3d4 100644
--- a/libavfilter/af_apad.c
+++ b/libavfilter/af_apad.c
@@ -119,7 +119,7 @@ static int request_frame(AVFilterLink *outlink)
 
         av_samples_set_silence(outsamplesref->extended_data, 0,
                                n_out,
-                               av_frame_get_channels(outsamplesref),
+                               outsamplesref->channels,
                                outsamplesref->format);
 
         outsamplesref->pts = s->next_pts;
diff --git a/libavfilter/af_aphaser.c b/libavfilter/af_aphaser.c
index 33ecb1a7fb..780407e924 100644
--- a/libavfilter/af_aphaser.c
+++ b/libavfilter/af_aphaser.c
@@ -254,7 +254,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inbuf)
     }
 
     s->phaser(s, inbuf->extended_data, outbuf->extended_data,
-              outbuf->nb_samples, av_frame_get_channels(outbuf));
+              outbuf->nb_samples, outbuf->channels);
 
     if (inbuf != outbuf)
         av_frame_free(&inbuf);
diff --git a/libavfilter/af_aresample.c b/libavfilter/af_aresample.c
index 028e105318..2b55d70181 100644
--- a/libavfilter/af_aresample.c
+++ b/libavfilter/af_aresample.c
@@ -200,7 +200,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamplesref)
 
     av_frame_copy_props(outsamplesref, insamplesref);
     outsamplesref->format                = outlink->format;
-    av_frame_set_channels(outsamplesref, outlink->channels);
+    outsamplesref->channels              = outlink->channels;
     outsamplesref->channel_layout        = outlink->channel_layout;
     outsamplesref->sample_rate           = outlink->sample_rate;
 
diff --git a/libavfilter/af_ashowinfo.c b/libavfilter/af_ashowinfo.c
index a81729f7f7..9046e8d84a 100644
--- a/libavfilter/af_ashowinfo.c
+++ b/libavfilter/af_ashowinfo.c
@@ -199,7 +199,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
                        s->plane_checksums[0];
     }
 
-    av_get_channel_layout_string(chlayout_str, sizeof(chlayout_str), av_frame_get_channels(buf),
+    av_get_channel_layout_string(chlayout_str, sizeof(chlayout_str), buf->channels,
                                  buf->channel_layout);
 
     av_log(ctx, AV_LOG_INFO,
@@ -208,8 +208,8 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
            "checksum:%08"PRIX32" ",
            inlink->frame_count_out,
            av_ts2str(buf->pts), av_ts2timestr(buf->pts, &inlink->time_base),
-           av_frame_get_pkt_pos(buf),
-           av_get_sample_fmt_name(buf->format), av_frame_get_channels(buf), chlayout_str,
+           buf->pkt_pos,
+           av_get_sample_fmt_name(buf->format), buf->channels, chlayout_str,
            buf->sample_rate, buf->nb_samples,
            checksum);
 
diff --git a/libavfilter/af_astats.c b/libavfilter/af_astats.c
index e7f9675c2e..8813b52109 100644
--- a/libavfilter/af_astats.c
+++ b/libavfilter/af_astats.c
@@ -305,7 +305,7 @@ static void set_metadata(AudioStatsContext *s, AVDictionary **metadata)
 static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
 {
     AudioStatsContext *s = inlink->dst->priv;
-    AVDictionary **metadata = avpriv_frame_get_metadatap(buf);
+    AVDictionary **metadata = &buf->metadata;
     const int channels = s->nb_channels;
     int i, c;
 
diff --git a/libavfilter/af_biquads.c b/libavfilter/af_biquads.c
index 79f1b7cf4c..a39d09dbec 100644
--- a/libavfilter/af_biquads.c
+++ b/libavfilter/af_biquads.c
@@ -411,7 +411,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
         av_frame_copy_props(out_buf, buf);
     }
 
-    for (ch = 0; ch < av_frame_get_channels(buf); ch++)
+    for (ch = 0; ch < buf->channels; ch++)
         s->filter(s, buf->extended_data[ch],
                   out_buf->extended_data[ch], nb_samples,
                   &s->cache[ch].i1, &s->cache[ch].i2,
diff --git a/libavfilter/af_channelmap.c b/libavfilter/af_channelmap.c
index cdd8a5885c..7c2be95bfd 100644
--- a/libavfilter/af_channelmap.c
+++ b/libavfilter/af_channelmap.c
@@ -354,7 +354,7 @@ static int channelmap_filter_frame(AVFilterLink *inlink, AVFrame *buf)
            FFMIN(FF_ARRAY_ELEMS(buf->data), nch_out) * sizeof(buf->data[0]));
 
     buf->channel_layout = outlink->channel_layout;
-    av_frame_set_channels(buf, outlink->channels);
+    buf->channels       = outlink->channels;
 
     return ff_filter_frame(outlink, buf);
 }
diff --git a/libavfilter/af_channelsplit.c b/libavfilter/af_channelsplit.c
index f50414984a..248eaca7e0 100644
--- a/libavfilter/af_channelsplit.c
+++ b/libavfilter/af_channelsplit.c
@@ -120,7 +120,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
         buf_out->data[0] = buf_out->extended_data[0] = buf_out->extended_data[i];
         buf_out->channel_layout =
             av_channel_layout_extract_channel(buf->channel_layout, i);
-        av_frame_set_channels(buf_out, 1);
+        buf_out->channels = 1;
 
         ret = ff_filter_frame(ctx->outputs[i], buf_out);
         if (ret < 0)
diff --git a/libavfilter/af_dynaudnorm.c b/libavfilter/af_dynaudnorm.c
index aa5b28e647..d4ebd96590 100644
--- a/libavfilter/af_dynaudnorm.c
+++ b/libavfilter/af_dynaudnorm.c
@@ -358,7 +358,7 @@ static double find_peak_magnitude(AVFrame *frame, int channel)
     int c, i;
 
     if (channel == -1) {
-        for (c = 0; c < av_frame_get_channels(frame); c++) {
+        for (c = 0; c < frame->channels; c++) {
             double *data_ptr = (double *)frame->extended_data[c];
 
             for (i = 0; i < frame->nb_samples; i++)
@@ -380,7 +380,7 @@ static double compute_frame_rms(AVFrame *frame, int channel)
     int c, i;
 
     if (channel == -1) {
-        for (c = 0; c < av_frame_get_channels(frame); c++) {
+        for (c = 0; c < frame->channels; c++) {
             const double *data_ptr = (double *)frame->extended_data[c];
 
             for (i = 0; i < frame->nb_samples; i++) {
@@ -388,7 +388,7 @@ static double compute_frame_rms(AVFrame *frame, int channel)
             }
         }
 
-        rms_value /= frame->nb_samples * av_frame_get_channels(frame);
+        rms_value /= frame->nb_samples * frame->channels;
     } else {
         const double *data_ptr = (double *)frame->extended_data[channel];
         for (i = 0; i < frame->nb_samples; i++) {
diff --git a/libavfilter/af_join.c b/libavfilter/af_join.c
index bd780cc379..74ecce052d 100644
--- a/libavfilter/af_join.c
+++ b/libavfilter/af_join.c
@@ -491,7 +491,7 @@ static int try_push_frame(AVFilterContext *ctx)
 
     frame->nb_samples     = nb_samples;
     frame->channel_layout = outlink->channel_layout;
-    av_frame_set_channels(frame, outlink->channels);
+    frame->channels       = outlink->channels;
     frame->sample_rate    = outlink->sample_rate;
     frame->format         = outlink->format;
     frame->pts            = s->input_frames[0]->pts;
diff --git a/libavfilter/af_pan.c b/libavfilter/af_pan.c
index a477bde460..63d7750f35 100644
--- a/libavfilter/af_pan.c
+++ b/libavfilter/af_pan.c
@@ -389,7 +389,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
                 (void *)insamples->extended_data, n);
     av_frame_copy_props(outsamples, insamples);
     outsamples->channel_layout = outlink->channel_layout;
-    av_frame_set_channels(outsamples, outlink->channels);
+    outsamples->channels = outlink->channels;
 
     ret = ff_filter_frame(outlink, outsamples);
     av_frame_free(&insamples);
diff --git a/libavfilter/af_volume.c b/libavfilter/af_volume.c
index 68134033ec..9ed2dbace3 100644
--- a/libavfilter/af_volume.c
+++ b/libavfilter/af_volume.c
@@ -395,7 +395,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
     vol->var_values[VAR_T  ] = TS2T(buf->pts, inlink->time_base);
     vol->var_values[VAR_N  ] = inlink->frame_count_out;
 
-    pos = av_frame_get_pkt_pos(buf);
+    pos = buf->pkt_pos;
     vol->var_values[VAR_POS] = pos == -1 ? NAN : pos;
     if (vol->eval_mode == EVAL_MODE_FRAME)
         set_volume(ctx);
diff --git a/libavfilter/af_volumedetect.c b/libavfilter/af_volumedetect.c
index 0143940ef3..c7d58e84c7 100644
--- a/libavfilter/af_volumedetect.c
+++ b/libavfilter/af_volumedetect.c
@@ -62,7 +62,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *samples)
     AVFilterContext *ctx = inlink->dst;
     VolDetectContext *vd = ctx->priv;
     int nb_samples  = samples->nb_samples;
-    int nb_channels = av_frame_get_channels(samples);
+    int nb_channels = samples->channels;
     int nb_planes   = nb_channels;
     int plane, i;
     int16_t *pcm;
diff --git a/libavfilter/asrc_flite.c b/libavfilter/asrc_flite.c
index 2e5bd4b6c0..d13a4a3b19 100644
--- a/libavfilter/asrc_flite.c
+++ b/libavfilter/asrc_flite.c
@@ -255,8 +255,8 @@ static int request_frame(AVFilterLink *outlink)
     memcpy(samplesref->data[0], flite->wave_samples,
            nb_samples * flite->wave->num_channels * 2);
     samplesref->pts = flite->pts;
-    av_frame_set_pkt_pos(samplesref, -1);
-    av_frame_set_sample_rate(samplesref, flite->wave->sample_rate);
+    samplesref->pkt_pos = -1;
+    samplesref->sample_rate = flite->wave->sample_rate;
     flite->pts += nb_samples;
     flite->wave_samples += nb_samples * flite->wave->num_channels;
     flite->wave_nb_samples -= nb_samples;
diff --git a/libavfilter/avf_aphasemeter.c b/libavfilter/avf_aphasemeter.c
index bfd77861ec..8cdee9464c 100644
--- a/libavfilter/avf_aphasemeter.c
+++ b/libavfilter/avf_aphasemeter.c
@@ -204,7 +204,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
             memcpy(out->data[0] + i * out->linesize[0], out->data[0], outlink->w * 4);
     }
 
-    metadata = avpriv_frame_get_metadatap(in);
+    metadata = &in->metadata;
     if (metadata) {
         uint8_t value[128];
 
diff --git a/libavfilter/avf_showcqt.c b/libavfilter/avf_showcqt.c
index 7bc3a260c3..875ba48cee 100644
--- a/libavfilter/avf_showcqt.c
+++ b/libavfilter/avf_showcqt.c
@@ -1169,8 +1169,8 @@ static int plot_cqt(AVFilterContext *ctx, AVFrame **frameout)
         if (!out)
             return AVERROR(ENOMEM);
         out->sample_aspect_ratio = av_make_q(1, 1);
-        av_frame_set_color_range(out, AVCOL_RANGE_MPEG);
-        av_frame_set_colorspace(out, s->csp);
+        out->color_range = AVCOL_RANGE_MPEG;
+        out->colorspace = s->csp;
         UPDATE_TIME(s->alloc_time);
 
         if (s->bar_h) {
diff --git a/libavfilter/avf_showspectrum.c b/libavfilter/avf_showspectrum.c
index 09b5a2a51f..4dd52a2c0b 100644
--- a/libavfilter/avf_showspectrum.c
+++ b/libavfilter/avf_showspectrum.c
@@ -426,7 +426,7 @@ static int config_output(AVFilterLink *outlink)
             memset(outpicref->data[1] + i * outpicref->linesize[1], 128, outlink->w);
             memset(outpicref->data[2] + i * outpicref->linesize[2], 128, outlink->w);
         }
-        av_frame_set_color_range(outpicref, AVCOL_RANGE_JPEG);
+        outpicref->color_range = AVCOL_RANGE_JPEG;
     }
 
     if ((s->orientation == VERTICAL   && s->xpos >= s->w) ||
diff --git a/libavfilter/avfilter.c b/libavfilter/avfilter.c
index ecfb872ed8..08b86b010d 100644
--- a/libavfilter/avfilter.c
+++ b/libavfilter/avfilter.c
@@ -53,7 +53,7 @@ void ff_tlog_ref(void *ctx, AVFrame *ref, int end)
             "ref[%p buf:%p data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64,
             ref, ref->buf, ref->data[0],
             ref->linesize[0], ref->linesize[1], ref->linesize[2], ref->linesize[3],
-            ref->pts, av_frame_get_pkt_pos(ref));
+            ref->pts, ref->pkt_pos);
 
     if (ref->width) {
         ff_tlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c",
@@ -1143,7 +1143,7 @@ int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
             av_log(link->dst, AV_LOG_ERROR, "Format change is not supported\n");
             goto error;
         }
-        if (av_frame_get_channels(frame) != link->channels) {
+        if (frame->channels != link->channels) {
             av_log(link->dst, AV_LOG_ERROR, "Channel count change is not supported\n");
             goto error;
         }
@@ -1585,7 +1585,7 @@ int ff_inlink_make_frame_writable(AVFilterLink *link, AVFrame **rframe)
     case AVMEDIA_TYPE_AUDIO:
         av_samples_copy(out->extended_data, frame->extended_data,
                         0, 0, frame->nb_samples,
-                        av_frame_get_channels(frame),
+                        frame->channels,
                         frame->format);
         break;
     default:
@@ -1616,7 +1616,7 @@ int ff_inlink_evaluate_timeline_at_frame(AVFilterLink *link, const AVFrame *fram
 {
     AVFilterContext *dstctx = link->dst;
     int64_t pts = frame->pts;
-    int64_t pos = av_frame_get_pkt_pos(frame);
+    int64_t pos = frame->pkt_pos;
 
     if (!dstctx->enable_str)
         return 1;
diff --git a/libavfilter/buffersrc.c b/libavfilter/buffersrc.c
index 3f80d5f413..e8f59c2de7 100644
--- a/libavfilter/buffersrc.c
+++ b/libavfilter/buffersrc.c
@@ -155,7 +155,7 @@ int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFra
     int ret = 0;
 
     if (frame && frame->channel_layout &&
-        av_get_channel_layout_nb_channels(frame->channel_layout) != av_frame_get_channels(frame)) {
+        av_get_channel_layout_nb_channels(frame->channel_layout) != frame->channels) {
         av_log(ctx, AV_LOG_ERROR, "Layout indicates a different number of channels than actually present\n");
         return AVERROR(EINVAL);
     }
@@ -222,7 +222,7 @@ static int av_buffersrc_add_frame_internal(AVFilterContext *ctx,
         if (!frame->channel_layout)
             frame->channel_layout = s->channel_layout;
         CHECK_AUDIO_PARAM_CHANGE(ctx, s, frame->sample_rate, frame->channel_layout,
-                                 av_frame_get_channels(frame), frame->format);
+                                 frame->channels, frame->format);
         break;
     default:
         return AVERROR(EINVAL);
diff --git a/libavfilter/f_drawgraph.c b/libavfilter/f_drawgraph.c
index 4c705fe851..8be9b9f95a 100644
--- a/libavfilter/f_drawgraph.c
+++ b/libavfilter/f_drawgraph.c
@@ -200,7 +200,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
         av_frame_copy_props(out, in);
     }
 
-    metadata = av_frame_get_metadata(in);
+    metadata = in->metadata;
 
     for (i = 0; i < 4; i++) {
         double values[VAR_VARS_NB];
diff --git a/libavfilter/f_loop.c b/libavfilter/f_loop.c
index 5a3280772e..255fe643da 100644
--- a/libavfilter/f_loop.c
+++ b/libavfilter/f_loop.c
@@ -275,7 +275,7 @@ static int push_frame(AVFilterContext *ctx)
     if (!out)
         return AVERROR(ENOMEM);
     out->pts += s->duration - s->start_pts;
-    pts = out->pts + av_frame_get_pkt_duration(out);
+    pts = out->pts + out->pkt_duration;
     ret = ff_filter_frame(outlink, out);
     s->current_frame++;
 
@@ -307,7 +307,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
                 return AVERROR(ENOMEM);
             }
             s->nb_frames++;
-            s->duration = frame->pts + av_frame_get_pkt_duration(frame);
+            s->duration = frame->pts + frame->pkt_duration;
             ret = ff_filter_frame(outlink, frame);
         } else {
             av_frame_free(&frame);
diff --git a/libavfilter/f_metadata.c b/libavfilter/f_metadata.c
index 1f613ecb56..23bc254a75 100644
--- a/libavfilter/f_metadata.c
+++ b/libavfilter/f_metadata.c
@@ -280,7 +280,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
     AVFilterContext *ctx = inlink->dst;
     AVFilterLink *outlink = ctx->outputs[0];
     MetadataContext *s = ctx->priv;
-    AVDictionary **metadata = avpriv_frame_get_metadatap(frame);
+    AVDictionary **metadata = &frame->metadata;
     AVDictionaryEntry *e;
 
     if (!*metadata)
diff --git a/libavfilter/f_select.c b/libavfilter/f_select.c
index 03c1c0f3f3..c8602626b5 100644
--- a/libavfilter/f_select.c
+++ b/libavfilter/f_select.c
@@ -284,7 +284,7 @@ static double get_scene_score(AVFilterContext *ctx, AVFrame *frame)
 
 static double get_concatdec_select(AVFrame *frame, int64_t pts)
 {
-    AVDictionary *metadata = av_frame_get_metadata(frame);
+    AVDictionary *metadata = frame->metadata;
     AVDictionaryEntry *start_time_entry = av_dict_get(metadata, "lavf.concatdec.start_time", NULL, 0);
     AVDictionaryEntry *duration_entry = av_dict_get(metadata, "lavf.concatdec.duration", NULL, 0);
     if (start_time_entry) {
@@ -321,7 +321,7 @@ static void select_frame(AVFilterContext *ctx, AVFrame *frame)
     select->var_values[VAR_N  ] = inlink->frame_count_out;
     select->var_values[VAR_PTS] = TS2D(frame->pts);
     select->var_values[VAR_T  ] = TS2D(frame->pts) * av_q2d(inlink->time_base);
-    select->var_values[VAR_POS] = av_frame_get_pkt_pos(frame) == -1 ? NAN : av_frame_get_pkt_pos(frame);
+    select->var_values[VAR_POS] = frame->pkt_pos == -1 ? NAN : frame->pkt_pos;
     select->var_values[VAR_KEY] = frame->key_frame;
     select->var_values[VAR_CONCATDEC_SELECT] = get_concatdec_select(frame, av_rescale_q(frame->pts, inlink->time_base, AV_TIME_BASE_Q));
 
@@ -340,7 +340,7 @@ static void select_frame(AVFilterContext *ctx, AVFrame *frame)
             select->var_values[VAR_SCENE] = get_scene_score(ctx, frame);
             // TODO: document metadata
             snprintf(buf, sizeof(buf), "%f", select->var_values[VAR_SCENE]);
-            av_dict_set(avpriv_frame_get_metadatap(frame), "lavfi.scene_score", buf, 0);
+            av_dict_set(&frame->metadata, "lavfi.scene_score", buf, 0);
         }
         break;
     }
diff --git a/libavfilter/fifo.c b/libavfilter/fifo.c
index abfbba10bb..0fa0f86cb3 100644
--- a/libavfilter/fifo.c
+++ b/libavfilter/fifo.c
@@ -129,7 +129,7 @@ static void buffer_offset(AVFilterLink *link, AVFrame *frame,
 static int calc_ptr_alignment(AVFrame *frame)
 {
     int planes = av_sample_fmt_is_planar(frame->format) ?
-                 av_frame_get_channels(frame) : 1;
+                 frame->channels : 1;
     int min_align = 128;
     int p;
 
diff --git a/libavfilter/framepool.c b/libavfilter/framepool.c
index e1f1e2cc41..42c0e58498 100644
--- a/libavfilter/framepool.c
+++ b/libavfilter/framepool.c
@@ -240,7 +240,7 @@ AVFrame *ff_frame_pool_get(FFFramePool *pool)
         break;
     case AVMEDIA_TYPE_AUDIO:
         frame->nb_samples = pool->nb_samples;
-        av_frame_set_channels(frame, pool->channels);
+        frame->channels = pool->channels;
         frame->format = pool->format;
         frame->linesize[0] = pool->linesize[0];
 
diff --git a/libavfilter/setpts.c b/libavfilter/setpts.c
index 2ccca28e9a..4505498bf3 100644
--- a/libavfilter/setpts.c
+++ b/libavfilter/setpts.c
@@ -165,7 +165,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
     }
     setpts->var_values[VAR_PTS       ] = TS2D(frame->pts);
     setpts->var_values[VAR_T         ] = TS2T(frame->pts, inlink->time_base);
-    setpts->var_values[VAR_POS       ] = av_frame_get_pkt_pos(frame) == -1 ? NAN : av_frame_get_pkt_pos(frame);
+    setpts->var_values[VAR_POS       ] = frame->pkt_pos == -1 ? NAN : frame->pkt_pos;
     setpts->var_values[VAR_RTCTIME   ] = av_gettime();
 
     if (inlink->type == AVMEDIA_TYPE_VIDEO) {
diff --git a/libavfilter/src_movie.c b/libavfilter/src_movie.c
index a93842e8c1..a0dd66ea76 100644
--- a/libavfilter/src_movie.c
+++ b/libavfilter/src_movie.c
@@ -559,7 +559,7 @@ static int movie_push_frame(AVFilterContext *ctx, unsigned out_id)
         return 0;
     }
 
-    frame->pts = av_frame_get_best_effort_timestamp(frame);
+    frame->pts = frame->best_effort_timestamp;
     if (frame->pts != AV_NOPTS_VALUE) {
         if (movie->ts_offset)
             frame->pts += av_rescale_q_rnd(movie->ts_offset, AV_TIME_BASE_Q, outlink->time_base, AV_ROUND_UP);
diff --git a/libavfilter/vf_bbox.c b/libavfilter/vf_bbox.c
index 86054b2483..7d5055305d 100644
--- a/libavfilter/vf_bbox.c
+++ b/libavfilter/vf_bbox.c
@@ -84,7 +84,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
            av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base));
 
     if (has_bbox) {
-        AVDictionary **metadata = avpriv_frame_get_metadatap(frame);
+        AVDictionary **metadata = &frame->metadata;
 
         SET_META("lavfi.bbox.x1", box.x1)
         SET_META("lavfi.bbox.x2", box.x2)
diff --git a/libavfilter/vf_blackdetect.c b/libavfilter/vf_blackdetect.c
index 0f6adf49ed..28fdb48a92 100644
--- a/libavfilter/vf_blackdetect.c
+++ b/libavfilter/vf_blackdetect.c
@@ -164,7 +164,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
             /* black starts here */
             blackdetect->black_started = 1;
             blackdetect->black_start = picref->pts;
-            av_dict_set(avpriv_frame_get_metadatap(picref), "lavfi.black_start",
+            av_dict_set(&picref->metadata, "lavfi.black_start",
                 av_ts2timestr(blackdetect->black_start, &inlink->time_base), 0);
         }
     } else if (blackdetect->black_started) {
@@ -172,7 +172,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
         blackdetect->black_started = 0;
         blackdetect->black_end = picref->pts;
         check_black_end(ctx);
-        av_dict_set(avpriv_frame_get_metadatap(picref), "lavfi.black_end",
+        av_dict_set(&picref->metadata, "lavfi.black_end",
             av_ts2timestr(blackdetect->black_end, &inlink->time_base), 0);
     }
 
diff --git a/libavfilter/vf_blackframe.c b/libavfilter/vf_blackframe.c
index 9fe2a42942..804965c42c 100644
--- a/libavfilter/vf_blackframe.c
+++ b/libavfilter/vf_blackframe.c
@@ -85,7 +85,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
 
     pblack = s->nblack * 100 / (inlink->w * inlink->h);
     if (pblack >= s->bamount) {
-        metadata = avpriv_frame_get_metadatap(frame);
+        metadata = &frame->metadata;
 
         av_log(ctx, AV_LOG_INFO, "frame:%u pblack:%u pts:%"PRId64" t:%f "
                "type:%c last_keyframe:%d\n",
diff --git a/libavfilter/vf_colormatrix.c b/libavfilter/vf_colormatrix.c
index d237baa7b9..8ddec4ffda 100644
--- a/libavfilter/vf_colormatrix.c
+++ b/libavfilter/vf_colormatrix.c
@@ -435,7 +435,7 @@ static int filter_frame(AVFilterLink *link, AVFrame *in)
     av_frame_copy_props(out, in);
 
     if (color->source == COLOR_MODE_NONE) {
-        enum AVColorSpace cs = av_frame_get_colorspace(in);
+        enum AVColorSpace cs = in->colorspace;
         enum ColorMode source;
 
         switch(cs) {
@@ -456,11 +456,11 @@ static int filter_frame(AVFilterLink *link, AVFrame *in)
         color->mode = color->source * 5 + color->dest;
 
     switch(color->dest) {
-    case COLOR_MODE_BT709    : av_frame_set_colorspace(out, AVCOL_SPC_BT709)     ; break;
-    case COLOR_MODE_FCC      : av_frame_set_colorspace(out, AVCOL_SPC_FCC)       ; break;
-    case COLOR_MODE_SMPTE240M: av_frame_set_colorspace(out, AVCOL_SPC_SMPTE240M) ; break;
-    case COLOR_MODE_BT601    : av_frame_set_colorspace(out, AVCOL_SPC_BT470BG)   ; break;
-    case COLOR_MODE_BT2020   : av_frame_set_colorspace(out, AVCOL_SPC_BT2020_NCL); break;
+    case COLOR_MODE_BT709    : out->colorspace = AVCOL_SPC_BT709     ; break;
+    case COLOR_MODE_FCC      : out->colorspace = AVCOL_SPC_FCC       ; break;
+    case COLOR_MODE_SMPTE240M: out->colorspace = AVCOL_SPC_SMPTE240M ; break;
+    case COLOR_MODE_BT601    : out->colorspace = AVCOL_SPC_BT470BG   ; break;
+    case COLOR_MODE_BT2020   : out->colorspace = AVCOL_SPC_BT2020_NCL; break;
     }
 
     td.src = in;
diff --git a/libavfilter/vf_crop.c b/libavfilter/vf_crop.c
index 85ea892d01..7c31c1665d 100644
--- a/libavfilter/vf_crop.c
+++ b/libavfilter/vf_crop.c
@@ -258,8 +258,8 @@ static int filter_frame(AVFilterLink *link, AVFrame *frame)
     s->var_values[VAR_N] = link->frame_count_out;
     s->var_values[VAR_T] = frame->pts == AV_NOPTS_VALUE ?
         NAN : frame->pts * av_q2d(link->time_base);
-    s->var_values[VAR_POS] = av_frame_get_pkt_pos(frame) == -1 ?
-        NAN : av_frame_get_pkt_pos(frame);
+    s->var_values[VAR_POS] = frame->pkt_pos == -1 ?
+        NAN : frame->pkt_pos;
     s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, NULL);
     s->var_values[VAR_Y] = av_expr_eval(s->y_pexpr, s->var_values, NULL);
     s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, NULL);
diff --git a/libavfilter/vf_cropdetect.c b/libavfilter/vf_cropdetect.c
index 4a89875502..7c7d0b953a 100644
--- a/libavfilter/vf_cropdetect.c
+++ b/libavfilter/vf_cropdetect.c
@@ -169,7 +169,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
 
     // ignore first 2 frames - they may be empty
     if (++s->frame_nb > 0) {
-        metadata = avpriv_frame_get_metadatap(frame);
+        metadata = &frame->metadata;
 
         // Reset the crop area every reset_count frames, if reset_count is > 0
         if (s->reset_count > 0 && s->frame_nb > s->reset_count) {
diff --git a/libavfilter/vf_deflicker.c b/libavfilter/vf_deflicker.c
index 3cda354b2b..e748109c8b 100644
--- a/libavfilter/vf_deflicker.c
+++ b/libavfilter/vf_deflicker.c
@@ -386,7 +386,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
     }
 
     av_frame_copy_props(out, in);
-    metadata = avpriv_frame_get_metadatap(out);
+    metadata = &out->metadata;
     if (metadata) {
         uint8_t value[128];
 
diff --git a/libavfilter/vf_deinterlace_vaapi.c b/libavfilter/vf_deinterlace_vaapi.c
index 91652d852b..5e7f7cf1c2 100644
--- a/libavfilter/vf_deinterlace_vaapi.c
+++ b/libavfilter/vf_deinterlace_vaapi.c
@@ -446,7 +446,7 @@ static int deint_vaapi_filter_frame(AVFilterLink *inlink, AVFrame *input_frame)
     params.surface = input_surface;
     params.surface_region = &input_region;
     params.surface_color_standard = vaapi_proc_colour_standard(
-        av_frame_get_colorspace(input_frame));
+        input_frame->colorspace);
 
     params.output_region = NULL;
     params.output_background_color = 0xff000000;
diff --git a/libavfilter/vf_drawtext.c b/libavfilter/vf_drawtext.c
index cba6cc803f..f6151443bb 100644
--- a/libavfilter/vf_drawtext.c
+++ b/libavfilter/vf_drawtext.c
@@ -1452,7 +1452,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
         NAN : frame->pts * av_q2d(inlink->time_base);
 
     s->var_values[VAR_PICT_TYPE] = frame->pict_type;
-    s->metadata = av_frame_get_metadata(frame);
+    s->metadata = frame->metadata;
 
     draw_text(ctx, frame, frame->width, frame->height);
 
diff --git a/libavfilter/vf_eq.c b/libavfilter/vf_eq.c
index c450d5ed02..e8b4a46195 100644
--- a/libavfilter/vf_eq.c
+++ b/libavfilter/vf_eq.c
@@ -254,7 +254,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
     AVFilterLink *outlink = inlink->dst->outputs[0];
     EQContext *eq = ctx->priv;
     AVFrame *out;
-    int64_t pos = av_frame_get_pkt_pos(in);
+    int64_t pos = in->pkt_pos;
     const AVPixFmtDescriptor *desc;
     int i;
 
diff --git a/libavfilter/vf_idet.c b/libavfilter/vf_idet.c
index 87d4144e9e..14f031aaa9 100644
--- a/libavfilter/vf_idet.c
+++ b/libavfilter/vf_idet.c
@@ -120,7 +120,7 @@ static void filter(AVFilterContext *ctx)
     Type type, best_type;
     RepeatedField repeat;
     int match = 0;
-    AVDictionary **metadata = avpriv_frame_get_metadatap(idet->cur);
+    AVDictionary **metadata = &idet->cur->metadata;
 
     for (i = 0; i < idet->csp->nb_components; i++) {
         int w = idet->cur->width;
diff --git a/libavfilter/vf_ocr.c b/libavfilter/vf_ocr.c
index 870dd68841..e003982f05 100644
--- a/libavfilter/vf_ocr.c
+++ b/libavfilter/vf_ocr.c
@@ -97,7 +97,7 @@ static int query_formats(AVFilterContext *ctx)
 
 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 {
-    AVDictionary **metadata = avpriv_frame_get_metadatap(in);
+    AVDictionary **metadata = &in->metadata;
     AVFilterContext *ctx = inlink->dst;
     AVFilterLink *outlink = ctx->outputs[0];
     OCRContext *s = ctx->priv;
diff --git a/libavfilter/vf_overlay.c b/libavfilter/vf_overlay.c
index bbcd6b55cd..37b799fcae 100644
--- a/libavfilter/vf_overlay.c
+++ b/libavfilter/vf_overlay.c
@@ -715,7 +715,7 @@ static AVFrame *do_blend(AVFilterContext *ctx, AVFrame *mainpic,
     AVFilterLink *inlink = ctx->inputs[0];
 
     if (s->eval_mode == EVAL_MODE_FRAME) {
-        int64_t pos = av_frame_get_pkt_pos(mainpic);
+        int64_t pos = mainpic->pkt_pos;
 
         s->var_values[VAR_N] = inlink->frame_count_out;
         s->var_values[VAR_T] = mainpic->pts == AV_NOPTS_VALUE ?
diff --git a/libavfilter/vf_psnr.c b/libavfilter/vf_psnr.c
index 1201b2cd7c..20962c41f6 100644
--- a/libavfilter/vf_psnr.c
+++ b/libavfilter/vf_psnr.c
@@ -148,7 +148,7 @@ static AVFrame *do_psnr(AVFilterContext *ctx, AVFrame *main,
     PSNRContext *s = ctx->priv;
     double comp_mse[4], mse = 0;
     int j, c;
-    AVDictionary **metadata = avpriv_frame_get_metadatap(main);
+    AVDictionary **metadata = &main->metadata;
 
     compute_images_mse(s, (const uint8_t **)main->data, main->linesize,
                           (const uint8_t **)ref->data, ref->linesize,
diff --git a/libavfilter/vf_readeia608.c b/libavfilter/vf_readeia608.c
index 4bfe9cfe75..bc3abe7c4d 100644
--- a/libavfilter/vf_readeia608.c
+++ b/libavfilter/vf_readeia608.c
@@ -214,11 +214,11 @@ static void extract_line(AVFilterContext *ctx, AVFilterLink *inlink, AVFrame *in
 
         snprintf(key, sizeof(key), "lavfi.readeia608.%d.cc", s->nb_found);
         snprintf(value, sizeof(value), "0x%02X%02X", byte[0], byte[1]);
-        av_dict_set(avpriv_frame_get_metadatap(in), key, value, 0);
+        av_dict_set(&in->metadata, key, value, 0);
 
         snprintf(key, sizeof(key), "lavfi.readeia608.%d.line", s->nb_found);
         snprintf(value, sizeof(value), "%d", line);
-        av_dict_set(avpriv_frame_get_metadatap(in), key, value, 0);
+        av_dict_set(&in->metadata, key, value, 0);
     }
 
     s->nb_found++;
diff --git a/libavfilter/vf_readvitc.c b/libavfilter/vf_readvitc.c
index d70af6a9ee..7ef8cdae58 100644
--- a/libavfilter/vf_readvitc.c
+++ b/libavfilter/vf_readvitc.c
@@ -221,9 +221,9 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
     int found;
 
     found = read_vitc_line(s, frame->data[0], frame->linesize[0], inlink->w, inlink->h);
-    av_dict_set(avpriv_frame_get_metadatap(frame), "lavfi.readvitc.found", (found ? "1" : "0"), 0);
+    av_dict_set(&frame->metadata, "lavfi.readvitc.found", (found ? "1" : "0"), 0);
     if (found)
-        av_dict_set(avpriv_frame_get_metadatap(frame), "lavfi.readvitc.tc_str", make_vitc_tc_string(s->tcbuf, s->line_data), 0);
+        av_dict_set(&frame->metadata, "lavfi.readvitc.tc_str", make_vitc_tc_string(s->tcbuf, s->line_data), 0);
 
     return ff_filter_frame(outlink, frame);
 }
diff --git a/libavfilter/vf_scale.c b/libavfilter/vf_scale.c
index 2fe9a1fb52..c59ac6b0ea 100644
--- a/libavfilter/vf_scale.c
+++ b/libavfilter/vf_scale.c
@@ -409,7 +409,7 @@ static int filter_frame(AVFilterLink *link, AVFrame *in)
     char buf[32];
     int in_range;
 
-    if (av_frame_get_colorspace(in) == AVCOL_SPC_YCGCO)
+    if (in->colorspace == AVCOL_SPC_YCGCO)
         av_log(link->dst, AV_LOG_WARNING, "Detected unsupported YCgCo colorspace.\n");
 
     if(   in->width  != link->w
@@ -456,7 +456,7 @@ static int filter_frame(AVFilterLink *link, AVFrame *in)
     if(scale->output_is_pal)
         avpriv_set_systematic_pal2((uint32_t*)out->data[1], outlink->format == AV_PIX_FMT_PAL8 ? AV_PIX_FMT_BGR8 : outlink->format);
 
-    in_range = av_frame_get_color_range(in);
+    in_range = in->color_range;
 
     if (   scale->in_color_matrix
         || scale->out_color_matrix
@@ -471,7 +471,7 @@ static int filter_frame(AVFilterLink *link, AVFrame *in)
                                  &brightness, &contrast, &saturation);
 
         if (scale->in_color_matrix)
-            inv_table = parse_yuv_type(scale->in_color_matrix, av_frame_get_colorspace(in));
+            inv_table = parse_yuv_type(scale->in_color_matrix, in->colorspace);
         if (scale->out_color_matrix)
             table     = parse_yuv_type(scale->out_color_matrix, AVCOL_SPC_UNSPECIFIED);
         else if (scale->in_color_matrix)
@@ -496,7 +496,7 @@ static int filter_frame(AVFilterLink *link, AVFrame *in)
                                      table, out_full,
                                      brightness, contrast, saturation);
 
-        av_frame_set_color_range(out, out_full ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG);
+        out->color_range = out_full ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
     }
 
     av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,
diff --git a/libavfilter/vf_showinfo.c b/libavfilter/vf_showinfo.c
index 83d941c629..14b8aa4aff 100644
--- a/libavfilter/vf_showinfo.c
+++ b/libavfilter/vf_showinfo.c
@@ -108,7 +108,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
            "fmt:%s sar:%d/%d s:%dx%d i:%c iskey:%d type:%c "
            "checksum:%08"PRIX32" plane_checksum:[%08"PRIX32,
            inlink->frame_count_out,
-           av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base), av_frame_get_pkt_pos(frame),
+           av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base), frame->pkt_pos,
            desc->name,
            frame->sample_aspect_ratio.num, frame->sample_aspect_ratio.den,
            frame->width, frame->height,
diff --git a/libavfilter/vf_ssim.c b/libavfilter/vf_ssim.c
index cf925bd211..fa2c2c1406 100644
--- a/libavfilter/vf_ssim.c
+++ b/libavfilter/vf_ssim.c
@@ -283,7 +283,7 @@ static double ssim_db(double ssim, double weight)
 static AVFrame *do_ssim(AVFilterContext *ctx, AVFrame *main,
                         const AVFrame *ref)
 {
-    AVDictionary **metadata = avpriv_frame_get_metadatap(main);
+    AVDictionary **metadata = &main->metadata;
     SSIMContext *s = ctx->priv;
     float c[4], ssimv = 0.0;
     int i;
diff --git a/libavfilter/vf_swaprect.c b/libavfilter/vf_swaprect.c
index a0aa59d236..f96f897818 100644
--- a/libavfilter/vf_swaprect.c
+++ b/libavfilter/vf_swaprect.c
@@ -99,7 +99,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
     var_values[VAR_DAR] = var_values[VAR_A] * var_values[VAR_SAR];
     var_values[VAR_N]   = inlink->frame_count_out;
     var_values[VAR_T]   = in->pts == AV_NOPTS_VALUE ? NAN : in->pts * av_q2d(inlink->time_base);
-    var_values[VAR_POS] = av_frame_get_pkt_pos(in) == -1 ? NAN : av_frame_get_pkt_pos(in);
+    var_values[VAR_POS] = in->pkt_pos == -1 ? NAN : in->pkt_pos;
 
     ret = av_expr_parse_and_eval(&dw, s->w,
                                  var_names, &var_values[0],
diff --git a/libavfilter/vf_vectorscope.c b/libavfilter/vf_vectorscope.c
index 987bc66bd4..8c596c7a88 100644
--- a/libavfilter/vf_vectorscope.c
+++ b/libavfilter/vf_vectorscope.c
@@ -1212,7 +1212,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
     if (s->colorspace) {
         s->cs = (s->depth - 8) * 2 + s->colorspace - 1;
     } else {
-        switch (av_frame_get_colorspace(in)) {
+        switch (in->colorspace) {
         case AVCOL_SPC_SMPTE170M:
         case AVCOL_SPC_BT470BG:
             s->cs = (s->depth - 8) * 2 + 0;
diff --git a/libavfilter/vf_waveform.c b/libavfilter/vf_waveform.c
index 70995eebee..44e9bf4dc2 100644
--- a/libavfilter/vf_waveform.c
+++ b/libavfilter/vf_waveform.c
@@ -2754,7 +2754,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
         return AVERROR(ENOMEM);
     }
     out->pts = in->pts;
-    av_frame_set_color_range(out, AVCOL_RANGE_JPEG);
+    out->color_range = AVCOL_RANGE_JPEG;
 
     for (k = 0; k < s->dcomp; k++) {
         if (s->bits <= 8) {
diff --git a/libavfilter/vsrc_testsrc.c b/libavfilter/vsrc_testsrc.c
index 422f6d8c54..c4a5ae3742 100644
--- a/libavfilter/vsrc_testsrc.c
+++ b/libavfilter/vsrc_testsrc.c
@@ -1376,7 +1376,7 @@ static void smptebars_fill_picture(AVFilterContext *ctx, AVFrame *picref)
     int r_w, r_h, w_h, p_w, p_h, i, tmp, x = 0;
     const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(picref->format);
 
-    av_frame_set_colorspace(picref, AVCOL_SPC_BT470BG);
+    picref->colorspace = AVCOL_SPC_BT470BG;
 
     r_w = FFALIGN((test->w + 6) / 7, 1 << pixdesc->log2_chroma_w);
     r_h = FFALIGN(test->h * 2 / 3, 1 << pixdesc->log2_chroma_h);
@@ -1443,7 +1443,7 @@ static void smptehdbars_fill_picture(AVFilterContext *ctx, AVFrame *picref)
     int d_w, r_w, r_h, l_w, i, tmp, x = 0, y = 0;
     const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(picref->format);
 
-    av_frame_set_colorspace(picref, AVCOL_SPC_BT709);
+    picref->colorspace = AVCOL_SPC_BT709;
 
     d_w = FFALIGN(test->w / 8, 1 << pixdesc->log2_chroma_w);
     r_h = FFALIGN(test->h * 7 / 12, 1 << pixdesc->log2_chroma_h);



