FFmpeg
vf_alphamerge.c
/*
 * Copyright (c) 2012 Steven Robertson
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * copy an alpha component from another video's luma
 */
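
/*
 * Illustrative use (a sketch of typical invocation, not taken from this
 * file; the file name is a placeholder): in a filtergraph such as
 *     movie=matte.mkv [alpha]; [in][alpha] alphamerge [out]
 * the luma plane of the second input becomes the alpha plane of the main
 * input.
 */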

#include <string.h>

#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixfmt.h"
#include "avfilter.h"
#include "drawutils.h"
#include "formats.h"
#include "filters.h"
#include "framesync.h"
#include "internal.h"
#include "video.h"

enum { Y, U, V, A };

typedef struct AlphaMergeContext {
    const AVClass *class;

    int is_packed_rgb;
    uint8_t rgba_map[4];

    FFFrameSync fs;
} AlphaMergeContext;

static int do_alphamerge(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    AlphaMergeContext *s = ctx->priv;
    AVFrame *main_buf, *alpha_buf;
    int ret;

    ret = ff_framesync_dualinput_get_writable(fs, &main_buf, &alpha_buf);
    if (ret < 0)
        return ret;
    if (!alpha_buf)
        /* no frame available on the alpha input: pass the main frame through */
        return ff_filter_frame(ctx->outputs[0], main_buf);

    if (s->is_packed_rgb) {
        /* packed RGB with alpha: write each gray byte of the alpha input
         * into the alpha byte of the corresponding 4-byte pixel */
        int x, y;
        uint8_t *pin, *pout;
        for (y = 0; y < main_buf->height; y++) {
            pin = alpha_buf->data[0] + y * alpha_buf->linesize[0];
            pout = main_buf->data[0] + y * main_buf->linesize[0] + s->rgba_map[A];
            for (x = 0; x < main_buf->width; x++) {
                *pout = *pin;
                pin += 1;
                pout += 4;
            }
        }
    } else {
        /* planar: copy the alpha input's luma plane into the main frame's
         * alpha plane */
        const int main_linesize = main_buf->linesize[A];
        const int alpha_linesize = alpha_buf->linesize[Y];
        av_image_copy_plane(main_buf->data[A], main_linesize,
                            alpha_buf->data[Y], alpha_linesize,
                            FFMIN(main_linesize, alpha_linesize), alpha_buf->height);
    }

    return ff_filter_frame(ctx->outputs[0], main_buf);
}
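
/* Note on the packed-RGB path above: ff_fill_rgba_map() stores the byte
 * offset of each R, G, B and A component within one 4-byte pixel, so
 * rgba_map[A] is 3 for AV_PIX_FMT_RGBA/BGRA and 0 for AV_PIX_FMT_ARGB/ABGR;
 * the inner loop therefore writes every 4th byte starting at that offset. */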

static av_cold int init(AVFilterContext *ctx)
{
    AlphaMergeContext *s = ctx->priv;

    s->fs.on_event = do_alphamerge; /* framesync calls this per synchronized frame pair */
    return 0;
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat main_fmts[] = {
        AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
        AV_PIX_FMT_GBRAP,
        AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA, AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
        AV_PIX_FMT_NONE
    };
    static const enum AVPixelFormat alpha_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
    AVFilterFormats *main_formats = ff_make_format_list(main_fmts);
    int ret;

    if ((ret = ff_formats_ref(main_formats, &ctx->inputs[0]->outcfg.formats)) < 0 ||
        (ret = ff_formats_ref(main_formats, &ctx->outputs[0]->incfg.formats)) < 0)
        return ret;

    return ff_formats_ref(ff_make_format_list(alpha_fmts),
                          &ctx->inputs[1]->outcfg.formats);
}

static int config_input_main(AVFilterLink *inlink)
{
    AlphaMergeContext *s = inlink->dst->priv;
    s->is_packed_rgb =
        ff_fill_rgba_map(s->rgba_map, inlink->format) >= 0 &&
        inlink->format != AV_PIX_FMT_GBRAP;
    return 0;
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AlphaMergeContext *s = ctx->priv;
    AVFilterLink *mainlink = ctx->inputs[0];
    AVFilterLink *alphalink = ctx->inputs[1];
    int ret;

    if (mainlink->w != alphalink->w || mainlink->h != alphalink->h) {
        av_log(ctx, AV_LOG_ERROR,
               "Input frame sizes do not match (%dx%d vs %dx%d).\n",
               mainlink->w, mainlink->h,
               alphalink->w, alphalink->h);
        return AVERROR(EINVAL);
    }

    if ((ret = ff_framesync_init_dualinput(&s->fs, ctx)) < 0)
        return ret;

    outlink->w = mainlink->w;
    outlink->h = mainlink->h;
    outlink->time_base = mainlink->time_base;
    outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
    outlink->frame_rate = mainlink->frame_rate;

    return ff_framesync_configure(&s->fs);
}

static int activate(AVFilterContext *ctx)
{
    AlphaMergeContext *s = ctx->priv;
    return ff_framesync_activate(&s->fs);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    AlphaMergeContext *s = ctx->priv;

    ff_framesync_uninit(&s->fs);
}

static const AVFilterPad alphamerge_inputs[] = {
    {
        .name         = "main",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input_main,
    },{
        .name         = "alpha",
        .type         = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

static const AVFilterPad alphamerge_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
    },
    { NULL }
};

static const AVOption alphamerge_options[] = {
    { NULL }
};

FRAMESYNC_DEFINE_CLASS(alphamerge, AlphaMergeContext, fs);

AVFilter ff_vf_alphamerge = {
    .name           = "alphamerge",
    .description    = NULL_IF_CONFIG_SMALL("Copy the luma value of the second "
                      "input into the alpha channel of the first input."),
    .preinit        = alphamerge_framesync_preinit,
    .priv_size      = sizeof(AlphaMergeContext),
    .priv_class     = &alphamerge_class,
    .init           = init,
    .query_formats  = query_formats,
    .inputs         = alphamerge_inputs,
    .outputs        = alphamerge_outputs,
    .uninit         = uninit,
    .activate       = activate,
    .flags          = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};
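
A minimal command-line sketch of how this filter is typically driven; the file
names and the choice of an alpha-capable output codec are placeholders, not
taken from this source:

    ffmpeg -i base.mp4 -i matte.mp4 \
           -filter_complex "[0:v][1:v]alphamerge" \
           -c:v qtrle merged.mov

The second input supplies the gray frames whose luma becomes the alpha channel
of the first input, matching the dual-input framesync setup configured above.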