FFmpeg
vf_alphamerge.c
/*
 * Copyright (c) 2012 Steven Robertson
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * copy an alpha component from another video's luma
 */

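/*
 * Illustrative usage sketch: the filter takes two video inputs of equal size
 * and replaces the first input's alpha with the second input's luma, e.g. in
 * a filtergraph such as
 *     movie=in_alpha.mkv [alpha]; [in][alpha] alphamerge [out]
 * or, roughly, on the command line (input names are placeholders, output
 * options omitted):
 *     ffmpeg -i main.mkv -i alpha.mkv -filter_complex "[0:v][1:v]alphamerge" ...
 */
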
#include <string.h>

#include "libavutil/pixfmt.h"
#include "avfilter.h"
#include "bufferqueue.h"
#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

enum { Y, U, V, A };

typedef struct {
    int frame_requested;           // set while the output has an unsatisfied frame request
    int is_packed_rgb;             // main input is packed RGB(A) rather than planar YUVA
    uint8_t rgba_map[4];           // byte offsets of R, G, B, A within a packed pixel
    struct FFBufQueue queue_main;
    struct FFBufQueue queue_alpha;
} AlphaMergeContext;

static av_cold void uninit(AVFilterContext *ctx)
{
    AlphaMergeContext *merge = ctx->priv;
    ff_bufqueue_discard_all(&merge->queue_main);
    ff_bufqueue_discard_all(&merge->queue_alpha);
}

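/*
 * The main input is restricted to formats that carry an alpha component
 * (planar YUVA or packed RGB with alpha); the alpha input must be 8-bit
 * grayscale, whose luma supplies the alpha values.
 */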
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat main_fmts[] = {
        AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
        AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA, AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
        AV_PIX_FMT_NONE
    };
    static const enum AVPixelFormat alpha_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
    AVFilterFormats *main_formats = ff_make_format_list(main_fmts);
    AVFilterFormats *alpha_formats = ff_make_format_list(alpha_fmts);
    ff_formats_ref(main_formats, &ctx->inputs[0]->out_formats);
    ff_formats_ref(alpha_formats, &ctx->inputs[1]->out_formats);
    ff_formats_ref(main_formats, &ctx->outputs[0]->in_formats);
    return 0;
}

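/*
 * ff_fill_rgba_map() succeeds only for packed RGB(A) pixel formats, so a
 * non-negative return value both selects the packed-RGB path in draw_frame()
 * and records where the alpha byte sits inside each 4-byte pixel
 * (rgba_map[A]).
 */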
static int config_input_main(AVFilterLink *inlink)
{
    AlphaMergeContext *merge = inlink->dst->priv;
    merge->is_packed_rgb =
        ff_fill_rgba_map(merge->rgba_map, inlink->format) >= 0;
    return 0;
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *mainlink = ctx->inputs[0];
    AVFilterLink *alphalink = ctx->inputs[1];
    if (mainlink->w != alphalink->w || mainlink->h != alphalink->h) {
        av_log(ctx, AV_LOG_ERROR,
               "Input frame sizes do not match (%dx%d vs %dx%d).\n",
               mainlink->w, mainlink->h,
               alphalink->w, alphalink->h);
        return AVERROR(EINVAL);
    }

    outlink->w = mainlink->w;
    outlink->h = mainlink->h;
    outlink->time_base = mainlink->time_base;
    outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
    outlink->frame_rate = mainlink->frame_rate;
    return 0;
}

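/*
 * For packed RGB(A), the alpha input's gray values are written byte by byte
 * into the alpha offset of each 4-byte pixel of the main frame; for planar
 * YUVA, the gray plane is memcpy'd row by row into the main frame's alpha
 * plane (plane index A).
 */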
static void draw_frame(AVFilterContext *ctx,
                       AVFrame *main_buf,
                       AVFrame *alpha_buf)
{
    AlphaMergeContext *merge = ctx->priv;
    int h = main_buf->height;

    if (merge->is_packed_rgb) {
        int x, y;
        uint8_t *pin, *pout;
        for (y = 0; y < h; y++) {
            pin = alpha_buf->data[0] + y * alpha_buf->linesize[0];
            pout = main_buf->data[0] + y * main_buf->linesize[0] + merge->rgba_map[A];
            for (x = 0; x < main_buf->width; x++) {
                *pout = *pin;
                pin += 1;
                pout += 4;
            }
        }
    } else {
        int y;
        const int main_linesize = main_buf->linesize[A];
        const int alpha_linesize = alpha_buf->linesize[Y];
        for (y = 0; y < h && y < alpha_buf->height; y++) {
            memcpy(main_buf->data[A] + y * main_linesize,
                   alpha_buf->data[Y] + y * alpha_linesize,
                   FFMIN(main_linesize, alpha_linesize));
        }
    }
}

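/*
 * Frames arriving on either input are buffered in per-input queues; whenever
 * both queues hold at least one frame, the oldest pair is merged and the main
 * frame (now carrying the new alpha) is sent downstream, while the alpha
 * frame is freed.
 */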
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AVFilterContext *ctx = inlink->dst;
    AlphaMergeContext *merge = ctx->priv;

    int ret = 0;
    int is_alpha = (inlink == ctx->inputs[1]);
    struct FFBufQueue *queue =
        (is_alpha ? &merge->queue_alpha : &merge->queue_main);
    ff_bufqueue_add(ctx, queue, buf);

    do {
        AVFrame *main_buf, *alpha_buf;

        if (!ff_bufqueue_peek(&merge->queue_main, 0) ||
            !ff_bufqueue_peek(&merge->queue_alpha, 0)) break;

        main_buf = ff_bufqueue_get(&merge->queue_main);
        alpha_buf = ff_bufqueue_get(&merge->queue_alpha);

        merge->frame_requested = 0;
        draw_frame(ctx, main_buf, alpha_buf);
        ret = ff_filter_frame(ctx->outputs[0], main_buf);
        av_frame_free(&alpha_buf);
    } while (ret >= 0);
    return ret;
}

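/*
 * A downstream request is satisfied by repeatedly asking the input that is
 * lagging behind: the alpha input (inputs[1]) when the main queue already has
 * data, the main input (inputs[0]) otherwise. The loop ends once
 * filter_frame() has emitted a frame and cleared frame_requested, or an input
 * returns an error (e.g. EOF).
 */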
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AlphaMergeContext *merge = ctx->priv;
    int in, ret;

    merge->frame_requested = 1;
    while (merge->frame_requested) {
        in = ff_bufqueue_peek(&merge->queue_main, 0) ? 1 : 0;
        ret = ff_request_frame(ctx->inputs[in]);
        if (ret < 0)
            return ret;
    }
    return 0;
}

static const AVFilterPad alphamerge_inputs[] = {
    {
        .name           = "main",
        .type           = AVMEDIA_TYPE_VIDEO,
        .config_props   = config_input_main,
        .filter_frame   = filter_frame,
        .needs_writable = 1,
    },{
        .name           = "alpha",
        .type           = AVMEDIA_TYPE_VIDEO,
        .filter_frame   = filter_frame,
    },
    { NULL }
};

static const AVFilterPad alphamerge_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_vf_alphamerge = {
    .name          = "alphamerge",
    .description   = NULL_IF_CONFIG_SMALL("Copy the luma value of the second "
                     "input into the alpha channel of the first input."),
    .uninit        = uninit,
    .priv_size     = sizeof(AlphaMergeContext),
    .query_formats = query_formats,
    .inputs        = alphamerge_inputs,
    .outputs       = alphamerge_outputs,
};