FFmpeg
af_join.c
Go to the documentation of this file.
1 /*
2  * This file is part of FFmpeg.
3  *
4  * FFmpeg is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * FFmpeg is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with FFmpeg; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
19 /**
20  * @file
21  * Audio join filter
22  *
23  * Join multiple audio inputs as different channels in
24  * a single output
25  */
26 
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"

#include "audio.h"
#include "avfilter.h"
#include "filters.h"
#include "formats.h"
#include "internal.h"
38 
/**
 * Describes where one output channel takes its data from: which input
 * stream, and which channel within that stream (either by index or by
 * channel-layout bit).
 */
typedef struct ChannelMap {
    int input;                ///< input stream index
    int       in_channel_idx; ///< index of in_channel in the input stream data
    uint64_t  in_channel;     ///< layout describing the input channel
    uint64_t out_channel;     ///< layout describing the output channel
} ChannelMap;
45 
46 typedef struct JoinContext {
47  const AVClass *class;
48 
49  int inputs;
50  char *map;
52  uint64_t channel_layout;
53 
56 
57  /**
58  * Temporary storage for input frames, until we get one on each input.
59  */
61 
62  /**
63  * Temporary storage for buffer references, for assembling the output frame.
64  */
66 } JoinContext;
67 
68 #define OFFSET(x) offsetof(JoinContext, x)
69 #define A AV_OPT_FLAG_AUDIO_PARAM
70 #define F AV_OPT_FLAG_FILTERING_PARAM
71 static const AVOption join_options[] = {
72  { "inputs", "Number of input streams.", OFFSET(inputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, INT_MAX, A|F },
73  { "channel_layout", "Channel layout of the "
74  "output stream.", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, {.str = "stereo"}, 0, 0, A|F },
75  { "map", "A comma-separated list of channels maps in the format "
76  "'input_stream.input_channel-output_channel.",
77  OFFSET(map), AV_OPT_TYPE_STRING, .flags = A|F },
78  { NULL }
79 };
80 
82 
84 {
85  JoinContext *s = ctx->priv;
86  char separator = '|';
87  char *cur = s->map;
88 
89  while (cur && *cur) {
90  char *sep, *next, *p;
91  uint64_t in_channel = 0, out_channel = 0;
92  int input_idx, out_ch_idx, in_ch_idx;
93 
94  next = strchr(cur, separator);
95  if (next)
96  *next++ = 0;
97 
98  /* split the map into input and output parts */
99  if (!(sep = strchr(cur, '-'))) {
100  av_log(ctx, AV_LOG_ERROR, "Missing separator '-' in channel "
101  "map '%s'\n", cur);
102  return AVERROR(EINVAL);
103  }
104  *sep++ = 0;
105 
106 #define PARSE_CHANNEL(str, var, inout) \
107  if (!(var = av_get_channel_layout(str))) { \
108  av_log(ctx, AV_LOG_ERROR, "Invalid " inout " channel: %s.\n", str);\
109  return AVERROR(EINVAL); \
110  } \
111  if (av_get_channel_layout_nb_channels(var) != 1) { \
112  av_log(ctx, AV_LOG_ERROR, "Channel map describes more than one " \
113  inout " channel.\n"); \
114  return AVERROR(EINVAL); \
115  }
116 
117  /* parse output channel */
118  PARSE_CHANNEL(sep, out_channel, "output");
119  if (!(out_channel & s->channel_layout)) {
120  av_log(ctx, AV_LOG_ERROR, "Output channel '%s' is not present in "
121  "requested channel layout.\n", sep);
122  return AVERROR(EINVAL);
123  }
124 
126  out_channel);
127  if (s->channels[out_ch_idx].input >= 0) {
128  av_log(ctx, AV_LOG_ERROR, "Multiple maps for output channel "
129  "'%s'.\n", sep);
130  return AVERROR(EINVAL);
131  }
132 
133  /* parse input channel */
134  input_idx = strtol(cur, &cur, 0);
135  if (input_idx < 0 || input_idx >= s->inputs) {
136  av_log(ctx, AV_LOG_ERROR, "Invalid input stream index: %d.\n",
137  input_idx);
138  return AVERROR(EINVAL);
139  }
140 
141  if (*cur)
142  cur++;
143 
144  in_ch_idx = strtol(cur, &p, 0);
145  if (p == cur) {
146  /* channel specifier is not a number,
147  * try to parse as channel name */
148  PARSE_CHANNEL(cur, in_channel, "input");
149  }
150 
151  s->channels[out_ch_idx].input = input_idx;
152  if (in_channel)
153  s->channels[out_ch_idx].in_channel = in_channel;
154  else
155  s->channels[out_ch_idx].in_channel_idx = in_ch_idx;
156 
157  cur = next;
158  }
159  return 0;
160 }
161 
163 {
164  JoinContext *s = ctx->priv;
165  int ret, i;
166 
168  av_log(ctx, AV_LOG_ERROR, "Error parsing channel layout '%s'.\n",
169  s->channel_layout_str);
170  return AVERROR(EINVAL);
171  }
172 
174  s->channels = av_mallocz_array(s->nb_channels, sizeof(*s->channels));
175  s->buffers = av_mallocz_array(s->nb_channels, sizeof(*s->buffers));
176  s->input_frames = av_mallocz_array(s->inputs, sizeof(*s->input_frames));
177  if (!s->channels || !s->buffers|| !s->input_frames)
178  return AVERROR(ENOMEM);
179 
180  for (i = 0; i < s->nb_channels; i++) {
182  s->channels[i].input = -1;
183  }
184 
185  if ((ret = parse_maps(ctx)) < 0)
186  return ret;
187 
188  for (i = 0; i < s->inputs; i++) {
189  AVFilterPad pad = { 0 };
190 
191  pad.type = AVMEDIA_TYPE_AUDIO;
192  pad.name = av_asprintf("input%d", i);
193  if (!pad.name)
194  return AVERROR(ENOMEM);
195 
196  if ((ret = ff_insert_inpad(ctx, i, &pad)) < 0) {
197  av_freep(&pad.name);
198  return ret;
199  }
200  }
201 
202  return 0;
203 }
204 
206 {
207  JoinContext *s = ctx->priv;
208  int i;
209 
210  for (i = 0; i < ctx->nb_inputs; i++) {
211  av_freep(&ctx->input_pads[i].name);
212  av_frame_free(&s->input_frames[i]);
213  }
214 
215  av_freep(&s->channels);
216  av_freep(&s->buffers);
217  av_freep(&s->input_frames);
218 }
219 
221 {
222  JoinContext *s = ctx->priv;
224  int i, ret;
225 
226  if ((ret = ff_add_channel_layout(&layouts, s->channel_layout)) < 0 ||
227  (ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts)) < 0)
228  return ret;
229 
230  for (i = 0; i < ctx->nb_inputs; i++) {
231  layouts = ff_all_channel_layouts();
232  if ((ret = ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts)) < 0)
233  return ret;
234  }
235 
236  if ((ret = ff_set_common_formats(ctx, ff_planar_sample_fmts())) < 0 ||
237  (ret = ff_set_common_samplerates(ctx, ff_all_samplerates())) < 0)
238  return ret;
239 
240  return 0;
241 }
242 
244  uint64_t *inputs)
245 {
246  int i;
247 
248  for (i = 0; i < ctx->nb_inputs; i++) {
249  AVFilterLink *link = ctx->inputs[i];
250 
251  if (ch->out_channel & link->channel_layout &&
252  !(ch->out_channel & inputs[i])) {
253  ch->input = i;
254  ch->in_channel = ch->out_channel;
255  inputs[i] |= ch->out_channel;
256  return;
257  }
258  }
259 }
260 
262  uint64_t *inputs)
263 {
264  int i;
265 
266  for (i = 0; i < ctx->nb_inputs; i++) {
267  AVFilterLink *link = ctx->inputs[i];
268 
269  if ((inputs[i] & link->channel_layout) != link->channel_layout) {
270  uint64_t unused = link->channel_layout & ~inputs[i];
271 
272  ch->input = i;
274  inputs[i] |= ch->in_channel;
275  return;
276  }
277  }
278 }
279 
280 static int join_config_output(AVFilterLink *outlink)
281 {
282  AVFilterContext *ctx = outlink->src;
283  JoinContext *s = ctx->priv;
284  uint64_t *inputs; // nth element tracks which channels are used from nth input
285  int i, ret = 0;
286 
287  /* initialize inputs to user-specified mappings */
288  if (!(inputs = av_mallocz_array(ctx->nb_inputs, sizeof(*inputs))))
289  return AVERROR(ENOMEM);
290  for (i = 0; i < s->nb_channels; i++) {
291  ChannelMap *ch = &s->channels[i];
293 
294  if (ch->input < 0)
295  continue;
296 
297  inlink = ctx->inputs[ch->input];
298 
299  if (!ch->in_channel)
301  ch->in_channel_idx);
302 
303  if (!(ch->in_channel & inlink->channel_layout)) {
304  av_log(ctx, AV_LOG_ERROR, "Requested channel %s is not present in "
305  "input stream #%d.\n", av_get_channel_name(ch->in_channel),
306  ch->input);
307  ret = AVERROR(EINVAL);
308  goto fail;
309  }
310 
311  inputs[ch->input] |= ch->in_channel;
312  }
313 
314  /* guess channel maps when not explicitly defined */
315  /* first try unused matching channels */
316  for (i = 0; i < s->nb_channels; i++) {
317  ChannelMap *ch = &s->channels[i];
318 
319  if (ch->input < 0)
320  guess_map_matching(ctx, ch, inputs);
321  }
322 
323  /* if the above failed, try to find _any_ unused input channel */
324  for (i = 0; i < s->nb_channels; i++) {
325  ChannelMap *ch = &s->channels[i];
326 
327  if (ch->input < 0)
328  guess_map_any(ctx, ch, inputs);
329 
330  if (ch->input < 0) {
331  av_log(ctx, AV_LOG_ERROR, "Could not find input channel for "
332  "output channel '%s'.\n",
334  goto fail;
335  }
336 
338  ch->in_channel);
339  }
340 
341  /* print mappings */
342  av_log(ctx, AV_LOG_VERBOSE, "mappings: ");
343  for (i = 0; i < s->nb_channels; i++) {
344  ChannelMap *ch = &s->channels[i];
345  av_log(ctx, AV_LOG_VERBOSE, "%d.%s => %s ", ch->input,
348  }
349  av_log(ctx, AV_LOG_VERBOSE, "\n");
350 
351  for (i = 0; i < ctx->nb_inputs; i++) {
352  if (!inputs[i])
353  av_log(ctx, AV_LOG_WARNING, "No channels are used from input "
354  "stream %d.\n", i);
355  }
356 
357 fail:
358  av_freep(&inputs);
359  return ret;
360 }
361 
363 {
364  AVFilterLink *outlink = ctx->outputs[0];
365  JoinContext *s = ctx->priv;
366  AVFrame *frame;
367  int linesize = INT_MAX;
368  int nb_samples = INT_MAX;
369  int nb_buffers = 0;
370  int i, j, ret;
371 
372  for (i = 0; i < ctx->nb_inputs; i++) {
373  if (!s->input_frames[i])
374  return 0;
375  nb_samples = FFMIN(nb_samples, s->input_frames[i]->nb_samples);
376  }
377  if (!nb_samples)
378  return 0;
379 
380  /* setup the output frame */
381  frame = av_frame_alloc();
382  if (!frame)
383  return AVERROR(ENOMEM);
384  if (s->nb_channels > FF_ARRAY_ELEMS(frame->data)) {
386  sizeof(*frame->extended_data));
387  if (!frame->extended_data) {
388  ret = AVERROR(ENOMEM);
389  goto fail;
390  }
391  }
392 
393  /* copy the data pointers */
394  for (i = 0; i < s->nb_channels; i++) {
395  ChannelMap *ch = &s->channels[i];
396  AVFrame *cur = s->input_frames[ch->input];
397  AVBufferRef *buf;
398 
399  frame->extended_data[i] = cur->extended_data[ch->in_channel_idx];
400  linesize = FFMIN(linesize, cur->linesize[0]);
401 
402  /* add the buffer where this plan is stored to the list if it's
403  * not already there */
405  if (!buf) {
406  ret = AVERROR(EINVAL);
407  goto fail;
408  }
409  for (j = 0; j < nb_buffers; j++)
410  if (s->buffers[j]->buffer == buf->buffer)
411  break;
412  if (j == i)
413  s->buffers[nb_buffers++] = buf;
414  }
415 
416  /* create references to the buffers we copied to output */
417  if (nb_buffers > FF_ARRAY_ELEMS(frame->buf)) {
418  frame->nb_extended_buf = nb_buffers - FF_ARRAY_ELEMS(frame->buf);
420  sizeof(*frame->extended_buf));
421  if (!frame->extended_buf) {
422  frame->nb_extended_buf = 0;
423  ret = AVERROR(ENOMEM);
424  goto fail;
425  }
426  }
427  for (i = 0; i < FFMIN(FF_ARRAY_ELEMS(frame->buf), nb_buffers); i++) {
428  frame->buf[i] = av_buffer_ref(s->buffers[i]);
429  if (!frame->buf[i]) {
430  ret = AVERROR(ENOMEM);
431  goto fail;
432  }
433  }
434  for (i = 0; i < frame->nb_extended_buf; i++) {
435  frame->extended_buf[i] = av_buffer_ref(s->buffers[i +
436  FF_ARRAY_ELEMS(frame->buf)]);
437  if (!frame->extended_buf[i]) {
438  ret = AVERROR(ENOMEM);
439  goto fail;
440  }
441  }
442 
443  frame->nb_samples = nb_samples;
444  frame->channel_layout = outlink->channel_layout;
445  frame->channels = outlink->channels;
446  frame->sample_rate = outlink->sample_rate;
447  frame->format = outlink->format;
448  frame->pts = s->input_frames[0]->pts;
449  frame->linesize[0] = linesize;
450  if (frame->data != frame->extended_data) {
451  memcpy(frame->data, frame->extended_data, sizeof(*frame->data) *
452  FFMIN(FF_ARRAY_ELEMS(frame->data), s->nb_channels));
453  }
454 
455  ret = ff_filter_frame(outlink, frame);
456 
457  for (i = 0; i < ctx->nb_inputs; i++)
458  av_frame_free(&s->input_frames[i]);
459 
460  return ret;
461 
462 fail:
463  av_frame_free(&frame);
464  return ret;
465 }
466 
468 {
469  JoinContext *s = ctx->priv;
470  int i, ret, status;
471  int nb_samples = 0;
472  int64_t pts;
473 
475 
476  if (!s->input_frames[0]) {
477  ret = ff_inlink_consume_frame(ctx->inputs[0], &s->input_frames[0]);
478  if (ret < 0) {
479  return ret;
480  } else if (ff_inlink_acknowledge_status(ctx->inputs[0], &status, &pts)) {
481  ff_outlink_set_status(ctx->outputs[0], status, pts);
482  return 0;
483  } else {
484  if (ff_outlink_frame_wanted(ctx->outputs[0]) && !s->input_frames[0]) {
486  return 0;
487  }
488  }
489  if (!s->input_frames[0]) {
490  return 0;
491  }
492  }
493 
494  nb_samples = s->input_frames[0]->nb_samples;
495 
496  for (i = 1; i < ctx->nb_inputs && nb_samples > 0; i++) {
497  if (s->input_frames[i])
498  continue;
499 
500  if (ff_inlink_check_available_samples(ctx->inputs[i], nb_samples) > 0) {
501  ret = ff_inlink_consume_samples(ctx->inputs[i], nb_samples, nb_samples, &s->input_frames[i]);
502  if (ret < 0) {
503  return ret;
504  } else if (ff_inlink_acknowledge_status(ctx->inputs[i], &status, &pts)) {
505  ff_outlink_set_status(ctx->outputs[0], status, pts);
506  return 0;
507  }
508  } else {
509  if (ff_outlink_frame_wanted(ctx->outputs[0])) {
511  return 0;
512  }
513  }
514  }
515 
516  return try_push_frame(ctx);
517 }
518 
520  {
521  .name = "default",
522  .type = AVMEDIA_TYPE_AUDIO,
523  .config_props = join_config_output,
524  },
525  { NULL }
526 };
527 
529  .name = "join",
530  .description = NULL_IF_CONFIG_SMALL("Join multiple audio streams into "
531  "multi-channel output."),
532  .priv_size = sizeof(JoinContext),
533  .priv_class = &join_class,
534  .init = join_init,
535  .uninit = join_uninit,
536  .activate = activate,
538  .inputs = NULL,
539  .outputs = avfilter_af_join_outputs,
541 };
int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
Take a frame from the link&#39;s FIFO and update the link&#39;s stats.
Definition: avfilter.c:1494
#define NULL
Definition: coverity.c:32
uint64_t in_channel
layout describing the input channel
Definition: af_channelmap.c:41
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
AVOption.
Definition: opt.h:246
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
#define FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, filter)
Forward the status on an output link to all input links.
Definition: filters.h:212
Main libavfilter public API header.
#define A
Definition: af_join.c:69
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:486
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
#define AVFILTER_FLAG_DYNAMIC_INPUTS
The number of the filter inputs is not determined just by AVFilter.inputs.
Definition: avfilter.h:105
int nb_extended_buf
Number of elements in extended_buf.
Definition: frame.h:504
enum AVMediaType type
AVFilterPad type.
Definition: internal.h:65
int ff_inlink_check_available_samples(AVFilterLink *link, unsigned min)
Test if enough samples are available on the link.
Definition: avfilter.c:1479
AVBufferRef ** buffers
Temporary storage for buffer references, for assembling the output frame.
Definition: af_join.c:65
static void ff_outlink_set_status(AVFilterLink *link, int status, int64_t pts)
Set the status field of a link from the source filter.
Definition: filters.h:189
void ff_inlink_request_frame(AVFilterLink *link)
Mark that a frame is wanted on the link.
Definition: avfilter.c:1620
char * map
Definition: af_join.c:50
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
const char * name
Pad name.
Definition: internal.h:60
int nb_channels
Definition: af_join.c:54
uint64_t av_get_channel_layout(const char *name)
Return a channel layout id that matches name, or 0 if no match is found.
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:346
int ff_channel_layouts_ref(AVFilterChannelLayouts *f, AVFilterChannelLayouts **ref)
Add *ref as a new reference to f.
Definition: formats.c:434
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1093
#define av_cold
Definition: attributes.h:82
static av_cold void join_uninit(AVFilterContext *ctx)
Definition: af_join.c:205
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:189
static av_cold int uninit(AVCodecContext *avctx)
Definition: crystalhd.c:279
AVOptions.
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:388
static av_cold int join_init(AVFilterContext *ctx)
Definition: af_join.c:162
static void guess_map_matching(AVFilterContext *ctx, ChannelMap *ch, uint64_t *inputs)
Definition: af_join.c:243
int input
input stream index
Definition: af_join.c:40
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
the definition of that something depends on the semantic of the filter The callback must examine the status of the filter s links and proceed accordingly The status of output links is stored in the status_in and status_out fields and tested by the ff_outlink_frame_wanted() function.If this function returns true
#define av_log(a,...)
static int join_query_formats(AVFilterContext *ctx)
Definition: af_join.c:220
A filter pad used for either input or output.
Definition: internal.h:54
int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
Test and acknowledge the change of status on the link.
Definition: avfilter.c:1449
AVFilterPad * input_pads
array of input pads
Definition: avfilter.h:345
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:569
int ff_add_channel_layout(AVFilterChannelLayouts **l, uint64_t channel_layout)
Definition: formats.c:342
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
void * priv
private data for use by the filter
Definition: avfilter.h:353
simple assert() macros that are a bit more flexible than ISO C assert().
#define fail()
Definition: checkasm.h:122
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:472
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:113
static const AVOption join_options[]
Definition: af_join.c:71
int channels
number of audio channels, only used for audio.
Definition: frame.h:601
audio channel layout utility functions
unsigned nb_inputs
number of input pads
Definition: avfilter.h:347
#define FFMIN(a, b)
Definition: common.h:96
AVBufferRef ** extended_buf
For planar audio which requires more than AV_NUM_DATA_POINTERS AVBufferRef pointers, this array will hold all the references which cannot fit into AVFrame.buf.
Definition: frame.h:500
AVFormatContext * ctx
Definition: movenc.c:48
AVBufferRef * av_frame_get_plane_buffer(AVFrame *frame, int plane)
Get the buffer reference a given data plane is stored in.
Definition: frame.c:659
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define s(width, name)
Definition: cbs_vp9.c:257
AVFilterFormats * ff_planar_sample_fmts(void)
Construct a formats list containing all planar sample formats.
Definition: formats.c:381
AVFilter ff_af_join
Definition: af_join.c:528
AVFilterChannelLayouts * ff_all_channel_layouts(void)
Construct an empty AVFilterChannelLayouts/AVFilterFormats struct – representing any channel layout (...
Definition: formats.c:400
static const AVFilterPad avfilter_af_join_outputs[]
Definition: af_join.c:519
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
#define FF_ARRAY_ELEMS(a)
int in_channel_idx
index of in_channel in the input stream data
Definition: af_channelmap.c:43
A list of supported channel layouts.
Definition: formats.h:85
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:368
static int activate(AVFilterContext *ctx)
Definition: af_join.c:467
AVFILTER_DEFINE_CLASS(join)
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max, AVFrame **rframe)
Take samples from the link&#39;s FIFO and update the link&#39;s stats.
Definition: avfilter.c:1513
int inputs
Definition: af_join.c:49
void * buf
Definition: avisynth_c.h:766
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
AVBuffer * buffer
Definition: buffer.h:82
Describe the class of an AVClass context structure.
Definition: log.h:67
int sample_rate
Sample rate of the audio data.
Definition: frame.h:467
#define PARSE_CHANNEL(str, var, inout)
Filter definition.
Definition: avfilter.h:144
uint64_t out_channel
layout describing the output channel
Definition: af_channelmap.c:42
they must not be accessed directly The fifo field contains the frames that are queued in the input for processing by the filter The status_in and status_out fields contains the queued status(EOF or error) of the link
const char * name
Filter name.
Definition: avfilter.h:148
const VDPAUPixFmtMap * map
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:350
enum MovChannelLayoutTag * layouts
Definition: mov_chan.c:434
int av_get_channel_layout_channel_index(uint64_t channel_layout, uint64_t channel)
Get the index of a channel in channel_layout.
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi-0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64,*(const int64_t *) pi *(1.0f/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64,*(const int64_t *) pi *(1.0/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, 
av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(UINT64_C(1)<< 63)))#define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64),};static void cpy1(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, len);}static void cpy2(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 2 *len);}static void cpy4(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 4 *len);}static void cpy8(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 8 *len);}AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags){AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);}ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map){switch(av_get_bytes_per_sample(in_fmt)){case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;}}if(HAVE_X86ASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);return ctx;}void swri_audio_convert_free(AudioConvert **ctx){av_freep(ctx);}int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len){int ch;int off=0;const int os=(out->planar?1:out->ch_count)*out->bps;unsigned 
misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask){int planes=in->planar?in->ch_count:1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;}if(ctx->out_simd_align_mask){int planes=out->planar?out->ch_count:1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;}if(ctx->simd_f &&!ctx->ch_map &&!misaligned){off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){if(out->planar==in->planar){int planes=out->planar?out->ch_count:1;for(ch=0;ch< planes;ch++){ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
AVFilterFormats * ff_all_samplerates(void)
Definition: formats.c:394
static int64_t pts
#define flags(name, subs,...)
Definition: cbs_av1.c:561
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
AVFrame ** input_frames
Temporary storage for input frames, until we get one on each input.
Definition: af_join.c:60
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
ChannelMap * channels
Definition: af_join.c:55
#define F
Definition: af_join.c:70
A reference to a data buffer.
Definition: buffer.h:81
#define OFFSET(x)
Definition: af_join.c:68
static int query_formats(AVFilterContext *ctx)
Definition: aeval.c:244
static void guess_map_any(AVFilterContext *ctx, ChannelMap *ch, uint64_t *inputs)
Definition: af_join.c:261
common internal and external API header
uint64_t av_channel_layout_extract_channel(uint64_t channel_layout, int index)
Get the channel with the given index in channel_layout.
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
static int parse_maps(AVFilterContext *ctx)
Definition: af_join.c:83
char * channel_layout_str
Definition: af_join.c:51
const char * av_get_channel_name(uint64_t channel)
Get the name of a given channel.
static int try_push_frame(AVFilterContext *ctx)
Definition: af_join.c:362
An instance of a filter.
Definition: avfilter.h:338
#define av_freep(p)
static int join_config_output(AVFilterLink *outlink)
Definition: af_join.c:280
internal API functions
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:342
uint64_t channel_layout
Definition: af_join.c:52
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:361
int ff_set_common_samplerates(AVFilterContext *ctx, AVFilterFormats *samplerates)
Definition: formats.c:557
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:191
static int ff_insert_inpad(AVFilterContext *f, unsigned index, AVFilterPad *p)
Insert a new input pad for the filter.
Definition: internal.h:277