FFmpeg
af_join.c
Go to the documentation of this file.
1 /*
2  * This file is part of FFmpeg.
3  *
4  * FFmpeg is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * FFmpeg is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with FFmpeg; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
/**
 * @file
 * Audio join filter
 *
 * Join multiple audio inputs as different channels in
 * a single output
 */
26 
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"

#include "audio.h"
#include "avfilter.h"
#include "filters.h"
#include "formats.h"
#include "internal.h"
38 
/**
 * Mapping of one output channel to the input channel it is taken from.
 */
typedef struct ChannelMap {
    int input;                ///< input stream index
    int      in_channel_idx;  ///< index of in_channel in the input stream data
    uint64_t in_channel;      ///< layout describing the input channel
    uint64_t out_channel;     ///< layout describing the output channel
} ChannelMap;
45 
46 typedef struct JoinContext {
47  const AVClass *class;
48 
49  int inputs;
50  char *map;
52  uint64_t channel_layout;
53 
56 
57  /**
58  * Temporary storage for input frames, until we get one on each input.
59  */
61 
62  /**
63  * Temporary storage for buffer references, for assembling the output frame.
64  */
66 } JoinContext;
67 
68 #define OFFSET(x) offsetof(JoinContext, x)
69 #define A AV_OPT_FLAG_AUDIO_PARAM
70 #define F AV_OPT_FLAG_FILTERING_PARAM
71 static const AVOption join_options[] = {
72  { "inputs", "Number of input streams.", OFFSET(inputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, INT_MAX, A|F },
73  { "channel_layout", "Channel layout of the "
74  "output stream.", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, {.str = "stereo"}, 0, 0, A|F },
75  { "map", "A comma-separated list of channels maps in the format "
76  "'input_stream.input_channel-output_channel.",
77  OFFSET(map), AV_OPT_TYPE_STRING, .flags = A|F },
78  { NULL }
79 };
80 
82 
84 {
85  JoinContext *s = ctx->priv;
86  char separator = '|';
87  char *cur = s->map;
88 
89  while (cur && *cur) {
90  char *sep, *next, *p;
91  uint64_t in_channel = 0, out_channel = 0;
92  int input_idx, out_ch_idx, in_ch_idx;
93 
94  next = strchr(cur, separator);
95  if (next)
96  *next++ = 0;
97 
98  /* split the map into input and output parts */
99  if (!(sep = strchr(cur, '-'))) {
100  av_log(ctx, AV_LOG_ERROR, "Missing separator '-' in channel "
101  "map '%s'\n", cur);
102  return AVERROR(EINVAL);
103  }
104  *sep++ = 0;
105 
106 #define PARSE_CHANNEL(str, var, inout) \
107  if (!(var = av_get_channel_layout(str))) { \
108  av_log(ctx, AV_LOG_ERROR, "Invalid " inout " channel: %s.\n", str);\
109  return AVERROR(EINVAL); \
110  } \
111  if (av_get_channel_layout_nb_channels(var) != 1) { \
112  av_log(ctx, AV_LOG_ERROR, "Channel map describes more than one " \
113  inout " channel.\n"); \
114  return AVERROR(EINVAL); \
115  }
116 
117  /* parse output channel */
118  PARSE_CHANNEL(sep, out_channel, "output");
119  if (!(out_channel & s->channel_layout)) {
120  av_log(ctx, AV_LOG_ERROR, "Output channel '%s' is not present in "
121  "requested channel layout.\n", sep);
122  return AVERROR(EINVAL);
123  }
124 
126  out_channel);
127  if (s->channels[out_ch_idx].input >= 0) {
128  av_log(ctx, AV_LOG_ERROR, "Multiple maps for output channel "
129  "'%s'.\n", sep);
130  return AVERROR(EINVAL);
131  }
132 
133  /* parse input channel */
134  input_idx = strtol(cur, &cur, 0);
135  if (input_idx < 0 || input_idx >= s->inputs) {
136  av_log(ctx, AV_LOG_ERROR, "Invalid input stream index: %d.\n",
137  input_idx);
138  return AVERROR(EINVAL);
139  }
140 
141  if (*cur)
142  cur++;
143 
144  in_ch_idx = strtol(cur, &p, 0);
145  if (p == cur) {
146  /* channel specifier is not a number,
147  * try to parse as channel name */
148  PARSE_CHANNEL(cur, in_channel, "input");
149  }
150 
151  s->channels[out_ch_idx].input = input_idx;
152  if (in_channel)
153  s->channels[out_ch_idx].in_channel = in_channel;
154  else
155  s->channels[out_ch_idx].in_channel_idx = in_ch_idx;
156 
157  cur = next;
158  }
159  return 0;
160 }
161 
163 {
164  JoinContext *s = ctx->priv;
165  int ret, i;
166 
168  av_log(ctx, AV_LOG_ERROR, "Error parsing channel layout '%s'.\n",
169  s->channel_layout_str);
170  return AVERROR(EINVAL);
171  }
172 
174  s->channels = av_mallocz_array(s->nb_channels, sizeof(*s->channels));
175  s->buffers = av_mallocz_array(s->nb_channels, sizeof(*s->buffers));
176  s->input_frames = av_mallocz_array(s->inputs, sizeof(*s->input_frames));
177  if (!s->channels || !s->buffers|| !s->input_frames)
178  return AVERROR(ENOMEM);
179 
180  for (i = 0; i < s->nb_channels; i++) {
182  s->channels[i].input = -1;
183  }
184 
185  if ((ret = parse_maps(ctx)) < 0)
186  return ret;
187 
188  for (i = 0; i < s->inputs; i++) {
189  AVFilterPad pad = { 0 };
190 
191  pad.type = AVMEDIA_TYPE_AUDIO;
192  pad.name = av_asprintf("input%d", i);
193  if (!pad.name)
194  return AVERROR(ENOMEM);
195 
196  if ((ret = ff_insert_inpad(ctx, i, &pad)) < 0) {
197  av_freep(&pad.name);
198  return ret;
199  }
200  }
201 
202  return 0;
203 }
204 
206 {
207  JoinContext *s = ctx->priv;
208  int i;
209 
210  for (i = 0; i < s->inputs && s->input_frames; i++) {
211  av_frame_free(&s->input_frames[i]);
212  }
213 
214  for (i = 0; i < ctx->nb_inputs; i++) {
215  av_freep(&ctx->input_pads[i].name);
216  }
217 
218  av_freep(&s->channels);
219  av_freep(&s->buffers);
220  av_freep(&s->input_frames);
221 }
222 
224 {
225  JoinContext *s = ctx->priv;
227  int i, ret;
228 
229  if ((ret = ff_add_channel_layout(&layouts, s->channel_layout)) < 0 ||
230  (ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts)) < 0)
231  return ret;
232 
233  for (i = 0; i < ctx->nb_inputs; i++) {
234  layouts = ff_all_channel_layouts();
235  if ((ret = ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts)) < 0)
236  return ret;
237  }
238 
239  if ((ret = ff_set_common_formats(ctx, ff_planar_sample_fmts())) < 0 ||
240  (ret = ff_set_common_samplerates(ctx, ff_all_samplerates())) < 0)
241  return ret;
242 
243  return 0;
244 }
245 
247  uint64_t *inputs)
248 {
249  int i;
250 
251  for (i = 0; i < ctx->nb_inputs; i++) {
252  AVFilterLink *link = ctx->inputs[i];
253 
254  if (ch->out_channel & link->channel_layout &&
255  !(ch->out_channel & inputs[i])) {
256  ch->input = i;
257  ch->in_channel = ch->out_channel;
258  inputs[i] |= ch->out_channel;
259  return;
260  }
261  }
262 }
263 
265  uint64_t *inputs)
266 {
267  int i;
268 
269  for (i = 0; i < ctx->nb_inputs; i++) {
270  AVFilterLink *link = ctx->inputs[i];
271 
272  if ((inputs[i] & link->channel_layout) != link->channel_layout) {
273  uint64_t unused = link->channel_layout & ~inputs[i];
274 
275  ch->input = i;
277  inputs[i] |= ch->in_channel;
278  return;
279  }
280  }
281 }
282 
283 static int join_config_output(AVFilterLink *outlink)
284 {
285  AVFilterContext *ctx = outlink->src;
286  JoinContext *s = ctx->priv;
287  uint64_t *inputs; // nth element tracks which channels are used from nth input
288  int i, ret = 0;
289 
290  /* initialize inputs to user-specified mappings */
291  if (!(inputs = av_mallocz_array(ctx->nb_inputs, sizeof(*inputs))))
292  return AVERROR(ENOMEM);
293  for (i = 0; i < s->nb_channels; i++) {
294  ChannelMap *ch = &s->channels[i];
296 
297  if (ch->input < 0)
298  continue;
299 
300  inlink = ctx->inputs[ch->input];
301 
302  if (!ch->in_channel)
304  ch->in_channel_idx);
305 
306  if (!(ch->in_channel & inlink->channel_layout)) {
307  av_log(ctx, AV_LOG_ERROR, "Requested channel %s is not present in "
308  "input stream #%d.\n", av_get_channel_name(ch->in_channel),
309  ch->input);
310  ret = AVERROR(EINVAL);
311  goto fail;
312  }
313 
314  inputs[ch->input] |= ch->in_channel;
315  }
316 
317  /* guess channel maps when not explicitly defined */
318  /* first try unused matching channels */
319  for (i = 0; i < s->nb_channels; i++) {
320  ChannelMap *ch = &s->channels[i];
321 
322  if (ch->input < 0)
323  guess_map_matching(ctx, ch, inputs);
324  }
325 
326  /* if the above failed, try to find _any_ unused input channel */
327  for (i = 0; i < s->nb_channels; i++) {
328  ChannelMap *ch = &s->channels[i];
329 
330  if (ch->input < 0)
331  guess_map_any(ctx, ch, inputs);
332 
333  if (ch->input < 0) {
334  av_log(ctx, AV_LOG_ERROR, "Could not find input channel for "
335  "output channel '%s'.\n",
337  goto fail;
338  }
339 
341  ch->in_channel);
342  }
343 
344  /* print mappings */
345  av_log(ctx, AV_LOG_VERBOSE, "mappings: ");
346  for (i = 0; i < s->nb_channels; i++) {
347  ChannelMap *ch = &s->channels[i];
348  av_log(ctx, AV_LOG_VERBOSE, "%d.%s => %s ", ch->input,
351  }
352  av_log(ctx, AV_LOG_VERBOSE, "\n");
353 
354  for (i = 0; i < ctx->nb_inputs; i++) {
355  if (!inputs[i])
356  av_log(ctx, AV_LOG_WARNING, "No channels are used from input "
357  "stream %d.\n", i);
358  }
359 
360 fail:
361  av_freep(&inputs);
362  return ret;
363 }
364 
366 {
367  AVFilterLink *outlink = ctx->outputs[0];
368  JoinContext *s = ctx->priv;
369  AVFrame *frame;
370  int linesize = INT_MAX;
371  int nb_samples = INT_MAX;
372  int nb_buffers = 0;
373  int i, j, ret;
374 
375  for (i = 0; i < ctx->nb_inputs; i++) {
376  if (!s->input_frames[i])
377  return 0;
378  nb_samples = FFMIN(nb_samples, s->input_frames[i]->nb_samples);
379  }
380  if (!nb_samples)
381  return 0;
382 
383  /* setup the output frame */
384  frame = av_frame_alloc();
385  if (!frame)
386  return AVERROR(ENOMEM);
387  if (s->nb_channels > FF_ARRAY_ELEMS(frame->data)) {
389  sizeof(*frame->extended_data));
390  if (!frame->extended_data) {
391  ret = AVERROR(ENOMEM);
392  goto fail;
393  }
394  }
395 
396  /* copy the data pointers */
397  for (i = 0; i < s->nb_channels; i++) {
398  ChannelMap *ch = &s->channels[i];
399  AVFrame *cur = s->input_frames[ch->input];
400  AVBufferRef *buf;
401 
402  frame->extended_data[i] = cur->extended_data[ch->in_channel_idx];
403  linesize = FFMIN(linesize, cur->linesize[0]);
404 
405  /* add the buffer where this plan is stored to the list if it's
406  * not already there */
408  if (!buf) {
409  ret = AVERROR(EINVAL);
410  goto fail;
411  }
412  for (j = 0; j < nb_buffers; j++)
413  if (s->buffers[j]->buffer == buf->buffer)
414  break;
415  if (j == i)
416  s->buffers[nb_buffers++] = buf;
417  }
418 
419  /* create references to the buffers we copied to output */
420  if (nb_buffers > FF_ARRAY_ELEMS(frame->buf)) {
421  frame->nb_extended_buf = nb_buffers - FF_ARRAY_ELEMS(frame->buf);
423  sizeof(*frame->extended_buf));
424  if (!frame->extended_buf) {
425  frame->nb_extended_buf = 0;
426  ret = AVERROR(ENOMEM);
427  goto fail;
428  }
429  }
430  for (i = 0; i < FFMIN(FF_ARRAY_ELEMS(frame->buf), nb_buffers); i++) {
431  frame->buf[i] = av_buffer_ref(s->buffers[i]);
432  if (!frame->buf[i]) {
433  ret = AVERROR(ENOMEM);
434  goto fail;
435  }
436  }
437  for (i = 0; i < frame->nb_extended_buf; i++) {
438  frame->extended_buf[i] = av_buffer_ref(s->buffers[i +
439  FF_ARRAY_ELEMS(frame->buf)]);
440  if (!frame->extended_buf[i]) {
441  ret = AVERROR(ENOMEM);
442  goto fail;
443  }
444  }
445 
446  frame->nb_samples = nb_samples;
447  frame->channel_layout = outlink->channel_layout;
448  frame->channels = outlink->channels;
449  frame->sample_rate = outlink->sample_rate;
450  frame->format = outlink->format;
451  frame->pts = s->input_frames[0]->pts;
452  frame->linesize[0] = linesize;
453  if (frame->data != frame->extended_data) {
454  memcpy(frame->data, frame->extended_data, sizeof(*frame->data) *
455  FFMIN(FF_ARRAY_ELEMS(frame->data), s->nb_channels));
456  }
457 
458  ret = ff_filter_frame(outlink, frame);
459 
460  for (i = 0; i < ctx->nb_inputs; i++)
461  av_frame_free(&s->input_frames[i]);
462 
463  return ret;
464 
465 fail:
466  av_frame_free(&frame);
467  return ret;
468 }
469 
471 {
472  JoinContext *s = ctx->priv;
473  int i, ret, status;
474  int nb_samples = 0;
475  int64_t pts;
476 
478 
479  if (!s->input_frames[0]) {
480  ret = ff_inlink_consume_frame(ctx->inputs[0], &s->input_frames[0]);
481  if (ret < 0) {
482  return ret;
483  } else if (ff_inlink_acknowledge_status(ctx->inputs[0], &status, &pts)) {
484  ff_outlink_set_status(ctx->outputs[0], status, pts);
485  return 0;
486  } else {
487  if (ff_outlink_frame_wanted(ctx->outputs[0]) && !s->input_frames[0]) {
489  return 0;
490  }
491  }
492  if (!s->input_frames[0]) {
493  return 0;
494  }
495  }
496 
497  nb_samples = s->input_frames[0]->nb_samples;
498 
499  for (i = 1; i < ctx->nb_inputs && nb_samples > 0; i++) {
500  if (s->input_frames[i])
501  continue;
502 
503  if (ff_inlink_check_available_samples(ctx->inputs[i], nb_samples) > 0) {
504  ret = ff_inlink_consume_samples(ctx->inputs[i], nb_samples, nb_samples, &s->input_frames[i]);
505  if (ret < 0) {
506  return ret;
507  } else if (ff_inlink_acknowledge_status(ctx->inputs[i], &status, &pts)) {
508  ff_outlink_set_status(ctx->outputs[0], status, pts);
509  return 0;
510  }
511  } else {
512  if (ff_outlink_frame_wanted(ctx->outputs[0])) {
514  return 0;
515  }
516  }
517  }
518 
519  return try_push_frame(ctx);
520 }
521 
523  {
524  .name = "default",
525  .type = AVMEDIA_TYPE_AUDIO,
526  .config_props = join_config_output,
527  },
528  { NULL }
529 };
530 
532  .name = "join",
533  .description = NULL_IF_CONFIG_SMALL("Join multiple audio streams into "
534  "multi-channel output."),
535  .priv_size = sizeof(JoinContext),
536  .priv_class = &join_class,
537  .init = join_init,
538  .uninit = join_uninit,
539  .activate = activate,
541  .inputs = NULL,
542  .outputs = avfilter_af_join_outputs,
544 };
int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
Take a frame from the link&#39;s FIFO and update the link&#39;s stats.
Definition: avfilter.c:1476
#define NULL
Definition: coverity.c:32
uint64_t in_channel
layout describing the input channel
Definition: af_channelmap.c:41
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
AVOption.
Definition: opt.h:246
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
#define FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, filter)
Forward the status on an output link to all input links.
Definition: filters.h:212
Main libavfilter public API header.
#define A
Definition: af_join.c:69
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:486
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
#define AVFILTER_FLAG_DYNAMIC_INPUTS
The number of the filter inputs is not determined just by AVFilter.inputs.
Definition: avfilter.h:105
int nb_extended_buf
Number of elements in extended_buf.
Definition: frame.h:504
enum AVMediaType type
AVFilterPad type.
Definition: internal.h:65
int ff_inlink_check_available_samples(AVFilterLink *link, unsigned min)
Test if enough samples are available on the link.
Definition: avfilter.c:1461
AVBufferRef ** buffers
Temporary storage for buffer references, for assembling the output frame.
Definition: af_join.c:65
static void ff_outlink_set_status(AVFilterLink *link, int status, int64_t pts)
Set the status field of a link from the source filter.
Definition: filters.h:189
void ff_inlink_request_frame(AVFilterLink *link)
Mark that a frame is wanted on the link.
Definition: avfilter.c:1602
char * map
Definition: af_join.c:50
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
const char * name
Pad name.
Definition: internal.h:60
int nb_channels
Definition: af_join.c:54
uint64_t av_get_channel_layout(const char *name)
Return a channel layout id that matches name, or 0 if no match is found.
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:346
int ff_channel_layouts_ref(AVFilterChannelLayouts *f, AVFilterChannelLayouts **ref)
Add *ref as a new reference to f.
Definition: formats.c:434
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1075
#define av_cold
Definition: attributes.h:82
static av_cold void join_uninit(AVFilterContext *ctx)
Definition: af_join.c:205
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:189
static av_cold int uninit(AVCodecContext *avctx)
Definition: crystalhd.c:279
AVOptions.
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:388
static av_cold int join_init(AVFilterContext *ctx)
Definition: af_join.c:162
static void guess_map_matching(AVFilterContext *ctx, ChannelMap *ch, uint64_t *inputs)
Definition: af_join.c:246
int input
input stream index
Definition: af_join.c:40
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
the definition of that something depends on the semantic of the filter The callback must examine the status of the filter s links and proceed accordingly The status of output links is stored in the status_in and status_out fields and tested by the ff_outlink_frame_wanted() function.If this function returns true
#define av_log(a,...)
static int join_query_formats(AVFilterContext *ctx)
Definition: af_join.c:223
A filter pad used for either input or output.
Definition: internal.h:54
int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
Test and acknowledge the change of status on the link.
Definition: avfilter.c:1431
AVFilterPad * input_pads
array of input pads
Definition: avfilter.h:345
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:569
int ff_add_channel_layout(AVFilterChannelLayouts **l, uint64_t channel_layout)
Definition: formats.c:342
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
void * priv
private data for use by the filter
Definition: avfilter.h:353
simple assert() macros that are a bit more flexible than ISO C assert().
#define fail()
Definition: checkasm.h:122
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:472
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:113
static const AVOption join_options[]
Definition: af_join.c:71
int channels
number of audio channels, only used for audio.
Definition: frame.h:601
audio channel layout utility functions
unsigned nb_inputs
number of input pads
Definition: avfilter.h:347
#define FFMIN(a, b)
Definition: common.h:96
AVBufferRef ** extended_buf
For planar audio which requires more than AV_NUM_DATA_POINTERS AVBufferRef pointers, this array will hold all the references which cannot fit into AVFrame.buf.
Definition: frame.h:500
AVFormatContext * ctx
Definition: movenc.c:48
AVBufferRef * av_frame_get_plane_buffer(AVFrame *frame, int plane)
Get the buffer reference a given data plane is stored in.
Definition: frame.c:659
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define s(width, name)
Definition: cbs_vp9.c:257
AVFilterFormats * ff_planar_sample_fmts(void)
Construct a formats list containing all planar sample formats.
Definition: formats.c:381
AVFilter ff_af_join
Definition: af_join.c:531
AVFilterChannelLayouts * ff_all_channel_layouts(void)
Construct an empty AVFilterChannelLayouts/AVFilterFormats struct – representing any channel layout (...
Definition: formats.c:400
static const AVFilterPad avfilter_af_join_outputs[]
Definition: af_join.c:522
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
#define FF_ARRAY_ELEMS(a)
int in_channel_idx
index of in_channel in the input stream data
Definition: af_channelmap.c:43
A list of supported channel layouts.
Definition: formats.h:85
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:368
static int activate(AVFilterContext *ctx)
Definition: af_join.c:470
AVFILTER_DEFINE_CLASS(join)
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max, AVFrame **rframe)
Take samples from the link&#39;s FIFO and update the link&#39;s stats.
Definition: avfilter.c:1495
int inputs
Definition: af_join.c:49
void * buf
Definition: avisynth_c.h:766
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
AVBuffer * buffer
Definition: buffer.h:82
Describe the class of an AVClass context structure.
Definition: log.h:67
int sample_rate
Sample rate of the audio data.
Definition: frame.h:467
#define PARSE_CHANNEL(str, var, inout)
Filter definition.
Definition: avfilter.h:144
uint64_t out_channel
layout describing the output channel
Definition: af_channelmap.c:42
they must not be accessed directly The fifo field contains the frames that are queued in the input for processing by the filter The status_in and status_out fields contains the queued status(EOF or error) of the link
const char * name
Filter name.
Definition: avfilter.h:148
const VDPAUPixFmtMap * map
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:350
enum MovChannelLayoutTag * layouts
Definition: mov_chan.c:434
int av_get_channel_layout_channel_index(uint64_t channel_layout, uint64_t channel)
Get the index of a channel in channel_layout.
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi-0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64,*(const int64_t *) pi *(1.0f/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64,*(const int64_t *) pi *(1.0/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, 
av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(UINT64_C(1)<< 63)))#define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64),};static void cpy1(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, len);}static void cpy2(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 2 *len);}static void cpy4(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 4 *len);}static void cpy8(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 8 *len);}AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags){AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);}ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map){switch(av_get_bytes_per_sample(in_fmt)){case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;}}if(HAVE_X86ASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);return ctx;}void swri_audio_convert_free(AudioConvert **ctx){av_freep(ctx);}int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len){int ch;int off=0;const int os=(out->planar?1:out->ch_count)*out->bps;unsigned 
misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask){int planes=in->planar?in->ch_count:1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;}if(ctx->out_simd_align_mask){int planes=out->planar?out->ch_count:1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;}if(ctx->simd_f &&!ctx->ch_map &&!misaligned){off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){if(out->planar==in->planar){int planes=out->planar?out->ch_count:1;for(ch=0;ch< planes;ch++){ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
AVFilterFormats * ff_all_samplerates(void)
Definition: formats.c:394
static int64_t pts
#define flags(name, subs,...)
Definition: cbs_av1.c:561
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
AVFrame ** input_frames
Temporary storage for input frames, until we get one on each input.
Definition: af_join.c:60
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
ChannelMap * channels
Definition: af_join.c:55
#define F
Definition: af_join.c:70
A reference to a data buffer.
Definition: buffer.h:81
#define OFFSET(x)
Definition: af_join.c:68
static int query_formats(AVFilterContext *ctx)
Definition: aeval.c:244
static void guess_map_any(AVFilterContext *ctx, ChannelMap *ch, uint64_t *inputs)
Definition: af_join.c:264
common internal and external API header
uint64_t av_channel_layout_extract_channel(uint64_t channel_layout, int index)
Get the channel with the given index in channel_layout.
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:94
static int parse_maps(AVFilterContext *ctx)
Definition: af_join.c:83
char * channel_layout_str
Definition: af_join.c:51
const char * av_get_channel_name(uint64_t channel)
Get the name of a given channel.
static int try_push_frame(AVFilterContext *ctx)
Definition: af_join.c:365
An instance of a filter.
Definition: avfilter.h:338
#define av_freep(p)
static int join_config_output(AVFilterLink *outlink)
Definition: af_join.c:283
internal API functions
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:342
uint64_t channel_layout
Definition: af_join.c:52
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:361
int ff_set_common_samplerates(AVFilterContext *ctx, AVFilterFormats *samplerates)
Definition: formats.c:557
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:191
static int ff_insert_inpad(AVFilterContext *f, unsigned index, AVFilterPad *p)
Insert a new input pad for the filter.
Definition: internal.h:266