FFmpeg
fifo.c
/*
 * Copyright (c) 2007 Bobby Bingham
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * FIFO buffering filter
 */

#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/mathematics.h"
#include "libavutil/samplefmt.h"

#include "audio.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"

typedef struct Buf {
    AVFrame *frame;
    struct Buf *next;
} Buf;

typedef struct FifoContext {
    Buf  root;
    Buf *last;   ///< last buffered frame

    /**
     * When a specific number of output samples is requested, the partial
     * buffer is stored here
     */
    AVFrame *out;
    int allocated_samples;      ///< number of samples out was allocated for
} FifoContext;
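
/*
 * Layout note: "root" is a dummy head node that never stores a frame itself;
 * buffered frames hang off root.next, and "last" points at the tail node (or
 * back at &root when the queue is empty). This lets add_to_queue() append in
 * O(1) and queue_pop() drop the oldest frame from the front.
 */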

static av_cold int init(AVFilterContext *ctx)
{
    FifoContext *s = ctx->priv;
    s->last = &s->root;

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    FifoContext *s = ctx->priv;
    Buf *buf, *tmp;

    for (buf = s->root.next; buf; buf = tmp) {
        tmp = buf->next;
        av_frame_free(&buf->frame);
        av_free(buf);
    }

    av_frame_free(&s->out);
}

static int add_to_queue(AVFilterLink *inlink, AVFrame *frame)
{
    FifoContext *s = inlink->dst->priv;

    s->last->next = av_mallocz(sizeof(Buf));
    if (!s->last->next) {
        av_frame_free(&frame);
        return AVERROR(ENOMEM);
    }

    s->last = s->last->next;
    s->last->frame = frame;

    return 0;
}

static void queue_pop(FifoContext *s)
{
    Buf *tmp = s->root.next->next;
    if (s->last == s->root.next)
        s->last = &s->root;
    av_freep(&s->root.next);
    s->root.next = tmp;
}

/**
 * Move data pointers and pts offset samples forward.
 */
static void buffer_offset(AVFilterLink *link, AVFrame *frame,
                          int offset)
{
    int nb_channels = link->channels;
    int planar = av_sample_fmt_is_planar(link->format);
    int planes = planar ? nb_channels : 1;
    int block_align = av_get_bytes_per_sample(link->format) * (planar ? 1 : nb_channels);
    int i;

    av_assert0(frame->nb_samples > offset);

    for (i = 0; i < planes; i++)
        frame->extended_data[i] += block_align * offset;
    if (frame->data != frame->extended_data)
        memcpy(frame->data, frame->extended_data,
               FFMIN(planes, FF_ARRAY_ELEMS(frame->data)) * sizeof(*frame->data));
    frame->linesize[0] -= block_align*offset;
    frame->nb_samples -= offset;

    if (frame->pts != AV_NOPTS_VALUE) {
        frame->pts += av_rescale_q(offset, (AVRational){1, link->sample_rate},
                                   link->time_base);
    }
}
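
/*
 * Worked example for the pts adjustment above: buffer_offset() drops "offset"
 * samples from the front of a frame in place (plane pointers advance,
 * nb_samples shrinks) and moves pts forward by offset samples rescaled from
 * the 1/sample_rate time base to the link time base. With a 48000 Hz link, a
 * 1/90000 time base and offset = 1024, pts grows by
 * av_rescale_q(1024, (AVRational){1, 48000}, (AVRational){1, 90000}) =
 * 1024 * 90000 / 48000 = 1920 ticks. (Example values only.)
 */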

static int calc_ptr_alignment(AVFrame *frame)
{
    int planes = av_sample_fmt_is_planar(frame->format) ?
                 frame->channels : 1;
    int min_align = 128;
    int p;

    for (p = 0; p < planes; p++) {
        int cur_align = 128;
        while ((intptr_t)frame->extended_data[p] % cur_align)
            cur_align >>= 1;
        if (cur_align < min_align)
            min_align = cur_align;
    }
    return min_align;
}
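
/*
 * calc_ptr_alignment() returns the largest power of two (capped at 128) that
 * divides every plane pointer of the frame. return_audio_frame() below only
 * forwards a queued frame without copying when this value is at least 32,
 * presumably so that downstream SIMD code sees suitably aligned data; less
 * aligned frames are instead copied into a fresh buffer from
 * ff_get_audio_buffer().
 */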

static int return_audio_frame(AVFilterContext *ctx)
{
    AVFilterLink *link = ctx->outputs[0];
    FifoContext *s = ctx->priv;
    AVFrame *head = s->root.next ? s->root.next->frame : NULL;
    AVFrame *out;
    int ret;

    /* if head is NULL then we're flushing the remaining samples in out */
    if (!head && !s->out)
        return AVERROR_EOF;

    if (!s->out &&
        head->nb_samples >= link->request_samples &&
        calc_ptr_alignment(head) >= 32) {
        if (head->nb_samples == link->request_samples) {
            out = head;
            queue_pop(s);
        } else {
            out = av_frame_clone(head);
            if (!out)
                return AVERROR(ENOMEM);

            out->nb_samples = link->request_samples;
            buffer_offset(link, head, link->request_samples);
        }
    } else {
        int nb_channels = link->channels;

        if (!s->out) {
            s->out = ff_get_audio_buffer(link, link->request_samples);
            if (!s->out)
                return AVERROR(ENOMEM);

            s->out->nb_samples   = 0;
            s->out->pts          = head->pts;
            s->allocated_samples = link->request_samples;
        } else if (link->request_samples != s->allocated_samples) {
            av_log(ctx, AV_LOG_ERROR, "request_samples changed before the "
                   "buffer was returned.\n");
            return AVERROR(EINVAL);
        }

        while (s->out->nb_samples < s->allocated_samples) {
            int len;

            if (!s->root.next) {
                ret = ff_request_frame(ctx->inputs[0]);
                if (ret == AVERROR_EOF) {
                    av_samples_set_silence(s->out->extended_data,
                                           s->out->nb_samples,
                                           s->allocated_samples -
                                           s->out->nb_samples,
                                           nb_channels, link->format);
                    s->out->nb_samples = s->allocated_samples;
                    break;
                } else if (ret < 0)
                    return ret;
                if (!s->root.next)
                    return 0;
            }
            head = s->root.next->frame;

            len = FFMIN(s->allocated_samples - s->out->nb_samples,
                        head->nb_samples);

            av_samples_copy(s->out->extended_data, head->extended_data,
                            s->out->nb_samples, 0, len, nb_channels,
                            link->format);
            s->out->nb_samples += len;

            if (len == head->nb_samples) {
                av_frame_free(&head);
                queue_pop(s);
            } else {
                buffer_offset(link, head, len);
            }
        }
        out = s->out;
        s->out = NULL;
    }
    return ff_filter_frame(link, out);
}
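
/*
 * Summary of return_audio_frame(): when the output link asks for a fixed
 * request_samples count, there are three paths. If the head frame is big
 * enough and sufficiently aligned, it is either passed through as-is (exact
 * size match) or cloned and trimmed, with buffer_offset() keeping the
 * unconsumed tail queued. Otherwise samples are accumulated into the partial
 * buffer s->out, pulling more input via ff_request_frame() as needed and
 * padding with silence if EOF arrives before allocated_samples have been
 * collected.
 */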

static int request_frame(AVFilterLink *outlink)
{
    FifoContext *s = outlink->src->priv;
    int ret = 0;

    if (!s->root.next) {
        if ((ret = ff_request_frame(outlink->src->inputs[0])) < 0) {
            if (ret == AVERROR_EOF && outlink->request_samples)
                return return_audio_frame(outlink->src);
            return ret;
        }
        if (!s->root.next)
            return 0;
    }

    if (outlink->request_samples) {
        return return_audio_frame(outlink->src);
    } else {
        ret = ff_filter_frame(outlink, s->root.next->frame);
        queue_pop(s);
    }

    return ret;
}
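
/*
 * request_frame() is installed on the single output pad of both the video and
 * audio variants below. For audio links with request_samples set it delegates
 * to return_audio_frame(); otherwise it forwards the oldest queued frame
 * unchanged and pops it from the queue.
 */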

static const AVFilterPad avfilter_vf_fifo_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = add_to_queue,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_fifo_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_vf_fifo = {
    .name        = "fifo",
    .description = NULL_IF_CONFIG_SMALL("Buffer input images and send them when they are requested."),
    .init        = init,
    .uninit      = uninit,
    .priv_size   = sizeof(FifoContext),
    .inputs      = avfilter_vf_fifo_inputs,
    .outputs     = avfilter_vf_fifo_outputs,
};

static const AVFilterPad avfilter_af_afifo_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = add_to_queue,
    },
    { NULL }
};

static const AVFilterPad avfilter_af_afifo_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_af_afifo = {
    .name        = "afifo",
    .description = NULL_IF_CONFIG_SMALL("Buffer input frames and send them when they are requested."),
    .init        = init,
    .uninit      = uninit,
    .priv_size   = sizeof(FifoContext),
    .inputs      = avfilter_af_afifo_inputs,
    .outputs     = avfilter_af_afifo_outputs,
};
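
For context, the two filters defined above register under the names "fifo" and "afifo". Below is a minimal usage sketch, not part of fifo.c, showing how the audio variant could be wired into a libavfilter graph between an abuffer source and an abuffersink on FFmpeg versions that still provide these filters; the helper name build_afifo_graph() and the abuffer parameter values are assumptions made for the example.

#include <libavfilter/avfilter.h>

/* Hypothetical helper: build abuffer -> afifo -> abuffersink. */
int build_afifo_graph(AVFilterGraph **out_graph,
                      AVFilterContext **out_src, AVFilterContext **out_sink)
{
    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterContext *src = NULL, *fifo = NULL, *sink = NULL;
    int ret;

    if (!graph)
        return AVERROR(ENOMEM);

    /* Audio source; the format parameters are example values. */
    ret = avfilter_graph_create_filter(&src, avfilter_get_by_name("abuffer"), "in",
                                       "time_base=1/44100:sample_rate=44100:"
                                       "sample_fmt=s16:channel_layout=stereo",
                                       NULL, graph);
    if (ret < 0)
        goto fail;

    /* The FIFO buffering filter defined in this file (audio variant). */
    ret = avfilter_graph_create_filter(&fifo, avfilter_get_by_name("afifo"), "fifo",
                                       NULL, NULL, graph);
    if (ret < 0)
        goto fail;

    ret = avfilter_graph_create_filter(&sink, avfilter_get_by_name("abuffersink"), "out",
                                       NULL, NULL, graph);
    if (ret < 0)
        goto fail;

    /* src -> fifo -> sink */
    if ((ret = avfilter_link(src, 0, fifo, 0)) < 0 ||
        (ret = avfilter_link(fifo, 0, sink, 0)) < 0)
        goto fail;

    if ((ret = avfilter_graph_config(graph, NULL)) < 0)
        goto fail;

    *out_graph = graph;
    *out_src   = src;
    *out_sink  = sink;
    return 0;

fail:
    avfilter_graph_free(&graph);
    return ret;
}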