vmdaudio.c
/*
 * Sierra VMD audio decoder
 * Copyright (c) 2004 The FFmpeg Project
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Sierra VMD audio decoder
 * by Vladimir "VAG" Gneushev (vagsoft at mail.ru)
 * for more information on the Sierra VMD format, visit:
 * http://www.pcisys.net/~melanson/codecs/
 *
 * The audio decoder expects each encoded data
 * chunk to be prepended with the appropriate 16-byte frame information
 * record from the VMD file. It does not require the 0x330-byte VMD file
 * header, but it does need the audio setup parameters passed in through
 * normal libavcodec API means.
 */
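/*
 * Caller-side sketch (an assumption added for illustration, not part of the
 * original file): the "audio setup parameters" mentioned above are the usual
 * AVCodecContext fields, set before the decoder is opened, e.g.
 *
 *   avctx->channels              = 2;      // VMD audio is mono or stereo
 *   avctx->sample_rate           = 22050;  // illustrative value
 *   avctx->bits_per_coded_sample = 16;     // 16 for DPCM, 8 for raw unsigned PCM
 *   avctx->block_align           = 1764;   // total samples per chunk (all channels); illustrative
 *
 * In practice the Sierra VMD demuxer fills these in from the VMD file header.
 */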

#include <string.h>

#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"

#include "avcodec.h"
#include "internal.h"

#define BLOCK_TYPE_AUDIO    1
#define BLOCK_TYPE_INITIAL  2
#define BLOCK_TYPE_SILENCE  3

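/*
 * As used in vmdaudio_decode_frame() below: BLOCK_TYPE_AUDIO marks an
 * ordinary coded audio chunk, BLOCK_TYPE_INITIAL is the first audio block
 * and carries a 32-bit flag word whose set bits give the number of silent
 * chunks emitted before the audio data, and BLOCK_TYPE_SILENCE marks a
 * chunk that is entirely silence and carries no coded payload.
 */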
typedef struct VmdAudioContext {
    int out_bps;
    int chunk_size;
} VmdAudioContext;

static const uint16_t vmdaudio_table[128] = {
    0x000, 0x008, 0x010, 0x020, 0x030, 0x040, 0x050, 0x060, 0x070, 0x080,
    0x090, 0x0A0, 0x0B0, 0x0C0, 0x0D0, 0x0E0, 0x0F0, 0x100, 0x110, 0x120,
    0x130, 0x140, 0x150, 0x160, 0x170, 0x180, 0x190, 0x1A0, 0x1B0, 0x1C0,
    0x1D0, 0x1E0, 0x1F0, 0x200, 0x208, 0x210, 0x218, 0x220, 0x228, 0x230,
    0x238, 0x240, 0x248, 0x250, 0x258, 0x260, 0x268, 0x270, 0x278, 0x280,
    0x288, 0x290, 0x298, 0x2A0, 0x2A8, 0x2B0, 0x2B8, 0x2C0, 0x2C8, 0x2D0,
    0x2D8, 0x2E0, 0x2E8, 0x2F0, 0x2F8, 0x300, 0x308, 0x310, 0x318, 0x320,
    0x328, 0x330, 0x338, 0x340, 0x348, 0x350, 0x358, 0x360, 0x368, 0x370,
    0x378, 0x380, 0x388, 0x390, 0x398, 0x3A0, 0x3A8, 0x3B0, 0x3B8, 0x3C0,
    0x3C8, 0x3D0, 0x3D8, 0x3E0, 0x3E8, 0x3F0, 0x3F8, 0x400, 0x440, 0x480,
    0x4C0, 0x500, 0x540, 0x580, 0x5C0, 0x600, 0x640, 0x680, 0x6C0, 0x700,
    0x740, 0x780, 0x7C0, 0x800, 0x900, 0xA00, 0xB00, 0xC00, 0xD00, 0xE00,
    0xF00, 0x1000, 0x1400, 0x1800, 0x1C00, 0x2000, 0x3000, 0x4000
};
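/*
 * The step table used by decode_audio_s16() below: the low 7 bits of each
 * DPCM byte index this table, and bit 7 selects whether the step is added
 * to or subtracted from the running predictor. Step sizes grow from 0 up
 * to 0x4000, so small codes encode fine changes and large codes coarse ones.
 */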

static av_cold int vmdaudio_decode_init(AVCodecContext *avctx)
{
    VmdAudioContext *s = avctx->priv_data;

    if (avctx->channels < 1 || avctx->channels > 2) {
        av_log(avctx, AV_LOG_ERROR, "invalid number of channels\n");
        return AVERROR(EINVAL);
    }
    if (avctx->block_align < 1 || avctx->block_align % avctx->channels) {
        av_log(avctx, AV_LOG_ERROR, "invalid block align\n");
        return AVERROR(EINVAL);
    }

    avctx->channel_layout = avctx->channels == 1 ? AV_CH_LAYOUT_MONO :
                                                   AV_CH_LAYOUT_STEREO;

    if (avctx->bits_per_coded_sample == 16)
        avctx->sample_fmt = AV_SAMPLE_FMT_S16;
    else
        avctx->sample_fmt = AV_SAMPLE_FMT_U8;
    s->out_bps = av_get_bytes_per_sample(avctx->sample_fmt);

    s->chunk_size = avctx->block_align + avctx->channels * (s->out_bps == 2);

    av_log(avctx, AV_LOG_DEBUG, "%d channels, %d bits/sample, "
           "block align = %d, sample rate = %d\n",
           avctx->channels, avctx->bits_per_coded_sample, avctx->block_align,
           avctx->sample_rate);

    return 0;
}
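/*
 * Worked example for chunk_size (illustrative numbers, not from a specific
 * file): with 16-bit stereo audio and block_align = 1764, a chunk carries
 * 1764 samples in total (882 per channel). The first sample of each channel
 * is stored raw as 2 bytes and every remaining sample as a 1-byte DPCM code,
 * so chunk_size = 1764 + 2 = 1766 bytes. For 8-bit audio, chunk_size simply
 * equals block_align.
 */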

static void decode_audio_s16(int16_t *out, const uint8_t *buf, int buf_size,
                             int channels)
{
    int ch;
    const uint8_t *buf_end = buf + buf_size;
    int predictor[2];
    int st = channels - 1;

    /* decode initial raw sample */
    for (ch = 0; ch < channels; ch++) {
        predictor[ch] = (int16_t)AV_RL16(buf);
        buf += 2;
        *out++ = predictor[ch];
    }

    /* decode DPCM samples */
    ch = 0;
    while (buf < buf_end) {
        uint8_t b = *buf++;
        if (b & 0x80)
            predictor[ch] -= vmdaudio_table[b & 0x7F];
        else
            predictor[ch] += vmdaudio_table[b];
        predictor[ch] = av_clip_int16(predictor[ch]);
        *out++ = predictor[ch];
        ch ^= st;
    }
}
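/*
 * Worked example (hypothetical input): for a mono stream whose chunk starts
 * with the raw sample 0x0100 (256), a following DPCM byte 0x05 adds
 * vmdaudio_table[5] = 0x40, moving the predictor from 256 to 320; the next
 * byte 0x85 has bit 7 set and subtracts the same step, bringing it back to
 * 256. Every result is clipped to the signed 16-bit range with
 * av_clip_int16(). In stereo, ch ^= st alternates between the two channels.
 */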

static int vmdaudio_decode_frame(AVCodecContext *avctx, void *data,
                                 int *got_frame_ptr, AVPacket *avpkt)
{
    AVFrame *frame     = data;
    const uint8_t *buf = avpkt->data;
    const uint8_t *buf_end;
    int buf_size = avpkt->size;
    VmdAudioContext *s = avctx->priv_data;
    int block_type, silent_chunks, audio_chunks;
    int ret;
    uint8_t *output_samples_u8;
    int16_t *output_samples_s16;

    if (buf_size < 16) {
        av_log(avctx, AV_LOG_WARNING, "skipping small junk packet\n");
        *got_frame_ptr = 0;
        return buf_size;
    }

    block_type = buf[6];
    if (block_type < BLOCK_TYPE_AUDIO || block_type > BLOCK_TYPE_SILENCE) {
        av_log(avctx, AV_LOG_ERROR, "unknown block type: %d\n", block_type);
        return AVERROR(EINVAL);
    }
    buf      += 16;
    buf_size -= 16;

    /* get number of silent chunks */
    silent_chunks = 0;
    if (block_type == BLOCK_TYPE_INITIAL) {
        uint32_t flags;
        if (buf_size < 4) {
            av_log(avctx, AV_LOG_ERROR, "packet is too small\n");
            return AVERROR(EINVAL);
        }
        flags         = AV_RB32(buf);
        silent_chunks = av_popcount(flags);
        buf      += 4;
        buf_size -= 4;
    } else if (block_type == BLOCK_TYPE_SILENCE) {
        silent_chunks = 1;
        buf_size = 0; // should already be zero but set it just to be sure
    }

    /* ensure output buffer is large enough */
    audio_chunks = buf_size / s->chunk_size;

    /* drop incomplete chunks */
    buf_size = audio_chunks * s->chunk_size;

    /* get output buffer */
    frame->nb_samples = ((silent_chunks + audio_chunks) * avctx->block_align) /
                        avctx->channels;
    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
        return ret;
    output_samples_u8  = frame->data[0];
    output_samples_s16 = (int16_t *)frame->data[0];

    /* decode silent chunks */
    if (silent_chunks > 0) {
        int silent_size = avctx->block_align * silent_chunks;
        av_assert0(avctx->block_align * silent_chunks <= frame->nb_samples * avctx->channels);

        if (s->out_bps == 2) {
            memset(output_samples_s16, 0x00, silent_size * 2);
            output_samples_s16 += silent_size;
        } else {
            memset(output_samples_u8, 0x80, silent_size);
            output_samples_u8 += silent_size;
        }
    }

    /* decode audio chunks */
    if (audio_chunks > 0) {
        buf_end = buf + buf_size;
        av_assert0((buf_size & (avctx->channels > 1)) == 0);
        while (buf_end - buf >= s->chunk_size) {
            if (s->out_bps == 2) {
                decode_audio_s16(output_samples_s16, buf, s->chunk_size,
                                 avctx->channels);
                output_samples_s16 += avctx->block_align;
            } else {
                memcpy(output_samples_u8, buf, s->chunk_size);
                output_samples_u8 += avctx->block_align;
            }
            buf += s->chunk_size;
        }
    }

    *got_frame_ptr = 1;

    return avpkt->size;
}
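/*
 * Note on the layout produced above: each output frame holds the silent
 * chunks first, followed by the decoded audio chunks. Silent 16-bit samples
 * are written as 0x0000 and silent 8-bit samples as 0x80, since
 * AV_SAMPLE_FMT_U8 is unsigned and 0x80 is its zero level.
 */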

AVCodec ff_vmdaudio_decoder = {
    .name           = "vmdaudio",
    .long_name      = NULL_IF_CONFIG_SMALL("Sierra VMD audio"),
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = AV_CODEC_ID_VMDAUDIO,
    .priv_data_size = sizeof(VmdAudioContext),
    .init           = vmdaudio_decode_init,
    .decode         = vmdaudio_decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1,
};
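/*
 * Usage sketch (an assumption added for illustration, not part of the
 * original file): an application selects this decoder by ID or by name,
 *
 *   AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_VMDAUDIO);
 *   // or: avcodec_find_decoder_by_name("vmdaudio");
 *
 * opens it with avcodec_open2(), and feeds packets (each prepended with the
 * 16-byte frame record) through the standard libavcodec decode path,
 * receiving frames in AV_SAMPLE_FMT_S16 or AV_SAMPLE_FMT_U8 as chosen in
 * vmdaudio_decode_init().
 */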