static const uint32_t pixel_mask[3] = { 0xffffffff, 0x03ff03ff, 0x0fff0fff };
#define SIZEOF_PIXEL ((bit_depth + 7) / 8)

#define randomize_buffers()                                        \
    do {                                                           \
        uint32_t mask = pixel_mask[(bit_depth - 8) >> 1];          \
        int k;                                                     \
        for (k = -4; k < SIZEOF_PIXEL * FFMAX(8, size); k += 4) {  \
            uint32_t r = rnd() & mask;                             \
            AV_WN32A(a + k, r);                                    \
        }                                                          \
        for (k = 0; k < size * SIZEOF_PIXEL; k += 4) {             \
            uint32_t r = rnd() & mask;                             \
            AV_WN32A(l + k, r);                                    \
        }                                                          \
    } while (0)

    /* check_ipred(): intra prediction */
    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
        ff_vp9dsp_init(&dsp, bit_depth, 0);
        for (tx = 0; tx < 4; tx++) {
            int size = 4 << tx;

            for (mode = 0; mode < N_INTRA_PRED_MODES; mode++) {
                if (check_func(dsp.intra_pred[tx][mode], "vp9_%s_%dx%d_%dbpp",
                               mode_names[mode], size, size, bit_depth)) {
                    randomize_buffers();
                    call_ref(dst0, size * SIZEOF_PIXEL, l, a);
                    call_new(dst1, size * SIZEOF_PIXEL, l, a);
                    if (memcmp(dst0, dst1, size * size * SIZEOF_PIXEL))
                        fail();
                    bench_new(dst1, size * SIZEOF_PIXEL, l, a);
                }
            }
        }
    }
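/*
 * The block above follows the standard checkasm flow used throughout this
 * file: check_func() selects the next untested implementation of the named
 * function, call_ref() runs the C reference and call_new() the candidate
 * (e.g. SIMD) version on identically randomized buffers, any mismatch is
 * reported via fail(), and bench_new() only benchmarks implementations that
 * were actually selected.
 */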
#undef randomize_buffers

#define randomize_buffers()                                               \
    do {                                                                  \
        uint32_t mask = pixel_mask[(bit_depth - 8) >> 1];                 \
        for (y = 0; y < sz; y++) {                                        \
            for (x = 0; x < sz * SIZEOF_PIXEL; x += 4) {                  \
                uint32_t r = rnd() & mask;                                \
                AV_WN32A(dst + y * sz * SIZEOF_PIXEL + x, r);             \
                AV_WN32A(src + y * sz * SIZEOF_PIXEL + x, rnd() & mask);  \
            }                                                             \
            for (x = 0; x < sz; x++) {                                    \
                if (bit_depth == 8) {                                     \
                    coef[y * sz + x] = src[y * sz + x] - dst[y * sz + x]; \
                } else {                                                  \
                    ((int32_t *) coef)[y * sz + x] =                      \
                        ((uint16_t *) src)[y * sz + x] -                  \
                        ((uint16_t *) dst)[y * sz + x];                   \
                }                                                         \
            }                                                             \
        }                                                                 \
    } while (0)

static void fwht_1d(double *out, const double *in, int sz)
{
    double t0 = in[0] + in[1];
    double t3 = in[3] - in[2];
    double t4 = trunc((t0 - t3) * 0.5);
    double t1 = t4 - in[1];
    double t2 = t4 - in[2];

    out[0] = t0 - t2;
    out[1] = t2;
    out[2] = t3 + t1;
    out[3] = t1;
}
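/*
 * fwht_1d() is the double-precision reference for the 4x4 Walsh-Hadamard
 * transform used by VP9's lossless mode; the trunc() is presumably there to
 * mimic the truncating integer halving of the fixed-point transform, so that
 * rounding the doubles back to integers reproduces exact coefficients.
 */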
static void fdct_1d(double *out, const double *in, int sz)
{
    int k, n;

    for (k = 0; k < sz; k++) {
        out[k] = 0.0;
        for (n = 0; n < sz; n++)
            out[k] += in[n] * cos(M_PI * (2 * n + 1) * k / (sz * 2.0));
    }
    out[0] *= M_SQRT1_2;
}
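/*
 * fdct_1d() evaluates the DCT-II directly from its definition,
 *
 *     X[k] = sum_{n=0}^{N-1} x[n] * cos(pi * (2n + 1) * k / (2N)),
 *
 * with the DC coefficient scaled by 1/sqrt(2). The O(N^2) evaluation is fine
 * here since it only generates reference coefficients for the tests.
 */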
static void fadst4_1d(double *out, const double *in, int sz)
{
    int k, n;

    for (k = 0; k < sz; k++) {
        out[k] = 0.0;
        for (n = 0; n < sz; n++)
            out[k] += in[n] * sin(M_PI * (n + 1) * (2 * k + 1) / (sz * 2.0 + 1.0));
    }
}
static void fadst_1d(double *out, const double *in, int sz)
{
    int k, n;

    for (k = 0; k < sz; k++) {
        out[k] = 0.0;
        for (n = 0; n < sz; n++)
            out[k] += in[n] * sin(M_PI * (2 * n + 1) * (2 * k + 1) / (sz * 4.0));
    }
}
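/*
 * Two ADST references are needed because VP9 uses a different sine transform
 * for 4x4 blocks (fadst4_1d, with period 2N + 1) than for 8x8 and 16x16
 * (fadst_1d, with period 4N); both are again direct evaluations of the
 * defining sums.
 */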
typedef void (*ftx1d_fn)(double *out, const double *in, int sz);

static void ftx_2d(double *out, const double *in, enum TxfmMode tx,
                   enum TxfmType txtp, int sz)
{
    static const double scaling_factors[5][4] = {
        /* ... */
        { 2.0, 2.0, 2.0, 2.0 },
        { 1.0, 1.0, 1.0, 1.0 },
        /* ... */
    };
    static const ftx1d_fn ftx1d_tbl[5][4][2] = {
        /* { column transform, row transform } for each (tx, txtp) ... */
    };
    double temp[1024];
    double scaling_factor = scaling_factors[tx][txtp];
    int i, j;

    /* columns */
    for (i = 0; i < sz; ++i) {
        double temp_out[32];

        ftx1d_tbl[tx][txtp][0](temp_out, &in[i * sz], sz);
        /* scale and transpose */
        for (j = 0; j < sz; ++j)
            temp[j * sz + i] = temp_out[j] * scaling_factor;
    }

    /* rows */
    for (i = 0; i < sz; i++)
        ftx1d_tbl[tx][txtp][1](&out[i * sz], &temp[i * sz], sz);
}
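/*
 * ftx_2d() is the usual separable 2D construction: run the first 1D
 * transform over one dimension, scale and transpose into temp[], then run
 * the second 1D transform over the other dimension. The per-(tx, txtp)
 * scaling factor lines the floating-point output up with the coefficient
 * scale the fixed-point inverse transforms expect.
 */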
static void ftx(int16_t *buf, enum TxfmMode tx, enum TxfmType txtp,
                int sz, int bit_depth)
{
    double ind[1024], outd[1024];
    int n;

    for (n = 0; n < sz * sz; n++) {
        /* widen the residual (int16_t at 8 bpp, int32_t at 10/12 bpp) */
        ind[n] = bit_depth == 8 ? buf[n] : ((int32_t *) buf)[n];
    }
    ftx_2d(outd, ind, tx, txtp, sz);
    for (n = 0; n < sz * sz; n++) {
        if (bit_depth == 8)
            buf[n] = lrint(outd[n]);
        else
            ((int32_t *) buf)[n] = lrint(outd[n]);
    }
}
static int copy_subcoefs(int16_t *out, const int16_t *in, enum TxfmMode tx,
                         enum TxfmType txtp, int sz, int sub, int bit_depth)
{
    const int16_t *scan = ff_vp9_scans[tx][txtp];
    int n, eob;

    for (n = 0; n < sz * sz; n++) {
        int rc = scan[n], rcx = rc % sz, rcy = rc / sz;

        /* find the eob for this sub-transform */
        if (rcx >= sub || rcy >= sub)
            break;
        if (bit_depth == 8)
            out[rc] = in[rc];
        else
            AV_COPY32(&out[rc * 2], &in[rc * 2]);
    }
    eob = n;
    for (; n < sz * sz; n++) {
        int rc = scan[n];

        if (bit_depth == 8)
            out[rc] = 0;
        else
            AV_ZERO32(&out[rc * 2]);
    }
    return eob;
}

static int iszero(const int16_t *c, int sz)
{
    int n;

    for (n = 0; n < sz / sizeof(int16_t); n += 2)
        if (AV_RN32A(&c[n]))
            return 0;
    return 1;
}
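/*
 * copy_subcoefs() exists because the optimized inverse transforms take an
 * end-of-block (eob) index and branch into cheaper paths (dc-only, small
 * top-left sub-blocks of a larger transform); constraining the non-zero
 * coefficients to a sub x sub corner in scan order and returning the
 * matching eob makes sure each of those paths gets exercised. iszero() is
 * later used to verify that the transform cleared the coefficient buffer
 * behind itself, as the decoder expects.
 */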
#define SIZEOF_COEF (2 * ((bit_depth + 7) / 8))

    /* check_itxfm(): inverse transform + add */
    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
        ff_vp9dsp_init(&dsp, bit_depth, 0);

        for (tx = TX_4X4; tx <= N_TXFM_SIZES /* 4x4..32x32, then WHT */; tx++) {
            int sz = 4 << (tx & 3);
            int n_txtps = tx < TX_32X32 ? N_TXFM_TYPES : 1;

            for (txtp = 0; txtp < n_txtps; txtp++) {
                /* sub-IDCTs are only exercised for DCT_DCT; the WHT and ADST
                 * SIMD versions do not implement them */
                for (sub = (txtp == 0 && tx < 4) ? 1 : sz; sub <= sz;
                     sub < 4 ? (sub <<= 1) : (sub += 4)) {
                    if (check_func(dsp.itxfm_add[tx][txtp],
                                   "vp9_inv_%s_%dx%d_sub%d_add_%d",
                                   tx == 4 ? "wht_wht" : txtp_types[txtp],
                                   sz, sz, sub, bit_depth)) {
                        int eob;

                        randomize_buffers();
                        ftx(coef, tx, txtp, sz, bit_depth);

                        if (sub < sz) {
                            eob = copy_subcoefs(subcoef0, coef, tx, txtp,
                                                sz, sub, bit_depth);
                        } else {
                            eob = sz * sz;
                            memcpy(subcoef0, coef, sz * sz * SIZEOF_COEF);
                        }

                        memcpy(dst0, dst, sz * sz * SIZEOF_PIXEL);
                        memcpy(dst1, dst, sz * sz * SIZEOF_PIXEL);
                        memcpy(subcoef1, subcoef0, sz * sz * SIZEOF_COEF);
                        call_ref(dst0, sz * SIZEOF_PIXEL, subcoef0, eob);
                        call_new(dst1, sz * SIZEOF_PIXEL, subcoef1, eob);
                        if (memcmp(dst0, dst1, sz * sz * SIZEOF_PIXEL) ||
                            !iszero(subcoef0, sz * sz * SIZEOF_COEF) ||
                            !iszero(subcoef1, sz * sz * SIZEOF_COEF))
                            fail();
                        bench_new(dst, sz * SIZEOF_PIXEL, coef, eob);
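/*
 * Both coefficient buffers go through iszero() because the reference and the
 * candidate each consume (and are expected to zero) their own copy; a
 * failure is flagged if either the reconstructed pixels differ or either
 * copy was left uncleared.
 */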
#undef randomize_buffers

#define setpx(a, b, c)                                                  \
    do {                                                                \
        if (SIZEOF_PIXEL == 1) {                                        \
            buf0[(a) + (b) * jstride] = av_clip_uint8(c);               \
        } else {                                                        \
            ((uint16_t *)buf0)[(a) + (b) * jstride] =                   \
                av_clip_uintp2(c, bit_depth);                           \
        }                                                               \
    } while (0)

/* c may be an assignment expression, so it must not be parenthesized */
#define setdx(a, b, c, d) setpx(a, b, c - (d) + (rnd() % ((d) * 2 + 1)))
#define setsx(a, b, c, d) setdx(a, b, c, (d) << (bit_depth - 8))

static void randomize_loopfilter_buffers(int bidx, int lineoff, int str,
                                         int bit_depth, int dir, const int *E,
                                         const int *F, const int *H, const int *I,
                                         uint8_t *buf0, uint8_t *buf1)
{
    uint32_t mask = (1 << bit_depth) - 1;
    int off = dir ? lineoff : lineoff * 16;
    int istride = dir ? 1 : 16;
    int jstride = dir ? str : 1;
    int i, j;
    /* two lines that should take the flat16 path */
    for (i = 0; i < 2; i++) {
        int idx = off + i * istride, p0, q0;
        setpx(idx, 0, q0 = rnd() & mask);
        setsx(idx, -1, p0 = q0, E[bidx] >> 2);
        for (j = 1; j < 8; j++) {
            setsx(idx, -1 - j, p0, F[bidx]);
            setsx(idx, j, q0, F[bidx]);
        }
    }
    /* two flat8 lines */
    for (i = 2; i < 4; i++) {
        int idx = off + i * istride, p0, q0;
        setpx(idx, 0, q0 = rnd() & mask);
        setsx(idx, -1, p0 = q0, E[bidx] >> 2);
        for (j = 1; j < 4; j++) {
            setsx(idx, -1 - j, p0, F[bidx]);
            setsx(idx, j, q0, F[bidx]);
        }
        for (j = 4; j < 8; j++) {
            setpx(idx, -1 - j, rnd() & mask);
            setpx(idx, j, rnd() & mask);
        }
    }
    /* two regularly filtered lines */
    for (i = 4; i < 6; i++) {
        int idx = off + i * istride, p2, p1, p0, q0, q1, q2;
        setpx(idx, 0, q0 = rnd() & mask);
        setsx(idx, 1, q1 = q0, I[bidx]);
        setsx(idx, 2, q2 = q1, I[bidx]);
        setsx(idx, 3, q2, I[bidx]);
        setsx(idx, -1, p0 = q0, E[bidx] >> 2);
        setsx(idx, -2, p1 = p0, I[bidx]);
        setsx(idx, -3, p2 = p1, I[bidx]);
        setsx(idx, -4, p2, I[bidx]);
        for (j = 4; j < 8; j++) {
            setpx(idx, -1 - j, rnd() & mask);
            setpx(idx, j, rnd() & mask);
        }
    }
    /* two lines that should not be filtered at all */
    for (i = 6; i < 8; i++) {
        int idx = off + i * istride;
        for (j = 0; j < 8; j++) {
            setpx(idx, -1 - j, rnd() & mask);
            setpx(idx, j, rnd() & mask);
        }
    }
}
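/*
 * The E, I and H parameters are the loop filter's edge, interior and
 * high-edge-variance thresholds, and F is the offset used to generate
 * near-flat segments; setsx() places each sample within +/- the given
 * distance of its neighbour (scaled for bit depth), so the eight lines of
 * the test block are arranged to hit the flat16, flat8, regular and
 * "no filtering" code paths respectively.
 */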
#define randomize_buffers(bidx, lineoff, str) \
        randomize_loopfilter_buffers(bidx, lineoff, str, bit_depth, dir, \
                                     E, F, H, I, buf0, buf1)

    /* check_loopfilter() */
    static const char *const dir_name[2] = { "h", "v" };
    static const int E[2] = { 20, 28 }, I[2] = { 10, 16 };
    static const int H[2] = { 7, 11 }, F[2] = { 1, 1 };

    for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
        ff_vp9dsp_init(&dsp, bit_depth, 0);

        for (dir = 0; dir < 2; dir++) {
            uint8_t *buf0 = base0 + midoff_aligned;
            uint8_t *buf1 = base1 + midoff_aligned;

            for (wd = 0; wd < 3; wd++) {
                /* 4/8/16wd filters on an 8-pixel edge */
                if (check_func(dsp.loop_filter_8[wd][dir],
                               "vp9_loop_filter_%s_%d_8_%dbpp",
                               dir_name[dir], 4 << wd, bit_depth)) {
                    randomize_buffers(0, 0, 8);
                    memcpy(buf1 - midoff, buf0 - midoff,
                           16 * 8 * SIZEOF_PIXEL);
                    call_ref(buf0, 16 * SIZEOF_PIXEL >> dir, E[0], I[0], H[0]);
                    call_new(buf1, 16 * SIZEOF_PIXEL >> dir, E[0], I[0], H[0]);
                    if (memcmp(buf0 - midoff, buf1 - midoff, 16 * 8 * SIZEOF_PIXEL))
                        fail();
                    bench_new(buf1, 16 * SIZEOF_PIXEL >> dir, E[0], I[0], H[0]);
                }
            }

            /* midoff and midoff_aligned are recomputed for the full
             * 16x16 block layout here */
            buf0 = base0 + midoff_aligned;
            buf1 = base1 + midoff_aligned;

            /* 16wd filter on a 16-pixel edge */
            if (check_func(dsp.loop_filter_16[dir],
                           "vp9_loop_filter_%s_16_16_%dbpp",
                           dir_name[dir], bit_depth)) {
                randomize_buffers(0, 0, 16);
                randomize_buffers(0, 8, 16);
                memcpy(buf1 - midoff, buf0 - midoff, 16 * 16 * SIZEOF_PIXEL);
                call_ref(buf0, 16 * SIZEOF_PIXEL, E[0], I[0], H[0]);
                call_new(buf1, 16 * SIZEOF_PIXEL, E[0], I[0], H[0]);
                if (memcmp(buf0 - midoff, buf1 - midoff, 16 * 16 * SIZEOF_PIXEL))
                    fail();
                bench_new(buf1, 16 * SIZEOF_PIXEL, E[0], I[0], H[0]);
            }

            for (wd = 0; wd < 2; wd++) {
                for (wd2 = 0; wd2 < 2; wd2++) {
                    /* mix2 loopfilter: two adjacent 8-pixel edges */
                    if (check_func(dsp.loop_filter_mix2[wd][wd2][dir],
                                   "vp9_loop_filter_mix2_%s_%d%d_16_%dbpp",
                                   dir_name[dir], 4 << wd, 4 << wd2, bit_depth)) {
                        randomize_buffers(0, 0, 16);
                        randomize_buffers(1, 8, 16);
                        memcpy(buf1 - midoff, buf0 - midoff, 16 * 16 * SIZEOF_PIXEL);
#define M(a) (((a)[1] << 8) | (a)[0])
                        call_ref(buf0, 16 * SIZEOF_PIXEL, M(E), M(I), M(H));
                        call_new(buf1, 16 * SIZEOF_PIXEL, M(E), M(I), M(H));
                        if (memcmp(buf0 - midoff, buf1 - midoff, 16 * 16 * SIZEOF_PIXEL))
                            fail();
                        bench_new(buf1, 16 * SIZEOF_PIXEL, M(E), M(I), M(H));
#undef M
                    }
                }
            }
        }
    }
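/*
 * M() packs the thresholds of the two adjacent 8-pixel edges into one
 * argument (low byte for the first edge, high byte for the second), which is
 * presumably how the mix2 functions receive them from the decoder.
 */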
#undef randomize_buffers

#define DST_BUF_SIZE (size * size * SIZEOF_PIXEL)
#define SRC_BUF_STRIDE 72
#define SRC_BUF_SIZE ((size + 7) * SRC_BUF_STRIDE * SIZEOF_PIXEL)
#define src (buf + 3 * SIZEOF_PIXEL * (SRC_BUF_STRIDE + 1))

#define randomize_buffers()                               \
    do {                                                  \
        uint32_t mask = pixel_mask[(bit_depth - 8) >> 1]; \
        int k;                                            \
        for (k = 0; k < SRC_BUF_SIZE; k += 4) {           \
            uint32_t r = rnd() & mask;                    \
            AV_WN32A(buf + k, r);                         \
        }                                                 \
        for (k = 0; k < DST_BUF_SIZE; k += 4) {           \
            uint32_t r = rnd() & mask;                    \
            AV_WN32A(dst0 + k, r);                        \
            AV_WN32A(dst1 + k, r);                        \
        }                                                 \
    } while (0)

    /* check_mc(): motion compensation / sub-pel filters */
    declare_func_emms(AV_CPU_FLAG_MMX | AV_CPU_FLAG_MMXEXT, void,
                      uint8_t *dst, ptrdiff_t dst_stride,
                      const uint8_t *ref, ptrdiff_t ref_stride,
                      int h, int mx, int my);
    static const char *const filter_names[4] = {
        "8tap_smooth", "8tap_regular", "8tap_sharp", "bilin"
    };
    static const char *const subpel_names[2][2] = { { "", "h" }, { "v", "hv" } };
    static const char *const op_names[2] = { "put", "avg" };
    char str[256];
    for (op = 0; op < 2; op++) {
        for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) {
            ff_vp9dsp_init(&dsp, bit_depth, 0);
            for (hsize = 0; hsize < 5; hsize++) {
                int size = 64 >> hsize;

                for (filter = 0; filter < 4; filter++) {
                    for (dx = 0; dx < 2; dx++) {
                        for (dy = 0; dy < 2; dy++) {
                            if (dx || dy) {
                                snprintf(str, sizeof(str),
                                         "%s_%s_%d%s", op_names[op],
                                         filter_names[filter], size,
                                         subpel_names[dy][dx]);
                            } else {
                                snprintf(str, sizeof(str),
                                         "%s%d", op_names[op], size);
                            }
                            if (check_func(dsp.mc[hsize][filter][op][dx][dy],
                                           "vp9_%s_%dbpp", str, bit_depth)) {
                                int mx = dx ? 1 + (rnd() % 14) : 0;
                                int my = dy ? 1 + (rnd() % 14) : 0;

                                randomize_buffers();
                                call_ref(dst0, size * SIZEOF_PIXEL,
                                         src, SRC_BUF_STRIDE * SIZEOF_PIXEL,
                                         size, mx, my);
                                call_new(dst1, size * SIZEOF_PIXEL,
                                         src, SRC_BUF_STRIDE * SIZEOF_PIXEL,
                                         size, mx, my);
                                if (memcmp(dst0, dst1, DST_BUF_SIZE))
                                    fail();

                                /* the SIMD code for the three 8tap filters
                                 * differs only in its coefficients, and
                                 * 12 bpp bilin matches 10 bpp, so skip the
                                 * redundant benchmarks */
                                if (filter >= 1 && filter <= 2)
                                    continue;
                                if (bit_depth == 12 && filter == 3)
                                    continue;
                                bench_new(dst1, size * SIZEOF_PIXEL,
                                          src, SRC_BUF_STRIDE * SIZEOF_PIXEL,
                                          size, mx, my);
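/*
 * A minimal sketch of the module entry point that drives the four groups of
 * checks shown above (check_ipred, check_itxfm, check_loopfilter, check_mc),
 * assuming the usual checkasm naming convention:
 */
void checkasm_check_vp9dsp(void)
{
    check_ipred();
    check_itxfm();
    check_loopfilter();
    check_mc();
}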