static const uint32_t pixel_mask[3] = { 0xffffffff, 0x01ff01ff, 0x03ff03ff };

#define SIZEOF_PIXEL ((bit_depth + 7) / 8)
#define SIZEOF_COEF  (2 * ((bit_depth + 7) / 8))
#define PIXEL_STRIDE 16
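/* For orientation (not part of the original source): with bit_depth == 8,
 * SIZEOF_PIXEL evaluates to 1 and SIZEOF_COEF to 2 (8-bit pixels, int16_t
 * coefficients); with bit_depth == 9 or 10 they evaluate to 2 and 4
 * (16-bit pixels, int32_t coefficients). pixel_mask[bit_depth - 8] clamps
 * randomized pixel words to the legal range for each bit depth, and
 * PIXEL_STRIDE is the byte stride of the src/dst test buffers. */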
/* Fill src/dst with random pixels within the current bit depth's range and
 * store coef = src - dst, the residual fed to the transforms under test. */
#define randomize_buffers()                                             \
    do {                                                                \
        int x, y;                                                       \
        uint32_t mask = pixel_mask[bit_depth - 8];                      \
        for (y = 0; y < sz; y++) {                                      \
            for (x = 0; x < PIXEL_STRIDE; x += 4) {                     \
                AV_WN32A(src + y * PIXEL_STRIDE + x, rnd() & mask);     \
                AV_WN32A(dst + y * PIXEL_STRIDE + x, rnd() & mask);     \
            }                                                           \
            for (x = 0; x < sz; x++) {                                  \
                if (bit_depth == 8) {                                   \
                    coef[y * sz + x] = src[y * PIXEL_STRIDE + x] -      \
                                       dst[y * PIXEL_STRIDE + x];       \
                } else {                                                \
                    ((int32_t *)coef)[y * sz + x] =                     \
                        ((uint16_t *)src)[y * (PIXEL_STRIDE/2) + x] -   \
                        ((uint16_t *)dst)[y * (PIXEL_STRIDE/2) + x];    \
                }                                                       \
            }                                                           \
        }                                                               \
    } while (0)
/* Reference forward 4x4 H.264 transform with fixed-point scaling, used to
 * produce realistic coefficients for the IDCT functions under test. */
#define dct4x4_impl(size, dctcoef)                                          \
static void dct4x4_##size(dctcoef *coef)                                    \
{                                                                           \
    int i, x, y;                                                            \
    dctcoef tmp[4 * 4];                                                     \
    /* first 1-D pass (rows of coef, written transposed into tmp) */        \
    for (i = 0; i < 4; i++) {                                               \
        const int z0 = coef[i*4 + 0] + coef[i*4 + 3];                       \
        const int z1 = coef[i*4 + 1] + coef[i*4 + 2];                       \
        const int z2 = coef[i*4 + 0] - coef[i*4 + 3];                       \
        const int z3 = coef[i*4 + 1] - coef[i*4 + 2];                       \
        tmp[i + 4*0] = z0 + z1;                                             \
        tmp[i + 4*1] = 2*z2 + z3;                                           \
        tmp[i + 4*2] = z0 - z1;                                             \
        tmp[i + 4*3] = z2 - 2*z3;                                           \
    }                                                                       \
    /* second 1-D pass */                                                   \
    for (i = 0; i < 4; i++) {                                               \
        const int z0 = tmp[i*4 + 0] + tmp[i*4 + 3];                         \
        const int z1 = tmp[i*4 + 1] + tmp[i*4 + 2];                         \
        const int z2 = tmp[i*4 + 0] - tmp[i*4 + 3];                         \
        const int z3 = tmp[i*4 + 1] - tmp[i*4 + 2];                         \
        coef[i*4 + 0] = z0 + z1;                                            \
        coef[i*4 + 1] = 2*z2 + z3;                                          \
        coef[i*4 + 2] = z0 - z1;                                            \
        coef[i*4 + 3] = z2 - 2*z3;                                          \
    }                                                                       \
    /* per-position fixed-point scaling */                                  \
    for (y = 0; y < 4; y++) {                                               \
        for (x = 0; x < 4; x++) {                                           \
            static const int scale[] = { 13107 * 10, 8066 * 13, 5243 * 16 };\
            const int idx = (y & 1) + (x & 1);                              \
            coef[y*4 + x] = (coef[y*4 + x] * scale[idx] + (1 << 14)) >> 15; \
        }                                                                   \
    }                                                                       \
}
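/* The macro is presumably instantiated once per coefficient width (a sketch,
 * not verbatim from the original file; names follow the ##size suffix),
 * giving dct4x4_16() and dct4x4_32() for a bit-depth dispatching wrapper: */
dct4x4_impl(16, int16_t)  /* 8 bpp: int16_t coefficients   */
dct4x4_impl(32, int32_t)  /* 9/10 bpp: int32_t coefficients */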
/* One 1-D pass of the 8-point H.264 transform; the strides let the same
 * macro operate on either rows or columns. */
#define DCT8_1D(src, srcstride, dst, dststride) do {                        \
    const int a0 = (src)[srcstride * 0] + (src)[srcstride * 7];             \
    const int a1 = (src)[srcstride * 0] - (src)[srcstride * 7];             \
    const int a2 = (src)[srcstride * 1] + (src)[srcstride * 6];             \
    const int a3 = (src)[srcstride * 1] - (src)[srcstride * 6];             \
    const int a4 = (src)[srcstride * 2] + (src)[srcstride * 5];             \
    const int a5 = (src)[srcstride * 2] - (src)[srcstride * 5];             \
    const int a6 = (src)[srcstride * 3] + (src)[srcstride * 4];             \
    const int a7 = (src)[srcstride * 3] - (src)[srcstride * 4];             \
    const int b0 = a0 + a6;                                                 \
    const int b1 = a2 + a4;                                                 \
    const int b2 = a0 - a6;                                                 \
    const int b3 = a2 - a4;                                                 \
    const int b4 = a3 + a5 + (a1 + (a1 >> 1));                              \
    const int b5 = a1 - a7 - (a5 + (a5 >> 1));                              \
    const int b6 = a1 + a7 - (a3 + (a3 >> 1));                              \
    const int b7 = a3 - a5 + (a7 + (a7 >> 1));                              \
    (dst)[dststride * 0] = b0 + b1;                                         \
    (dst)[dststride * 1] = b4 + (b7 >> 2);                                  \
    (dst)[dststride * 2] = b2 + (b3 >> 1);                                  \
    (dst)[dststride * 3] = b5 + (b6 >> 2);                                  \
    (dst)[dststride * 4] = b0 - b1;                                         \
    (dst)[dststride * 5] = b6 - (b5 >> 2);                                  \
    (dst)[dststride * 6] = (b2 >> 1) - b3;                                  \
    (dst)[dststride * 7] = (b4 >> 2) - b7;                                  \
} while (0)
#define dct8x8_impl(size, dctcoef)                                          \
static void dct8x8_##size(dctcoef *coef)                                    \
{                                                                           \
    int i, x, y;                                                            \
    dctcoef tmp[8 * 8];                                                     \
    /* 1-D pass over columns, then over rows */                             \
    for (i = 0; i < 8; i++)                                                 \
        DCT8_1D(coef + i, 8, tmp + i, 8);                                   \
    for (i = 0; i < 8; i++)                                                 \
        DCT8_1D(tmp + 8*i, 1, coef + i, 8);                                 \
    /* per-position fixed-point scaling */                                  \
    for (y = 0; y < 8; y++) {                                               \
        for (x = 0; x < 8; x++) {                                           \
            static const int scale[] = {                                    \
                13107 * 20, 11428 * 18, 20972 * 32,                         \
                12222 * 19, 16777 * 25, 15481 * 24,                         \
            };                                                              \
            /* assumed: standard H.264 8x8 dequant index pattern */         \
            static const int idxmap[] = {                                   \
                0, 3, 4, 3,                                                 \
                3, 1, 5, 1,                                                 \
                4, 5, 2, 5,                                                 \
                3, 1, 5, 1,                                                 \
            };                                                              \
            const int idx = idxmap[(y & 3) * 4 + (x & 3)];                  \
            coef[y*8 + x] = ((int64_t)coef[y*8 + x] *                       \
                             scale[idx] + (1 << 17)) >> 18;                 \
        }                                                                   \
    }                                                                       \
}
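/* As with the 4x4 case, presumably instantiated for both coefficient widths
 * (a sketch; names follow the ##size suffix in the macro): */
dct8x8_impl(16, int16_t)  /* 8 bpp: int16_t coefficients   */
dct8x8_impl(32, int32_t)  /* 9/10 bpp: int32_t coefficients */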
/* Bit-depth dispatch wrapper; everything but the `if` line is reconstructed. */
static void dct8x8(int16_t *coef, int bit_depth)
{
    if (bit_depth == 8) {
        dct8x8_16(coef);
    } else {
        dct8x8_32((int32_t *) coef);
    }
}
static void check_idct(void)
{
    /* Buffer declarations (src, dst, dst0, dst1, coef, subcoef0, subcoef1),
     * loop variables, the H264DSPContext setup and declare_func_emms() are
     * omitted from this excerpt. */
    for (bit_depth = 8; bit_depth <= 10; bit_depth++) {
        for (sz = 4; sz <= 8; sz += 4) {
            /* randomize_buffers() and the reference dct4x4()/dct8x8() run
             * here (elided) */
            for (dc = 0; dc <= 1; dc++) {
                switch ((sz << 1) | dc) {
                /* cases selecting h264_idct_add, h264_idct_dc_add,
                 * h264_idct8_add or h264_idct8_dc_add into `idct` (elided) */
                }
                if (check_func(idct, "h264_idct%d_add%s_%dbpp",
                               sz, dc ? "_dc" : "", bit_depth)) {
                    for (align = 0; align < 16; align += sz * SIZEOF_PIXEL) {
                        /* dc == 1 case shown: only the DC coefficient is
                         * copied (the full-block copy for dc == 0 is elided) */
                        memcpy(subcoef0, coef, SIZEOF_COEF);
                        memcpy(dst0, dst, sz * PIXEL_STRIDE);
                        memcpy(dst1, dst, sz * PIXEL_STRIDE);
                        memcpy(subcoef1, subcoef0, sz * sz * SIZEOF_COEF);
                        call_ref(dst0, subcoef0, PIXEL_STRIDE);
                        call_new(dst1, subcoef1, PIXEL_STRIDE);
                        if (memcmp(dst0, dst1, sz * PIXEL_STRIDE) ||
                            memcmp(subcoef0, subcoef1, sz * sz * SIZEOF_COEF))
                            fail();
                    }
                }
            }
        }
    }
}
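/* The pattern above is the usual checkasm flow: check_func() evaluates to
 * non-zero only when there is an implementation for the current CPU flags
 * that has not already been tested; call_ref() and call_new() then run the
 * reference and the new version on identical inputs, so the memcmp() catches
 * any mismatch in either the written pixels or a clobbered coefficient
 * buffer. A bench_new() call on the same arguments typically follows to time
 * the new version. */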
static void check_idct_multiple(void)
{
    /* Buffer declarations (dst_full, coef_full, nnzc, dst0/dst1, coef0/coef1),
     * loop variables and declare_func_emms() are omitted from this excerpt. */
    for (bit_depth = 8; bit_depth <= 10; bit_depth++) {
        for (func = 0; func < 3; func++) {
            const char *name;
            int sz = 4, intra = 0;
            int block_offset[16] = { 0 };
            switch (func) {
            case 0:
                name = "h264_idct_add16";
                break;
            case 1:
                name = "h264_idct_add16intra";
                intra = 1;
                break;
            case 2:
                name = "h264_idct8_add4";
                sz = 8;
                break;
            }
            memset(nnzc, 0, 15 * 8);
            for (i = 0; i < 16 * 16; i += sz * sz) {
                int16_t coef[8 * 8 * 2];
                int index = i / sz;
                int block_y = (index / 16) * sz;
                int block_x = index % 16;
                int offset = (block_y * 16 + block_x) * SIZEOF_PIXEL;
                /* local src/dst blocks, their randomization, the forward DCT
                 * and the choice of nnz are elided; the two memcpy()s below
                 * are partly reconstructed */
                for (y = 0; y < sz; y++)          /* copy block into full picture */
                    memcpy(&dst_full[offset + y * 16 * SIZEOF_PIXEL],
                           &dst[PIXEL_STRIDE * y], sz * SIZEOF_PIXEL);
                memcpy(&coef_full[i * SIZEOF_COEF / sizeof(coef[0])],
                       coef, nnz * SIZEOF_COEF);  /* length assumed: nnz coeffs */
                if (intra && nnz == 1)
                    nnz = 0;
                nnzc[scan8[i / 16]] = nnz;
                block_offset[i / 16] = offset;
            }
            /* the check_func()/call_ref()/call_new() comparison over the
             * assembled 16x16 picture follows (elided) */
        }
    }
}
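/* A minimal sketch of the test entry point tying the two checks together
 * (the report name and any additional checks are assumptions): */
void checkasm_check_h264dsp(void)
{
    check_idct();
    check_idct_multiple();
    report("idct");
}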