#define IdctAdjustBeforeShift 8

/* ... the xC1S7 .. xC7S1 fixed-point cosine constants are elided ... */

/* 16.16 fixed-point multiply; the SUINT cast keeps the multiplication free
 * of signed-overflow undefined behavior (see the note on SUINT below) */
#define M(a, b) ((int)((SUINT)(a) * (b)) >> 16)

static av_always_inline void idct(uint8_t *dst, ptrdiff_t stride,
                                  int16_t *input, int type)
{
    int16_t *ip = input;
    int A, B, C, D, Ad, Bd, Cd, Dd, E, F, G, H;
    int Ed, Gd, Add, Bdd, Fd, Hd;
    int i;

    /* first pass of the inverse DCT */
    for (i = 0; i < 8; i++) {
        /* take the full path only if this line has a non-zero coefficient */
        if (ip[0 * 8] | ip[1 * 8] | ip[2 * 8] | ip[3 * 8] |
            ip[4 * 8] | ip[5 * 8] | ip[6 * 8] | ip[7 * 8]) {
            /* ... the butterfly terms A .. Dd are elided ... */
            E = M(xC4S4, (ip[0 * 8] + ip[4 * 8]));
            F = M(xC4S4, (ip[0 * 8] - ip[4 * 8]));
            /* ... the remaining terms and the in-place write-back to ip[]
             * are elided ... */
        }
        /* ... advance ip to the next line of coefficients ... */
    }

    ip = input;

    /* second pass, fused with the output stage; the "+ 8" below makes the
     * final ">> 4" round to nearest instead of truncating */
    for (i = 0; i < 8; i++) {
        /* the full path is only needed if an AC coefficient is non-zero */
        if (ip[1] | ip[2] | ip[3] |
            ip[4] | ip[5] | ip[6] | ip[7]) {
            /* ... A .. Dd as in the first pass ... */
            E = M(xC4S4, (ip[0] + ip[4])) + 8;
            F = M(xC4S4, (ip[0] - ip[4])) + 8;
            /* ... G, H and the combined terms Ed, Gd, Add, Bdd, Fd, Hd
             * are elided ... */

            if (type == 1) {  /* "put": overwrite the destination */
                dst[0 * stride] = av_clip_uint8((Gd + Cd) >> 4);
                dst[7 * stride] = av_clip_uint8((Gd - Cd) >> 4);

                dst[1 * stride] = av_clip_uint8((Add + Hd) >> 4);
                dst[2 * stride] = av_clip_uint8((Add - Hd) >> 4);

                dst[3 * stride] = av_clip_uint8((Ed + Dd) >> 4);
                dst[4 * stride] = av_clip_uint8((Ed - Dd) >> 4);

                dst[5 * stride] = av_clip_uint8((Fd + Bdd) >> 4);
                dst[6 * stride] = av_clip_uint8((Fd - Bdd) >> 4);
            } else {          /* "add": sum with the existing prediction */
                dst[0 * stride] = av_clip_uint8(dst[0 * stride] + ((Gd + Cd) >> 4));
                dst[7 * stride] = av_clip_uint8(dst[7 * stride] + ((Gd - Cd) >> 4));

                dst[1 * stride] = av_clip_uint8(dst[1 * stride] + ((Add + Hd) >> 4));
                dst[2 * stride] = av_clip_uint8(dst[2 * stride] + ((Add - Hd) >> 4));

                dst[3 * stride] = av_clip_uint8(dst[3 * stride] + ((Ed + Dd) >> 4));
                dst[4 * stride] = av_clip_uint8(dst[4 * stride] + ((Ed - Dd) >> 4));

                dst[5 * stride] = av_clip_uint8(dst[5 * stride] + ((Fd + Bdd) >> 4));
                dst[6 * stride] = av_clip_uint8(dst[6 * stride] + ((Fd - Bdd) >> 4));
            }
        } else {
            /* no non-zero AC coefficient in this line */
            /* ... the type == 1 ("put") branch, which stores a flat block,
             * is elided ... */
            if (ip[0]) {
                int v = (xC4S4 * ip[0] + (IdctAdjustBeforeShift << 16)) >> 20;

                dst[0 * stride] = av_clip_uint8(dst[0 * stride] + v);
                dst[1 * stride] = av_clip_uint8(dst[1 * stride] + v);
                dst[2 * stride] = av_clip_uint8(dst[2 * stride] + v);
                dst[3 * stride] = av_clip_uint8(dst[3 * stride] + v);
                dst[4 * stride] = av_clip_uint8(dst[4 * stride] + v);
                dst[5 * stride] = av_clip_uint8(dst[5 * stride] + v);
                dst[6 * stride] = av_clip_uint8(dst[6 * stride] + v);
                dst[7 * stride] = av_clip_uint8(dst[7 * stride] + v);
            }
        }
        /* ... advance ip and dst to the next line / output column ... */
    }
}

static av_always_inline void idct10(uint8_t *dst, ptrdiff_t stride,
                                    int16_t *input, int type)
{
    int16_t *ip = input;
    int A, B, C, D, Ad, Bd, Cd, Dd, E, F, G, H;
    int Ed, Gd, Add, Bdd, Fd, Hd;
    int i;

    /* idct10 assumes only the first ten coefficients (all inside the
     * top-left corner of the block) can be non-zero, so the first pass
     * only has to visit four lines */
    for (i = 0; i < 4; i++) {
        if (ip[0 * 8] | ip[1 * 8] | ip[2 * 8] | ip[3 * 8]) {
            /* ... butterfly terms elided; they mirror idct() with the
             * always-zero inputs folded away ... */
            /* ... ip[0 * 8] / ip[7 * 8] write-back elided ... */
            ip[1 * 8] = Add + Hd;
            ip[2 * 8] = Add - Hd;
            /* ... ip[3 * 8] / ip[4 * 8] write-back elided ... */
            ip[5 * 8] = Fd + Bdd;
            ip[6 * 8] = Fd - Bdd;
        }
        /* ... advance ip to the next line ... */
    }

    ip = input;

    /* second pass over all eight output columns */
    for (i = 0; i < 8; i++) {
        if (ip[0] | ip[1] | ip[2] | ip[3]) {
            /* ... butterfly terms and rounding offsets elided ... */
            if (type == 1) {  /* "put" */
                dst[0 * stride] = av_clip_uint8((Gd + Cd) >> 4);
                dst[7 * stride] = av_clip_uint8((Gd - Cd) >> 4);

                dst[1 * stride] = av_clip_uint8((Add + Hd) >> 4);
                dst[2 * stride] = av_clip_uint8((Add - Hd) >> 4);

                dst[3 * stride] = av_clip_uint8((Ed + Dd) >> 4);
                dst[4 * stride] = av_clip_uint8((Ed - Dd) >> 4);

                dst[5 * stride] = av_clip_uint8((Fd + Bdd) >> 4);
                dst[6 * stride] = av_clip_uint8((Fd - Bdd) >> 4);
            } else {          /* "add" */
                dst[0 * stride] = av_clip_uint8(dst[0 * stride] + ((Gd + Cd) >> 4));
                dst[7 * stride] = av_clip_uint8(dst[7 * stride] + ((Gd - Cd) >> 4));

                dst[1 * stride] = av_clip_uint8(dst[1 * stride] + ((Add + Hd) >> 4));
                dst[2 * stride] = av_clip_uint8(dst[2 * stride] + ((Add - Hd) >> 4));

                dst[3 * stride] = av_clip_uint8(dst[3 * stride] + ((Ed + Dd) >> 4));
                dst[4 * stride] = av_clip_uint8(dst[4 * stride] + ((Ed - Dd) >> 4));

                dst[5 * stride] = av_clip_uint8(dst[5 * stride] + ((Fd + Bdd) >> 4));
                dst[6 * stride] = av_clip_uint8(dst[6 * stride] + ((Fd - Bdd) >> 4));
            }
        }
        /* ... the DC-only column path and the advance to the next column
         * are elided ... */
    }
}

void ff_vp3dsp_idct10_put(uint8_t *dest, ptrdiff_t stride, int16_t *block)
{
    idct10(dest, stride, block, 1);
    memset(block, 0, sizeof(*block) * 64);
}

void ff_vp3dsp_idct10_add(uint8_t *dest, ptrdiff_t stride, int16_t *block)
{
    idct10(dest, stride, block, 2);
    memset(block, 0, sizeof(*block) * 64);
}

static void vp3_idct_put_c(uint8_t *dest, ptrdiff_t stride, int16_t *block)
{
    idct(dest, stride, block, 1);
    memset(block, 0, sizeof(*block) * 64);
}

static void vp3_idct_add_c(uint8_t *dest, ptrdiff_t stride, int16_t *block)
{
    idct(dest, stride, block, 2);
    memset(block, 0, sizeof(*block) * 64);
}
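
A hypothetical call site, to show how the two entry points are meant to be used through the function-pointer table (VP3DSPContext and its idct_put/idct_add members appear in the reference list below; the decoder's real logic is more involved):

/* Hypothetical usage sketch, not code from the decoder. */
static void render_block(VP3DSPContext *dsp, uint8_t *dest, ptrdiff_t stride,
                         int16_t block[64], int intra)
{
    if (intra)
        dsp->idct_put(dest, stride, block);  /* overwrite: the type == 1 path */
    else
        dsp->idct_add(dest, stride, block);  /* add to prediction: type == 2  */
    /* either way, block[] has been cleared for the next use */
}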

static void vp3_idct_dc_add_c(uint8_t *dest, ptrdiff_t stride, int16_t *block)
{
    int i, dc = (block[0] + 15) >> 5;

    for (i = 0; i < 8; i++) {
        dest[0] = av_clip_uint8(dest[0] + dc);
        dest[1] = av_clip_uint8(dest[1] + dc);
        dest[2] = av_clip_uint8(dest[2] + dc);
        dest[3] = av_clip_uint8(dest[3] + dc);
        dest[4] = av_clip_uint8(dest[4] + dc);
        dest[5] = av_clip_uint8(dest[5] + dc);
        dest[6] = av_clip_uint8(dest[6] + dc);
        dest[7] = av_clip_uint8(dest[7] + dc);
        dest += stride;
    }
    block[0] = 0;
}
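
A note on the shortcut above: a DC-only block is multiplied by xC4S4 (cos(pi/4) in 16.16 fixed point, roughly 0.7071) once per pass, and the output stage shifts right by 4, so the overall gain is about 0.7071 * 0.7071 / 16 = 1/32. The expression (block[0] + 15) >> 5 is exactly that division by 32, with rounding, which lets a DC-only block bypass the full transform.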

static av_always_inline void vp3_v_loop_filter_c(uint8_t *first_pixel, ptrdiff_t stride,
                                                 int *bounding_values, int count)
{
    uint8_t *end;
    int filter_value;
    const ptrdiff_t nstride = -stride;

    for (end = first_pixel + count; first_pixel < end; first_pixel++) {
        filter_value = (first_pixel[2 * nstride] - first_pixel[stride]) +
                       (first_pixel[0] - first_pixel[nstride]) * 3;
        filter_value = bounding_values[(filter_value + 4) >> 3];

        first_pixel[nstride] = av_clip_uint8(first_pixel[nstride] + filter_value);
        first_pixel[0]       = av_clip_uint8(first_pixel[0] - filter_value);
    }
}

static av_always_inline void vp3_h_loop_filter_c(uint8_t *first_pixel, ptrdiff_t stride,
                                                 int *bounding_values, int count)
{
    uint8_t *end;
    int filter_value;

    for (end = first_pixel + count * stride; first_pixel != end;
         first_pixel += stride) {
        filter_value = (first_pixel[-2] - first_pixel[1]) +
                       (first_pixel[ 0] - first_pixel[-1]) * 3;
        filter_value = bounding_values[(filter_value + 4) >> 3];

        first_pixel[-1] = av_clip_uint8(first_pixel[-1] + filter_value);
        first_pixel[ 0] = av_clip_uint8(first_pixel[ 0] - filter_value);
    }
}
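
Both loop filters apply the same four-tap kernel across a block edge; only the addressing differs (the vertical filter reads pixels one stride apart across a horizontal edge, the horizontal filter reads neighbours within a row across a vertical edge). A hypothetical scalar helper, to make the kernel explicit on the four pixels p0 p1 | p2 p3 straddling the edge:

/* Hypothetical helper, not part of the file: the VP3 loop-filter kernel on
 * four pixels p0 p1 | p2 p3 that straddle a block edge; p1 and p2 are the
 * two pixels touching the edge and the only ones that get modified. */
static void vp3_filter_pixel_pair(uint8_t *p1, uint8_t *p2, int p0, int p3,
                                  const int *bounding_values)
{
    /* high-pass response: zero on a linear ramp, non-zero at a discontinuity */
    int d = (p0 - p3) + (*p2 - *p1) * 3;

    /* divide by 8 with rounding, then damp (or drop) it via the bounding table */
    d = bounding_values[(d + 4) >> 3];

    *p1 = av_clip_uint8(*p1 + d);
    *p2 = av_clip_uint8(*p2 - d);
}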

/* Thin named wrappers around the two filters above, with the pixel count
 * baked in */
#define LOOP_FILTER(prefix, suffix, dim, count) \
void prefix##_##dim##_loop_filter_##count##suffix(uint8_t *first_pixel, ptrdiff_t stride, \
                                                  int *bounding_values) \
{ \
    vp3_##dim##_loop_filter_c(first_pixel, stride, bounding_values, count); \
}

static void put_no_rnd_pixels_l2(uint8_t *dst, const uint8_t *src1,
                                 const uint8_t *src2, ptrdiff_t stride, int h)
{
    int i;

    for (i = 0; i < h; i++) {
        uint32_t a, b;

        a = AV_RN32(&src1[i * stride]);
        b = AV_RN32(&src2[i * stride]);
        /* ... store no_rnd_avg32(a, b) into the destination row ... */
        a = AV_RN32(&src1[i * stride + 4]);
        b = AV_RN32(&src2[i * stride + 4]);
        /* ... store no_rnd_avg32(a, b) into the second half of the row ... */
    }
}
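
The elided stores above go through no_rnd_avg32() (its prototype appears in the reference list below; the definition lives in rnd_avg.h). A sketch of the usual SWAR formulation behind such a helper:

#include <stdint.h>

/* Per-byte average of two packed 4-byte words, rounded down ("no rounding"):
 * (a + b) >> 1 == (a & b) + ((a ^ b) >> 1). Masking with 0xFEFEFEFE keeps
 * the bit shifted out of one byte lane from bleeding into its neighbour. */
static inline uint32_t no_rnd_avg32_sketch(uint32_t a, uint32_t b)
{
    return (a & b) + (((a ^ b) & 0xFEFEFEFEu) >> 1);
}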

void ff_vp3dsp_set_bounding_values(int *bounding_values_array, int filter_limit)
{
    int *bounding_values = bounding_values_array + 127;
    int x;
    int value;

    av_assert0(filter_limit < 128U);

    /* set up the bounding values */
    memset(bounding_values_array, 0, 256 * sizeof(int));
    for (x = 0; x < filter_limit; x++) {
        bounding_values[-x] = -x;
        bounding_values[ x] =  x;
    }
    for (x = value = filter_limit; x < 128 && value; x++, value--) {
        bounding_values[ x] =  value;
        bounding_values[-x] = -value;
    }
    if (value)
        bounding_values[128] = value;
    bounding_values[129] = bounding_values[130] = filter_limit * 0x02020202U;
}
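
The table maps a raw filter response to the correction that actually gets applied: small responses pass through unchanged, responses just above filter_limit are damped back towards zero, and large ones are dropped entirely; the last two slots hold filter_limit replicated into every byte (times two), a form the optimized loop filters can consume directly. A small hypothetical driver to visualize the shape (the 256 + 2 sizing simply covers every index written above):

#include <stdio.h>

void ff_vp3dsp_set_bounding_values(int *bounding_values_array, int filter_limit);

/* For filter_limit = 4 the printed values around zero should read
 * 0 -1 -2 -3 -4 -3 -2 -1 0 1 2 3 4 3 2 1 0. */
int main(void)
{
    int table[256 + 2];
    int x;

    ff_vp3dsp_set_bounding_values(table, 4);
    for (x = -8; x <= 8; x++)
        printf("%3d -> %3d\n", x, table[127 + x]);
    return 0;
}

The prototypes and notes below list the symbols this file defines or references.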
static void vp3_idct_put_c(uint8_t *dest, ptrdiff_t stride, int16_t *block)
void(* put_no_rnd_pixels_l2)(uint8_t *dst, const uint8_t *a, const uint8_t *b, ptrdiff_t stride, int h)
Copy 8xH pixels from source to destination buffer using a bilinear filter with no rounding (the per-byte average rounds down).
void(* v_loop_filter)(uint8_t *src, ptrdiff_t stride, int *bounding_values)
The reader does not expect the operand to be semantically unsigned here, and if the code is later changed (say, a cast or a division is added) the signedness will almost certainly be mistaken. To avoid this confusion a new type was introduced: SUINT is the C unsigned type, but it holds a signed int. To use the same example:
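
A minimal sketch of that idea (the typedef mirrors FFmpeg's definition in normal builds; the exact snippet in the original documentation is not reproduced here):

typedef unsigned SUINT;   /* a signed value carried in an unsigned type */

static void suint_example(void)
{
    SUINT a = -1;      /* stored as UINT_MAX, meant to be read as -1         */
    SUINT b = a * 10;  /* unsigned multiply: well defined, wraps modulo 2^32 */
    int   c = b;       /* back to int: -10 on the two's-complement targets
                        * FFmpeg supports */
    (void)c;
}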
Macro definitions for various function/variable attributes.
av_cold void ff_vp3dsp_init_mips(VP3DSPContext *c, int flags)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
void ff_vp3dsp_idct10_add(uint8_t *dest, ptrdiff_t stride, int16_t *block)
void ff_vp3dsp_idct10_put(uint8_t *dest, ptrdiff_t stride, int16_t *block)
Undefined behavior: in C, some operations are undefined, such as signed integer overflow, dereferencing freed pointers, or accessing memory outside what was allocated. Undefined behavior must not occur in a C program; it is not safe even if the output of the undefined operation is unused. That may look like nit-picking, but optimizing compilers do optimize code on the assumption that no undefined behavior occurs, and optimizing on wrong assumptions can, and in some cases has, led to effects beyond the output of the computation. The signed integer overflow problem shows up in speed-critical code: highly optimized code that works with signed integers often produces output that does not depend on whether an intermediate value overflowed, yet the overflow itself would still be undefined behavior; the SUINT cast in the M() macro above is how this file avoids it.
static av_always_inline void vp3_h_loop_filter_c(uint8_t *first_pixel, ptrdiff_t stride, int *bounding_values, int count)
static av_always_inline void vp3_v_loop_filter_c(uint8_t *first_pixel, ptrdiff_t stride, int *bounding_values, int count)
static void vp3_idct_add_c(uint8_t *dest, ptrdiff_t stride, int16_t *block)
void(* h_loop_filter_unaligned)(uint8_t *src, ptrdiff_t stride, int *bounding_values)
void(* idct_add)(uint8_t *dest, ptrdiff_t stride, int16_t *block)
void(* h_loop_filter)(uint8_t *src, ptrdiff_t stride, int *bounding_values)
simple assert() macros that are a bit more flexible than ISO C assert().
static uint32_t no_rnd_avg32(uint32_t a, uint32_t b)
#define LOOP_FILTER(prefix, suffix, dim, count)
void(* idct_dc_add)(uint8_t *dest, ptrdiff_t stride, int16_t *block)
Libavcodec external API header.
av_cold void ff_vp3dsp_init_arm(VP3DSPContext *c, int flags)
static void vp3_idct_dc_add_c(uint8_t *dest, ptrdiff_t stride, int16_t *block)
void ff_vp3dsp_init_x86(VP3DSPContext *c, int flags)
static av_always_inline void idct10(uint8_t *dst, ptrdiff_t stride, int16_t *input, int type)
common internal and external API header
void ff_vp3dsp_set_bounding_values(int *bounding_values_array, int filter_limit)
av_cold void ff_vp3dsp_init_ppc(VP3DSPContext *c, int flags)
static av_always_inline void idct(uint8_t *dst, ptrdiff_t stride, int16_t *input, int type)
#define IdctAdjustBeforeShift
void(* idct_put)(uint8_t *dest, ptrdiff_t stride, int16_t *block)
av_cold void ff_vp3dsp_init(VP3DSPContext *c, int flags)
void(* v_loop_filter_unaligned)(uint8_t *src, ptrdiff_t stride, int *bounding_values)
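
To show how the pieces above fit together, an illustrative sketch of what the generic init does (the function name here is made up; the real ff_vp3dsp_init() in vp3dsp.c is the authority, and it also wires up the loop-filter members, omitted here):

static av_cold void vp3dsp_init_sketch(VP3DSPContext *c, int flags)
{
    /* install the C reference implementations first ... */
    c->put_no_rnd_pixels_l2 = put_no_rnd_pixels_l2;
    c->idct_put    = vp3_idct_put_c;
    c->idct_add    = vp3_idct_add_c;
    c->idct_dc_add = vp3_idct_dc_add_c;

    /* ... then give each architecture a chance to override them */
#if ARCH_ARM
    ff_vp3dsp_init_arm(c, flags);
#endif
#if ARCH_PPC
    ff_vp3dsp_init_ppc(c, flags);
#endif
#if ARCH_X86
    ff_vp3dsp_init_x86(c, flags);
#endif
#if ARCH_MIPS
    ff_vp3dsp_init_mips(c, flags);
#endif
}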