vp8dsp_init.c
/*
 * VP8 DSP functions x86-optimized
 * Copyright (c) 2010 Ronald S. Bultje <rsbultje@gmail.com>
 * Copyright (c) 2010 Fiona Glaser <fiona@x264.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/mem_internal.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/vp8dsp.h"

#if HAVE_X86ASM

/*
 * MC functions
 */
void ff_put_vp8_epel4_h4_mmxext(uint8_t *dst, ptrdiff_t dststride,
                                const uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel4_h6_mmxext(uint8_t *dst, ptrdiff_t dststride,
                                const uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel4_v4_mmxext(uint8_t *dst, ptrdiff_t dststride,
                                const uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel4_v6_mmxext(uint8_t *dst, ptrdiff_t dststride,
                                const uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);

void ff_put_vp8_epel8_h4_sse2  (uint8_t *dst, ptrdiff_t dststride,
                                const uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel8_h6_sse2  (uint8_t *dst, ptrdiff_t dststride,
                                const uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel8_v4_sse2  (uint8_t *dst, ptrdiff_t dststride,
                                const uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel8_v6_sse2  (uint8_t *dst, ptrdiff_t dststride,
                                const uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);

void ff_put_vp8_epel4_h4_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                const uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel4_h6_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                const uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel4_v4_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                const uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel4_v6_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                const uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel8_h4_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                const uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel8_h6_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                const uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel8_v4_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                const uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);
void ff_put_vp8_epel8_v6_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                const uint8_t *src, ptrdiff_t srcstride,
                                int height, int mx, int my);

void ff_put_vp8_bilinear4_h_mmxext(uint8_t *dst, ptrdiff_t dststride,
                                   const uint8_t *src, ptrdiff_t srcstride,
                                   int height, int mx, int my);
void ff_put_vp8_bilinear8_h_sse2  (uint8_t *dst, ptrdiff_t dststride,
                                   const uint8_t *src, ptrdiff_t srcstride,
                                   int height, int mx, int my);
void ff_put_vp8_bilinear4_h_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                   const uint8_t *src, ptrdiff_t srcstride,
                                   int height, int mx, int my);
void ff_put_vp8_bilinear8_h_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                   const uint8_t *src, ptrdiff_t srcstride,
                                   int height, int mx, int my);

void ff_put_vp8_bilinear4_v_mmxext(uint8_t *dst, ptrdiff_t dststride,
                                   const uint8_t *src, ptrdiff_t srcstride,
                                   int height, int mx, int my);
void ff_put_vp8_bilinear8_v_sse2  (uint8_t *dst, ptrdiff_t dststride,
                                   const uint8_t *src, ptrdiff_t srcstride,
                                   int height, int mx, int my);
void ff_put_vp8_bilinear4_v_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                   const uint8_t *src, ptrdiff_t srcstride,
                                   int height, int mx, int my);
void ff_put_vp8_bilinear8_v_ssse3 (uint8_t *dst, ptrdiff_t dststride,
                                   const uint8_t *src, ptrdiff_t srcstride,
                                   int height, int mx, int my);


void ff_put_vp8_pixels8_mmx (uint8_t *dst, ptrdiff_t dststride,
                             const uint8_t *src, ptrdiff_t srcstride,
                             int height, int mx, int my);
void ff_put_vp8_pixels16_sse(uint8_t *dst, ptrdiff_t dststride,
                             const uint8_t *src, ptrdiff_t srcstride,
                             int height, int mx, int my);

#define TAP_W16(OPT, FILTERTYPE, TAPTYPE) \
static void ff_put_vp8_ ## FILTERTYPE ## 16_ ## TAPTYPE ## _ ## OPT( \
    uint8_t *dst, ptrdiff_t dststride, const uint8_t *src, \
    ptrdiff_t srcstride, int height, int mx, int my) \
{ \
    ff_put_vp8_ ## FILTERTYPE ## 8_ ## TAPTYPE ## _ ## OPT( \
        dst,     dststride, src,     srcstride, height, mx, my); \
    ff_put_vp8_ ## FILTERTYPE ## 8_ ## TAPTYPE ## _ ## OPT( \
        dst + 8, dststride, src + 8, srcstride, height, mx, my); \
}
#define TAP_W8(OPT, FILTERTYPE, TAPTYPE) \
static void ff_put_vp8_ ## FILTERTYPE ## 8_ ## TAPTYPE ## _ ## OPT( \
    uint8_t *dst, ptrdiff_t dststride, uint8_t *src, \
    ptrdiff_t srcstride, int height, int mx, int my) \
{ \
    ff_put_vp8_ ## FILTERTYPE ## 4_ ## TAPTYPE ## _ ## OPT( \
        dst,     dststride, src,     srcstride, height, mx, my); \
    ff_put_vp8_ ## FILTERTYPE ## 4_ ## TAPTYPE ## _ ## OPT( \
        dst + 4, dststride, src + 4, srcstride, height, mx, my); \
}
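
/* The TAP_W16/TAP_W8 wrappers synthesize a wider MC function from two calls
 * to the next narrower kernel, one per half of the block.  The instantiations
 * below provide the 16-wide 6-tap and bilinear variants on top of the native
 * 8-wide SSE2/SSSE3 kernels. */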

TAP_W16(sse2, epel, h6)
TAP_W16(sse2, epel, v6)
TAP_W16(sse2, bilinear, h)
TAP_W16(sse2, bilinear, v)

TAP_W16(ssse3, epel, h6)
TAP_W16(ssse3, epel, v6)
TAP_W16(ssse3, bilinear, h)
TAP_W16(ssse3, bilinear, v)

#define HVTAP(OPT, ALIGN, TAPNUMX, TAPNUMY, SIZE, MAXHEIGHT) \
static void ff_put_vp8_epel ## SIZE ## _h ## TAPNUMX ## v ## TAPNUMY ## _ ## OPT( \
    uint8_t *dst, ptrdiff_t dststride, const uint8_t *src, \
    ptrdiff_t srcstride, int height, int mx, int my) \
{ \
    LOCAL_ALIGNED(ALIGN, uint8_t, tmp, [SIZE * (MAXHEIGHT + TAPNUMY - 1)]); \
    uint8_t *tmpptr = tmp + SIZE * (TAPNUMY / 2 - 1); \
    src -= srcstride * (TAPNUMY / 2 - 1); \
    ff_put_vp8_epel ## SIZE ## _h ## TAPNUMX ## _ ## OPT( \
        tmp, SIZE, src, srcstride, height + TAPNUMY - 1, mx, my); \
    ff_put_vp8_epel ## SIZE ## _v ## TAPNUMY ## _ ## OPT( \
        dst, dststride, tmpptr, SIZE, height, mx, my); \
}
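
/* HVTAP builds a combined horizontal+vertical subpel filter: the horizontal
 * pass writes into an on-stack temporary buffer that is (TAPNUMY - 1) rows
 * taller than the destination block, with src rewound by (TAPNUMY / 2 - 1)
 * rows so the context rows needed by the vertical filter are available; the
 * vertical pass then filters that buffer into dst. */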

#define HVTAPMMX(x, y) \
HVTAP(mmxext, 8, x, y, 4, 8)

HVTAPMMX(4, 4)
HVTAPMMX(4, 6)
HVTAPMMX(6, 4)
HVTAPMMX(6, 6)

#define HVTAPSSE2(x, y, w) \
HVTAP(sse2, 16, x, y, w, 16) \
HVTAP(ssse3, 16, x, y, w, 16)

HVTAPSSE2(4, 4, 8)
HVTAPSSE2(4, 6, 8)
HVTAPSSE2(6, 4, 8)
HVTAPSSE2(6, 6, 8)
HVTAPSSE2(6, 6, 16)

HVTAP(ssse3, 16, 4, 4, 4, 8)
HVTAP(ssse3, 16, 4, 6, 4, 8)
HVTAP(ssse3, 16, 6, 4, 4, 8)
HVTAP(ssse3, 16, 6, 6, 4, 8)

#define HVBILIN(OPT, ALIGN, SIZE, MAXHEIGHT) \
static void ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT( \
    uint8_t *dst, ptrdiff_t dststride, const uint8_t *src, \
    ptrdiff_t srcstride, int height, int mx, int my) \
{ \
    LOCAL_ALIGNED(ALIGN, uint8_t, tmp, [SIZE * (MAXHEIGHT + 2)]); \
    ff_put_vp8_bilinear ## SIZE ## _h_ ## OPT( \
        tmp, SIZE, src, srcstride, height + 1, mx, my); \
    ff_put_vp8_bilinear ## SIZE ## _v_ ## OPT( \
        dst, dststride, tmp, SIZE, height, mx, my); \
}
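
/* HVBILIN applies the same two-pass scheme to the bilinear filter; the 2-tap
 * kernel only needs one extra row of horizontal output for the vertical pass
 * and no rewinding of src. */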

HVBILIN(mmxext, 8, 4, 8)
HVBILIN(sse2, 8, 8, 16)
HVBILIN(sse2, 8, 16, 16)
HVBILIN(ssse3, 8, 4, 8)
HVBILIN(ssse3, 8, 8, 16)
HVBILIN(ssse3, 8, 16, 16)

void ff_vp8_idct_dc_add_sse2(uint8_t *dst, int16_t block[16],
                             ptrdiff_t stride);
void ff_vp8_idct_dc_add_sse4(uint8_t *dst, int16_t block[16],
                             ptrdiff_t stride);
void ff_vp8_idct_dc_add4y_sse2(uint8_t *dst, int16_t block[4][16],
                               ptrdiff_t stride);
void ff_vp8_idct_dc_add4uv_mmx(uint8_t *dst, int16_t block[2][16],
                               ptrdiff_t stride);
void ff_vp8_luma_dc_wht_sse(int16_t block[4][4][16], int16_t dc[16]);
void ff_vp8_idct_add_sse(uint8_t *dst, int16_t block[16], ptrdiff_t stride);

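/* Loop-filter kernels.  The "simple" filters take a single filter limit
 * (flim); the normal filters take the VP8 edge limit (e), interior limit (i)
 * and high-edge-variance threshold (hvt) (see vp8dsp.h).  The 8uv variants
 * filter the U and V planes in a single call. */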
#define DECLARE_LOOP_FILTER(NAME) \
void ff_vp8_v_loop_filter_simple_ ## NAME(uint8_t *dst, \
                                          ptrdiff_t stride, \
                                          int flim); \
void ff_vp8_h_loop_filter_simple_ ## NAME(uint8_t *dst, \
                                          ptrdiff_t stride, \
                                          int flim); \
void ff_vp8_v_loop_filter16y_inner_ ## NAME (uint8_t *dst, \
                                             ptrdiff_t stride, \
                                             int e, int i, int hvt); \
void ff_vp8_h_loop_filter16y_inner_ ## NAME (uint8_t *dst, \
                                             ptrdiff_t stride, \
                                             int e, int i, int hvt); \
void ff_vp8_v_loop_filter8uv_inner_ ## NAME (uint8_t *dstU, \
                                             uint8_t *dstV, \
                                             ptrdiff_t s, \
                                             int e, int i, int hvt); \
void ff_vp8_h_loop_filter8uv_inner_ ## NAME (uint8_t *dstU, \
                                             uint8_t *dstV, \
                                             ptrdiff_t s, \
                                             int e, int i, int hvt); \
void ff_vp8_v_loop_filter16y_mbedge_ ## NAME(uint8_t *dst, \
                                             ptrdiff_t stride, \
                                             int e, int i, int hvt); \
void ff_vp8_h_loop_filter16y_mbedge_ ## NAME(uint8_t *dst, \
                                             ptrdiff_t stride, \
                                             int e, int i, int hvt); \
void ff_vp8_v_loop_filter8uv_mbedge_ ## NAME(uint8_t *dstU, \
                                             uint8_t *dstV, \
                                             ptrdiff_t s, \
                                             int e, int i, int hvt); \
void ff_vp8_h_loop_filter8uv_mbedge_ ## NAME(uint8_t *dstU, \
                                             uint8_t *dstV, \
                                             ptrdiff_t s, \
                                             int e, int i, int hvt);

DECLARE_LOOP_FILTER(sse2)
DECLARE_LOOP_FILTER(ssse3)
DECLARE_LOOP_FILTER(sse4)

#endif /* HAVE_X86ASM */

#define VP8_LUMA_MC_FUNC(IDX, SIZE, OPT) \
    c->put_vp8_epel_pixels_tab[IDX][0][2] = ff_put_vp8_epel ## SIZE ## _h6_ ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][2][0] = ff_put_vp8_epel ## SIZE ## _v6_ ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][2][2] = ff_put_vp8_epel ## SIZE ## _h6v6_ ## OPT

#define VP8_MC_FUNC(IDX, SIZE, OPT) \
    c->put_vp8_epel_pixels_tab[IDX][0][1] = ff_put_vp8_epel ## SIZE ## _h4_ ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][1][0] = ff_put_vp8_epel ## SIZE ## _v4_ ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][1][1] = ff_put_vp8_epel ## SIZE ## _h4v4_ ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][1][2] = ff_put_vp8_epel ## SIZE ## _h6v4_ ## OPT; \
    c->put_vp8_epel_pixels_tab[IDX][2][1] = ff_put_vp8_epel ## SIZE ## _h4v6_ ## OPT; \
    VP8_LUMA_MC_FUNC(IDX, SIZE, OPT)

#define VP8_BILINEAR_MC_FUNC(IDX, SIZE, OPT) \
    c->put_vp8_bilinear_pixels_tab[IDX][0][1] = ff_put_vp8_bilinear ## SIZE ## _h_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][0][2] = ff_put_vp8_bilinear ## SIZE ## _h_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][1][0] = ff_put_vp8_bilinear ## SIZE ## _v_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][1][1] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][1][2] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][2][0] = ff_put_vp8_bilinear ## SIZE ## _v_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][2][1] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \
    c->put_vp8_bilinear_pixels_tab[IDX][2][2] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT
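
/* put_vp8_{epel,bilinear}_pixels_tab is indexed as [size][v][h]: size 0/1/2
 * selects 16/8/4-wide blocks, and v/h select no filtering (0), the 4-tap
 * filter (1) or the 6-tap filter (2) in each direction.  The bilinear table
 * installs the same kernel in the 4-tap and 6-tap slots, since one bilinear
 * function covers both cases. */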

av_cold void ff_vp78dsp_init_x86(VP8DSPContext *c)
{
#if HAVE_X86ASM
    int cpu_flags = av_get_cpu_flags();

    if (EXTERNAL_MMX(cpu_flags)) {
        c->put_vp8_epel_pixels_tab[1][0][0]     =
        c->put_vp8_bilinear_pixels_tab[1][0][0] = ff_put_vp8_pixels8_mmx;
    }

    /* note that 4-tap width=16 functions are missing because w=16
     * is only used for luma, and luma is always a copy or sixtap. */
    if (EXTERNAL_MMXEXT(cpu_flags)) {
        VP8_MC_FUNC(2, 4, mmxext);
        VP8_BILINEAR_MC_FUNC(2, 4, mmxext);
    }

    if (EXTERNAL_SSE(cpu_flags)) {
        c->put_vp8_epel_pixels_tab[0][0][0]     =
        c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_sse;
    }

    if (EXTERNAL_SSE2_SLOW(cpu_flags)) {
        VP8_LUMA_MC_FUNC(0, 16, sse2);
        VP8_MC_FUNC(1, 8, sse2);
        VP8_BILINEAR_MC_FUNC(0, 16, sse2);
        VP8_BILINEAR_MC_FUNC(1, 8, sse2);
    }

    if (EXTERNAL_SSSE3(cpu_flags)) {
        VP8_LUMA_MC_FUNC(0, 16, ssse3);
        VP8_MC_FUNC(1, 8, ssse3);
        VP8_MC_FUNC(2, 4, ssse3);
        VP8_BILINEAR_MC_FUNC(0, 16, ssse3);
        VP8_BILINEAR_MC_FUNC(1, 8, ssse3);
        VP8_BILINEAR_MC_FUNC(2, 4, ssse3);
    }
#endif /* HAVE_X86ASM */
}
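
/* Both init functions assign pointers in order of increasingly specific CPU
 * extensions, so a more capable variant overrides an earlier one whenever the
 * corresponding cpu_flags bit is set. */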

av_cold void ff_vp8dsp_init_x86(VP8DSPContext *c)
{
#if HAVE_X86ASM
    int cpu_flags = av_get_cpu_flags();

    if (EXTERNAL_MMX(cpu_flags)) {
        c->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_mmx;
    }

    if (EXTERNAL_SSE(cpu_flags)) {
        c->vp8_idct_add    = ff_vp8_idct_add_sse;
        c->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_sse;
    }

    if (EXTERNAL_SSE2_SLOW(cpu_flags)) {
        c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_sse2;

        c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_sse2;
        c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_sse2;

        c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_sse2;
        c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_sse2;
    }

    if (EXTERNAL_SSE2(cpu_flags)) {
        c->vp8_idct_dc_add   = ff_vp8_idct_dc_add_sse2;
        c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_sse2;

        c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_sse2;

        c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_sse2;
        c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_sse2;

        c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_sse2;
        c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_sse2;
    }

    if (EXTERNAL_SSSE3(cpu_flags)) {
        c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_ssse3;
        c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_ssse3;

        c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_ssse3;
        c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_ssse3;
        c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_ssse3;
        c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_ssse3;

        c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_ssse3;
        c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_ssse3;
        c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_ssse3;
        c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_ssse3;
    }

    if (EXTERNAL_SSE4(cpu_flags)) {
        c->vp8_idct_dc_add = ff_vp8_idct_dc_add_sse4;

        c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_sse4;
        c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_sse4;
        c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_sse4;
    }
#endif /* HAVE_X86ASM */
}