FFmpeg
vp8dsp.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2010 David Conrad
3  * Copyright (C) 2010 Ronald S. Bultje
4  * Copyright (C) 2014 Peter Ross
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * VP8 compatible video decoder
26  */
27 
28 #include "config_components.h"
29 
30 #include "libavutil/common.h"
31 #include "libavutil/intreadwrite.h"
32 
33 #include "mathops.h"
34 #include "vp8dsp.h"
35 
/**
 * Generate the two "add DC to four 4x4 blocks" helpers for a codec prefix:
 *  - name##_idct_dc_add4uv_c: four blocks in a 2x2 arrangement (chroma),
 *    rows of blocks 4 lines apart;
 *  - name##_idct_dc_add4y_c:  four horizontally adjacent blocks (luma row).
 * Both simply forward to name##_idct_dc_add_c for each sub-block.
 */
#define MK_IDCT_DC_ADD4_C(name) \
static void name ## _idct_dc_add4uv_c(uint8_t *dst, int16_t block[4][16], \
                                      ptrdiff_t stride) \
{ \
    name ## _idct_dc_add_c(dst + stride * 0 + 0, block[0], stride); \
    name ## _idct_dc_add_c(dst + stride * 0 + 4, block[1], stride); \
    name ## _idct_dc_add_c(dst + stride * 4 + 0, block[2], stride); \
    name ## _idct_dc_add_c(dst + stride * 4 + 4, block[3], stride); \
} \
 \
static void name ## _idct_dc_add4y_c(uint8_t *dst, int16_t block[4][16], \
                                     ptrdiff_t stride) \
{ \
    name ## _idct_dc_add_c(dst +  0, block[0], stride); \
    name ## _idct_dc_add_c(dst +  4, block[1], stride); \
    name ## _idct_dc_add_c(dst +  8, block[2], stride); \
    name ## _idct_dc_add_c(dst + 12, block[3], stride); \
}
54 
55 #if CONFIG_VP7_DECODER
/**
 * VP7 inverse transform of the 16 luma DC coefficients.
 * Writes the resulting DC of each 4x4 sub-block into block[..][..][0] and
 * clears the consumed dc[] entries.  23170, 12540 and 30274 are Q14
 * fixed-point transform constants; intermediates are accumulated in
 * 'unsigned' and cast back to int before the arithmetic shifts so the
 * wrapping arithmetic avoids signed-overflow UB while staying bit-exact.
 */
static void vp7_luma_dc_wht_c(int16_t block[4][4][16], int16_t dc[16])
{
    int i;
    unsigned a1, b1, c1, d1;
    int16_t tmp[16];

    /* first (horizontal) pass over the 4x4 DC matrix, rows of dc[] */
    for (i = 0; i < 4; i++) {
        a1 = (dc[i * 4 + 0] + dc[i * 4 + 2]) * 23170;
        b1 = (dc[i * 4 + 0] - dc[i * 4 + 2]) * 23170;
        c1 = dc[i * 4 + 1] * 12540 - dc[i * 4 + 3] * 30274;
        d1 = dc[i * 4 + 1] * 30274 + dc[i * 4 + 3] * 12540;
        tmp[i * 4 + 0] = (int)(a1 + d1) >> 14;
        tmp[i * 4 + 3] = (int)(a1 - d1) >> 14;
        tmp[i * 4 + 1] = (int)(b1 + c1) >> 14;
        tmp[i * 4 + 2] = (int)(b1 - c1) >> 14;
    }

    /* second (vertical) pass; AV_ZERO64 clears four dc[] entries at once */
    for (i = 0; i < 4; i++) {
        a1 = (tmp[i + 0] + tmp[i + 8]) * 23170;
        b1 = (tmp[i + 0] - tmp[i + 8]) * 23170;
        c1 = tmp[i + 4] * 12540 - tmp[i + 12] * 30274;
        d1 = tmp[i + 4] * 30274 + tmp[i + 12] * 12540;
        AV_ZERO64(dc + i * 4);
        /* +0x20000 rounds before the >>18 normalization */
        block[0][i][0] = (int)(a1 + d1 + 0x20000) >> 18;
        block[3][i][0] = (int)(a1 - d1 + 0x20000) >> 18;
        block[1][i][0] = (int)(b1 + c1 + 0x20000) >> 18;
        block[2][i][0] = (int)(b1 - c1 + 0x20000) >> 18;
    }
}
85 
/**
 * DC-only fast path of the VP7 luma DC transform: when only dc[0] is
 * non-zero, every 4x4 sub-block receives the same transformed DC value.
 * Clears dc[0] after use.
 */
static void vp7_luma_dc_wht_dc_c(int16_t block[4][4][16], int16_t dc[16])
{
    const int val = (23170 * (23170 * dc[0] >> 14) + 0x20000) >> 18;
    int row, col;

    dc[0] = 0;

    for (row = 0; row < 4; row++)
        for (col = 0; col < 4; col++)
            block[row][col][0] = val;
}
98 
/**
 * Full VP7 4x4 inverse transform; adds the result to dst and clears block.
 * Uses the same Q14 constants and unsigned-accumulation trick as
 * vp7_luma_dc_wht_c; final values are rounded (+0x20000), shifted by 18
 * and saturated onto the destination pixels via av_clip_uint8.
 */
static void vp7_idct_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
{
    int i;
    unsigned a1, b1, c1, d1;
    int16_t tmp[16];

    /* horizontal pass; AV_ZERO64 clears the coefficient row just consumed */
    for (i = 0; i < 4; i++) {
        a1 = (block[i * 4 + 0] + block[i * 4 + 2]) * 23170;
        b1 = (block[i * 4 + 0] - block[i * 4 + 2]) * 23170;
        c1 = block[i * 4 + 1] * 12540 - block[i * 4 + 3] * 30274;
        d1 = block[i * 4 + 1] * 30274 + block[i * 4 + 3] * 12540;
        AV_ZERO64(block + i * 4);
        tmp[i * 4 + 0] = (int)(a1 + d1) >> 14;
        tmp[i * 4 + 3] = (int)(a1 - d1) >> 14;
        tmp[i * 4 + 1] = (int)(b1 + c1) >> 14;
        tmp[i * 4 + 2] = (int)(b1 - c1) >> 14;
    }

    /* vertical pass: butterfly outputs map to destination rows 0,3,1,2 */
    for (i = 0; i < 4; i++) {
        a1 = (tmp[i + 0] + tmp[i + 8]) * 23170;
        b1 = (tmp[i + 0] - tmp[i + 8]) * 23170;
        c1 = tmp[i + 4] * 12540 - tmp[i + 12] * 30274;
        d1 = tmp[i + 4] * 30274 + tmp[i + 12] * 12540;
        dst[0 * stride + i] = av_clip_uint8(dst[0 * stride + i] +
                                            ((int)(a1 + d1 + 0x20000) >> 18));
        dst[3 * stride + i] = av_clip_uint8(dst[3 * stride + i] +
                                            ((int)(a1 - d1 + 0x20000) >> 18));
        dst[1 * stride + i] = av_clip_uint8(dst[1 * stride + i] +
                                            ((int)(b1 + c1 + 0x20000) >> 18));
        dst[2 * stride + i] = av_clip_uint8(dst[2 * stride + i] +
                                            ((int)(b1 - c1 + 0x20000) >> 18));
    }
}
132 
/**
 * DC-only VP7 inverse transform: add the transformed DC to every pixel of
 * the 4x4 destination block with saturation, then clear the coefficient.
 */
static void vp7_idct_dc_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
{
    const int dc = (23170 * (23170 * block[0] >> 14) + 0x20000) >> 18;
    int x, y;

    block[0] = 0;

    for (y = 0; y < 4; y++) {
        for (x = 0; x < 4; x++)
            dst[x] = av_clip_uint8(dst[x] + dc);
        dst += stride;
    }
}
146 
148 #endif /* CONFIG_VP7_DECODER */
149 
150 // TODO: Maybe add dequant
151 #if CONFIG_VP8_DECODER
/**
 * VP8 inverse Walsh-Hadamard transform of the 16 luma DC coefficients.
 * First pass butterflies over columns, second over rows with +3 rounding
 * before the final >>3.  Each result lands in block[i][j][0]; the consumed
 * dc[] row is cleared with AV_ZERO64.
 */
static void vp8_luma_dc_wht_c(int16_t block[4][4][16], int16_t dc[16])
{
    int i, t0, t1, t2, t3;

    /* column pass */
    for (i = 0; i < 4; i++) {
        t0 = dc[0 * 4 + i] + dc[3 * 4 + i];
        t1 = dc[1 * 4 + i] + dc[2 * 4 + i];
        t2 = dc[1 * 4 + i] - dc[2 * 4 + i];
        t3 = dc[0 * 4 + i] - dc[3 * 4 + i];

        dc[0 * 4 + i] = t0 + t1;
        dc[1 * 4 + i] = t3 + t2;
        dc[2 * 4 + i] = t0 - t1;
        dc[3 * 4 + i] = t3 - t2;
    }

    /* row pass with rounding, output scattered to the sub-block DCs */
    for (i = 0; i < 4; i++) {
        t0 = dc[i * 4 + 0] + dc[i * 4 + 3] + 3; // rounding
        t1 = dc[i * 4 + 1] + dc[i * 4 + 2];
        t2 = dc[i * 4 + 1] - dc[i * 4 + 2];
        t3 = dc[i * 4 + 0] - dc[i * 4 + 3] + 3; // rounding
        AV_ZERO64(dc + i * 4);

        block[i][0][0] = (t0 + t1) >> 3;
        block[i][1][0] = (t3 + t2) >> 3;
        block[i][2][0] = (t0 - t1) >> 3;
        block[i][3][0] = (t3 - t2) >> 3;
    }
}
181 
/**
 * DC-only fast path of the VP8 luma WHT: with only dc[0] non-zero, all
 * sixteen 4x4 sub-blocks get the same rounded DC value.  Clears dc[0].
 */
static void vp8_luma_dc_wht_dc_c(int16_t block[4][4][16], int16_t dc[16])
{
    const int val = (dc[0] + 3) >> 3;
    int row, col;

    dc[0] = 0;

    for (row = 0; row < 4; row++)
        for (col = 0; col < 4; col++)
            block[row][col][0] = val;
}
194 
/* Fixed-point (Q16) multiply helpers for the VP8 4x4 IDCT constants;
 * MUL_20091 computes a*(20091/65536) + a, MUL_35468 computes a*(35468/65536). */
#define MUL_20091(a) ((((a) * 20091) >> 16) + (a))
#define MUL_35468(a) (((a) * 35468) >> 16)
197 
/**
 * VP8 4x4 inverse transform; adds the result onto dst with saturation and
 * clears the coefficients.  First pass works on columns of block (storing
 * the transpose into tmp); second pass works on columns of tmp, with +4
 * rounding and >>3 applied only on the final output.
 */
static void vp8_idct_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
{
    int i, t0, t1, t2, t3;
    int16_t tmp[16];

    for (i = 0; i < 4; i++) {
        t0 = block[0 * 4 + i] + block[2 * 4 + i];
        t1 = block[0 * 4 + i] - block[2 * 4 + i];
        t2 = MUL_35468(block[1 * 4 + i]) - MUL_20091(block[3 * 4 + i]);
        t3 = MUL_20091(block[1 * 4 + i]) + MUL_35468(block[3 * 4 + i]);
        /* clear the column just consumed */
        block[0 * 4 + i] = 0;
        block[1 * 4 + i] = 0;
        block[2 * 4 + i] = 0;
        block[3 * 4 + i] = 0;

        tmp[i * 4 + 0] = t0 + t3;
        tmp[i * 4 + 1] = t1 + t2;
        tmp[i * 4 + 2] = t1 - t2;
        tmp[i * 4 + 3] = t0 - t3;
    }

    for (i = 0; i < 4; i++) {
        t0 = tmp[0 * 4 + i] + tmp[2 * 4 + i];
        t1 = tmp[0 * 4 + i] - tmp[2 * 4 + i];
        t2 = MUL_35468(tmp[1 * 4 + i]) - MUL_20091(tmp[3 * 4 + i]);
        t3 = MUL_20091(tmp[1 * 4 + i]) + MUL_35468(tmp[3 * 4 + i]);

        dst[0] = av_clip_uint8(dst[0] + ((t0 + t3 + 4) >> 3));
        dst[1] = av_clip_uint8(dst[1] + ((t1 + t2 + 4) >> 3));
        dst[2] = av_clip_uint8(dst[2] + ((t1 - t2 + 4) >> 3));
        dst[3] = av_clip_uint8(dst[3] + ((t0 - t3 + 4) >> 3));
        dst += stride;
    }
}
232 
/**
 * DC-only VP8 inverse transform: add the rounded DC to every pixel of the
 * 4x4 destination block with saturation, then clear the coefficient.
 */
static void vp8_idct_dc_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
{
    const int dc = (block[0] + 4) >> 3;
    int x, y;

    block[0] = 0;

    for (y = 0; y < 4; y++) {
        for (x = 0; x < 4; x++)
            dst[x] = av_clip_uint8(dst[x] + dc);
        dst += stride;
    }
}
246 
248 #endif /* CONFIG_VP8_DECODER */
249 
// because I like only having two parameters to pass functions...
// Declares locals p3..p0 (pixels on one side of the edge) and q0..q3 (the
// other side), read at +/- multiples of 'stride' around p.  av_unused
// silences warnings in filters that only use a subset of them.
#define LOAD_PIXELS \
    int av_unused p3 = p[-4 * stride]; \
    int av_unused p2 = p[-3 * stride]; \
    int av_unused p1 = p[-2 * stride]; \
    int av_unused p0 = p[-1 * stride]; \
    int av_unused q0 = p[ 0 * stride]; \
    int av_unused q1 = p[ 1 * stride]; \
    int av_unused q2 = p[ 2 * stride]; \
    int av_unused q3 = p[ 3 * stride];

// Clamp n into signed 8-bit range via the crop table (cm must point into
// ff_crop_tab + MAX_NEG_CROP in the enclosing scope).
#define clip_int8(n) (cm[(n) + 0x80] - 0x80)
262 
263 static av_always_inline void filter_common(uint8_t *p, ptrdiff_t stride,
264  int is4tap, int is_vp7)
265 {
267  int a, f1, f2;
268  const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
269 
270  a = 3 * (q0 - p0);
271 
272  if (is4tap)
273  a += clip_int8(p1 - q1);
274 
275  a = clip_int8(a);
276 
277  // We deviate from the spec here with c(a+3) >> 3
278  // since that's what libvpx does.
279  f1 = FFMIN(a + 4, 127) >> 3;
280 
281  if (is_vp7)
282  f2 = f1 - ((a & 7) == 4);
283  else
284  f2 = FFMIN(a + 3, 127) >> 3;
285 
286  // Despite what the spec says, we do need to clamp here to
287  // be bitexact with libvpx.
288  p[-1 * stride] = cm[p0 + f2];
289  p[ 0 * stride] = cm[q0 - f1];
290 
291  // only used for _inner on blocks without high edge variance
292  if (!is4tap) {
293  a = (f1 + 1) >> 1;
294  p[-2 * stride] = cm[p1 + a];
295  p[ 1 * stride] = cm[q1 - a];
296  }
297 }
298 
/* VP7 flavour of the common filter: selects the VP7 f2 rounding rule. */
static av_always_inline void vp7_filter_common(uint8_t *p, ptrdiff_t stride,
                                               int is4tap)
{
    filter_common(p, stride, is4tap, IS_VP7);
}
304 
/* VP8 flavour of the common filter: selects the VP8/libvpx f2 rounding. */
static av_always_inline void vp8_filter_common(uint8_t *p, ptrdiff_t stride,
                                               int is4tap)
{
    filter_common(p, stride, is4tap, IS_VP8);
}
310 
311 static av_always_inline int vp7_simple_limit(uint8_t *p, ptrdiff_t stride,
312  int flim)
313 {
315  return FFABS(p0 - q0) <= flim;
316 }
317 
318 static av_always_inline int vp8_simple_limit(uint8_t *p, ptrdiff_t stride,
319  int flim)
320 {
322  return 2 * FFABS(p0 - q0) + (FFABS(p1 - q1) >> 1) <= flim;
323 }
324 
/**
 * E - limit at the macroblock edge
 * I - limit for interior difference
 *
 * Generates vp7_/vp8_normal_limit: true when the codec's simple edge test
 * passes AND every adjacent-pixel difference on both sides of the edge is
 * within I.
 */
#define NORMAL_LIMIT(vpn) \
static av_always_inline int vp ## vpn ## _normal_limit(uint8_t *p, \
                                                       ptrdiff_t stride, \
                                                       int E, int I) \
{ \
    LOAD_PIXELS \
    return vp ## vpn ## _simple_limit(p, stride, E) && \
           FFABS(p3 - p2) <= I && FFABS(p2 - p1) <= I && \
           FFABS(p1 - p0) <= I && FFABS(q3 - q2) <= I && \
           FFABS(q2 - q1) <= I && FFABS(q1 - q0) <= I; \
}

NORMAL_LIMIT(7)
NORMAL_LIMIT(8)
343 
344 // high edge variance
345 static av_always_inline int hev(uint8_t *p, ptrdiff_t stride, int thresh)
346 {
348  return FFABS(p1 - p0) > thresh || FFABS(q1 - q0) > thresh;
349 }
350 
351 static av_always_inline void filter_mbedge(uint8_t *p, ptrdiff_t stride)
352 {
353  int a0, a1, a2, w;
354  const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
355 
357 
358  w = clip_int8(p1 - q1);
359  w = clip_int8(w + 3 * (q0 - p0));
360 
361  a0 = (27 * w + 63) >> 7;
362  a1 = (18 * w + 63) >> 7;
363  a2 = (9 * w + 63) >> 7;
364 
365  p[-3 * stride] = cm[p2 + a2];
366  p[-2 * stride] = cm[p1 + a1];
367  p[-1 * stride] = cm[p0 + a0];
368  p[ 0 * stride] = cm[q0 - a0];
369  p[ 1 * stride] = cm[q1 - a1];
370  p[ 2 * stride] = cm[q2 - a2];
371 }
372 
/**
 * Generate the edge and inner loop filters for one codec/direction/size.
 * 'stridea' steps along the edge (one pixel per iteration), 'strideb'
 * steps across it.  Edge variant: 4-tap common filter on high-variance
 * pixels, strong mbedge filter otherwise.  Inner variant: 4-tap on
 * high-variance pixels, 2-pixel-wider common filter otherwise.
 */
#define LOOP_FILTER(vpn, dir, size, stridea, strideb, maybe_inline) \
static maybe_inline \
void vpn ## _ ## dir ## _loop_filter ## size ## _c(uint8_t *dst, \
                                                   ptrdiff_t stride, \
                                                   int flim_E, int flim_I, \
                                                   int hev_thresh) \
{ \
    int i; \
    for (i = 0; i < size; i++) \
        if (vpn ## _normal_limit(dst + i * stridea, strideb, \
                                 flim_E, flim_I)) { \
            if (hev(dst + i * stridea, strideb, hev_thresh)) \
                vpn ## _filter_common(dst + i * stridea, strideb, 1); \
            else \
                filter_mbedge(dst + i * stridea, strideb); \
        } \
} \
 \
static maybe_inline \
void vpn ## _ ## dir ## _loop_filter ## size ## _inner_c(uint8_t *dst, \
                                                         ptrdiff_t stride, \
                                                         int flim_E, \
                                                         int flim_I, \
                                                         int hev_thresh) \
{ \
    int i; \
    for (i = 0; i < size; i++) \
        if (vpn ## _normal_limit(dst + i * stridea, strideb, \
                                 flim_E, flim_I)) { \
            int hv = hev(dst + i * stridea, strideb, hev_thresh); \
            if (hv) \
                vpn ## _filter_common(dst + i * stridea, strideb, 1); \
            else \
                vpn ## _filter_common(dst + i * stridea, strideb, 0); \
        } \
}
409 
/**
 * Generate the 8-pixel filters (via LOOP_FILTER) plus chroma wrappers that
 * run the same 8-pixel edge/inner filter on both the U and V planes.
 */
#define UV_LOOP_FILTER(vpn, dir, stridea, strideb) \
LOOP_FILTER(vpn, dir, 8, stridea, strideb, av_always_inline) \
static void vpn ## _ ## dir ## _loop_filter8uv_c(uint8_t *dstU, \
                                                 uint8_t *dstV, \
                                                 ptrdiff_t stride, int fE, \
                                                 int fI, int hev_thresh) \
{ \
    vpn ## _ ## dir ## _loop_filter8_c(dstU, stride, fE, fI, hev_thresh); \
    vpn ## _ ## dir ## _loop_filter8_c(dstV, stride, fE, fI, hev_thresh); \
} \
 \
static void vpn ## _ ## dir ## _loop_filter8uv_inner_c(uint8_t *dstU, \
                                                       uint8_t *dstV, \
                                                       ptrdiff_t stride, \
                                                       int fE, int fI, \
                                                       int hev_thresh) \
{ \
    vpn ## _ ## dir ## _loop_filter8_inner_c(dstU, stride, fE, fI, \
                                             hev_thresh); \
    vpn ## _ ## dir ## _loop_filter8_inner_c(dstV, stride, fE, fI, \
                                             hev_thresh); \
}
432 
/**
 * Generate the "simple" 16-pixel loop filters: only the codec's simple
 * limit test gates the 4-tap common filter.  The v variant filters across
 * rows at each of 16 columns; the h variant filters across columns at each
 * of 16 rows (pixel step 1).
 */
#define LOOP_FILTER_SIMPLE(vpn) \
static void vpn ## _v_loop_filter_simple_c(uint8_t *dst, ptrdiff_t stride, \
                                           int flim) \
{ \
    int i; \
    for (i = 0; i < 16; i++) \
        if (vpn ## _simple_limit(dst + i, stride, flim)) \
            vpn ## _filter_common(dst + i, stride, 1); \
} \
 \
static void vpn ## _h_loop_filter_simple_c(uint8_t *dst, ptrdiff_t stride, \
                                           int flim) \
{ \
    int i; \
    for (i = 0; i < 16; i++) \
        if (vpn ## _simple_limit(dst + i * stride, 1, flim)) \
            vpn ## _filter_common(dst + i * stride, 1, 1); \
}
451 
/* Instantiate the complete loop-filter set (16-pixel v/h, 8-pixel + uv
 * wrappers, and the simple filters) for one codec prefix. */
#define LOOP_FILTERS(vpn) \
    LOOP_FILTER(vpn, v, 16, 1, stride, ) \
    LOOP_FILTER(vpn, h, 16, stride, 1, ) \
    UV_LOOP_FILTER(vpn, v, 1, stride) \
    UV_LOOP_FILTER(vpn, h, stride, 1) \
    LOOP_FILTER_SIMPLE(vpn) \

/* Six-tap subpel interpolation coefficients for the seven fractional
 * positions (indexed by mx-1 or my-1).  Taps 1 and 4 are stored as
 * magnitudes; FILTER_4TAP/FILTER_6TAP subtract them.  Rows with zero
 * outer taps are effectively 4-tap (bilinear-sharpened) filters. */
static const uint8_t subpel_filters[7][6] = {
    { 0,  6, 123,  12,  1, 0 },
    { 2, 11, 108,  36,  8, 1 },
    { 0,  9,  93,  50,  6, 0 },
    { 3, 16,  77,  77, 16, 3 },
    { 0,  6,  50,  93,  9, 0 },
    { 1,  8,  36, 108, 11, 2 },
    { 0,  1,  12, 123,  6, 0 },
};
468 
/* Generate full-pel copy functions: memcpy WIDTH bytes per row for h rows.
 * x and y (the subpel phases) are unused in the copy case. */
#define PUT_PIXELS(WIDTH) \
static void put_vp8_pixels ## WIDTH ## _c(uint8_t *dst, ptrdiff_t dststride, \
                                          const uint8_t *src, ptrdiff_t srcstride, \
                                          int h, int x, int y) \
{ \
    int i; \
    for (i = 0; i < h; i++, dst += dststride, src += srcstride) \
        memcpy(dst, src, WIDTH); \
}

PUT_PIXELS(16)
PUT_PIXELS(8)
PUT_PIXELS(4)
482 
/* Apply the 6-tap (or middle-4-tap) filter F around src[x], taps spaced
 * 'stride' apart; +64 rounds before the >>7 normalization and the crop
 * table cm saturates to 8 bits.  F[1] and F[4] are stored unsigned and
 * subtracted here. */
#define FILTER_6TAP(src, F, stride) \
    cm[(F[2] * src[x + 0 * stride] - F[1] * src[x - 1 * stride] + \
        F[0] * src[x - 2 * stride] + F[3] * src[x + 1 * stride] - \
        F[4] * src[x + 2 * stride] + F[5] * src[x + 3 * stride] + 64) >> 7]

#define FILTER_4TAP(src, F, stride) \
    cm[(F[2] * src[x + 0 * stride] - F[1] * src[x - 1 * stride] + \
        F[3] * src[x + 1 * stride] - F[4] * src[x + 2 * stride] + 64) >> 7]
491 
/* Generate a horizontal subpel MC function of SIZE columns and TAPS taps;
 * the filter row is chosen by the horizontal phase mx (1..7). */
#define VP8_EPEL_H(SIZE, TAPS) \
static void put_vp8_epel ## SIZE ## _h ## TAPS ## _c(uint8_t *dst, \
                                                     ptrdiff_t dststride, \
                                                     const uint8_t *src, \
                                                     ptrdiff_t srcstride, \
                                                     int h, int mx, int my) \
{ \
    const uint8_t *filter = subpel_filters[mx - 1]; \
    const uint8_t *cm     = ff_crop_tab + MAX_NEG_CROP; \
    int x, y; \
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = FILTER_ ## TAPS ## TAP(src, filter, 1); \
        dst += dststride; \
        src += srcstride; \
    } \
}
509 
/* Generate a vertical subpel MC function of SIZE columns and TAPS taps;
 * the filter row is chosen by the vertical phase my (1..7) and taps are
 * spaced srcstride apart. */
#define VP8_EPEL_V(SIZE, TAPS) \
static void put_vp8_epel ## SIZE ## _v ## TAPS ## _c(uint8_t *dst, \
                                                     ptrdiff_t dststride, \
                                                     const uint8_t *src, \
                                                     ptrdiff_t srcstride, \
                                                     int h, int mx, int my) \
{ \
    const uint8_t *filter = subpel_filters[my - 1]; \
    const uint8_t *cm     = ff_crop_tab + MAX_NEG_CROP; \
    int x, y; \
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = FILTER_ ## TAPS ## TAP(src, filter, srcstride); \
        dst += dststride; \
        src += srcstride; \
    } \
}
527 
/* Generate a 2-D subpel MC function: a horizontal HTAPS pass into tmp_array
 * (starting 1 or 2 rows above the block, depending on the vertical tap
 * count), followed by a vertical VTAPS pass from tmp into dst. */
#define VP8_EPEL_HV(SIZE, HTAPS, VTAPS) \
static void \
put_vp8_epel ## SIZE ## _h ## HTAPS ## v ## VTAPS ## _c(uint8_t *dst, \
                                                        ptrdiff_t dststride, \
                                                        const uint8_t *src, \
                                                        ptrdiff_t srcstride, \
                                                        int h, int mx, \
                                                        int my) \
{ \
    const uint8_t *filter = subpel_filters[mx - 1]; \
    const uint8_t *cm     = ff_crop_tab + MAX_NEG_CROP; \
    int x, y; \
    uint8_t tmp_array[(2 * SIZE + VTAPS - 1) * SIZE]; \
    uint8_t *tmp = tmp_array; \
    src -= (2 - (VTAPS == 4)) * srcstride; \
    \
    for (y = 0; y < h + VTAPS - 1; y++) { \
        for (x = 0; x < SIZE; x++) \
            tmp[x] = FILTER_ ## HTAPS ## TAP(src, filter, 1); \
        tmp += SIZE; \
        src += srcstride; \
    } \
    tmp    = tmp_array + (2 - (VTAPS == 4)) * SIZE; \
    filter = subpel_filters[my - 1]; \
    \
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = FILTER_ ## VTAPS ## TAP(tmp, filter, SIZE); \
        dst += dststride; \
        tmp += SIZE; \
    } \
}
560 
/* Instantiate horizontal, vertical and 2-D subpel MC for block widths
 * 16/8/4 with every 4-/6-tap combination. */
VP8_EPEL_H(16, 4)
VP8_EPEL_H(8,  4)
VP8_EPEL_H(4,  4)
VP8_EPEL_H(16, 6)
VP8_EPEL_H(8,  6)
VP8_EPEL_H(4,  6)
VP8_EPEL_V(16, 4)
VP8_EPEL_V(8,  4)
VP8_EPEL_V(4,  4)
VP8_EPEL_V(16, 6)
VP8_EPEL_V(8,  6)
VP8_EPEL_V(4,  6)

VP8_EPEL_HV(16, 4, 4)
VP8_EPEL_HV(8,  4, 4)
VP8_EPEL_HV(4,  4, 4)
VP8_EPEL_HV(16, 4, 6)
VP8_EPEL_HV(8,  4, 6)
VP8_EPEL_HV(4,  4, 6)
VP8_EPEL_HV(16, 6, 4)
VP8_EPEL_HV(8,  6, 4)
VP8_EPEL_HV(4,  6, 4)
VP8_EPEL_HV(16, 6, 6)
VP8_EPEL_HV(8,  6, 6)
VP8_EPEL_HV(4,  6, 6)
586 
/* Generate bilinear MC functions for one block width: horizontal (weights
 * 8-mx/mx), vertical (8-my/my), and a 2-D variant that runs the horizontal
 * pass into tmp_array (h+1 rows) followed by the vertical pass.  Each pass
 * rounds with +4 before the >>3. */
#define VP8_BILINEAR(SIZE) \
static void put_vp8_bilinear ## SIZE ## _h_c(uint8_t *dst, ptrdiff_t dstride, \
                                             const uint8_t *src, ptrdiff_t sstride, \
                                             int h, int mx, int my) \
{ \
    int a = 8 - mx, b = mx; \
    int x, y; \
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = (a * src[x] + b * src[x + 1] + 4) >> 3; \
        dst += dstride; \
        src += sstride; \
    } \
} \
 \
static void put_vp8_bilinear ## SIZE ## _v_c(uint8_t *dst, ptrdiff_t dstride, \
                                             const uint8_t *src, ptrdiff_t sstride, \
                                             int h, int mx, int my) \
{ \
    int c = 8 - my, d = my; \
    int x, y; \
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = (c * src[x] + d * src[x + sstride] + 4) >> 3; \
        dst += dstride; \
        src += sstride; \
    } \
} \
 \
static void put_vp8_bilinear ## SIZE ## _hv_c(uint8_t *dst, \
                                              ptrdiff_t dstride, \
                                              const uint8_t *src, \
                                              ptrdiff_t sstride, \
                                              int h, int mx, int my) \
{ \
    int a = 8 - mx, b = mx; \
    int c = 8 - my, d = my; \
    int x, y; \
    uint8_t tmp_array[(2 * SIZE + 1) * SIZE]; \
    uint8_t *tmp = tmp_array; \
    for (y = 0; y < h + 1; y++) { \
        for (x = 0; x < SIZE; x++) \
            tmp[x] = (a * src[x] + b * src[x + 1] + 4) >> 3; \
        tmp += SIZE; \
        src += sstride; \
    } \
    tmp = tmp_array; \
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = (c * tmp[x] + d * tmp[x + SIZE] + 4) >> 3; \
        dst += dstride; \
        tmp += SIZE; \
    } \
}

VP8_BILINEAR(16)
VP8_BILINEAR(8)
VP8_BILINEAR(4)
645 
/* Fill one size row of the subpel table: [IDX][v][h] with 0 = copy,
 * 1 = 4-tap, 2 = 6-tap in each dimension. */
#define VP78_MC_FUNC(IDX, SIZE) \
    dsp->put_vp8_epel_pixels_tab[IDX][0][0] = put_vp8_pixels ## SIZE ## _c; \
    dsp->put_vp8_epel_pixels_tab[IDX][0][1] = put_vp8_epel ## SIZE ## _h4_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][0][2] = put_vp8_epel ## SIZE ## _h6_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][1][0] = put_vp8_epel ## SIZE ## _v4_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][1][1] = put_vp8_epel ## SIZE ## _h4v4_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][1][2] = put_vp8_epel ## SIZE ## _h6v4_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][2][0] = put_vp8_epel ## SIZE ## _v6_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][2][1] = put_vp8_epel ## SIZE ## _h4v6_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][2][2] = put_vp8_epel ## SIZE ## _h6v6_c

/* Fill one size row of the bilinear table; both non-zero indices in each
 * dimension reuse the same _h/_v/_hv function (the mx/my arguments carry
 * the actual phase). */
#define VP78_BILINEAR_MC_FUNC(IDX, SIZE) \
    dsp->put_vp8_bilinear_pixels_tab[IDX][0][0] = put_vp8_pixels ## SIZE ## _c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][0][1] = put_vp8_bilinear ## SIZE ## _h_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][0][2] = put_vp8_bilinear ## SIZE ## _h_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][1][0] = put_vp8_bilinear ## SIZE ## _v_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][1][1] = put_vp8_bilinear ## SIZE ## _hv_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][1][2] = put_vp8_bilinear ## SIZE ## _hv_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][2][0] = put_vp8_bilinear ## SIZE ## _v_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][2][1] = put_vp8_bilinear ## SIZE ## _hv_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][2][2] = put_vp8_bilinear ## SIZE ## _hv_c
667 
669 {
670  VP78_MC_FUNC(0, 16);
671  VP78_MC_FUNC(1, 8);
672  VP78_MC_FUNC(2, 4);
673 
674  VP78_BILINEAR_MC_FUNC(0, 16);
675  VP78_BILINEAR_MC_FUNC(1, 8);
676  VP78_BILINEAR_MC_FUNC(2, 4);
677 
678 #if ARCH_AARCH64
680 #elif ARCH_ARM
681  ff_vp78dsp_init_arm(dsp);
682 #elif ARCH_PPC
683  ff_vp78dsp_init_ppc(dsp);
684 #elif ARCH_RISCV
686 #elif ARCH_X86
687  ff_vp78dsp_init_x86(dsp);
688 #endif
689 }
690 
691 #if CONFIG_VP7_DECODER
692 LOOP_FILTERS(vp7)
693 
695 {
696  dsp->vp8_luma_dc_wht = vp7_luma_dc_wht_c;
697  dsp->vp8_luma_dc_wht_dc = vp7_luma_dc_wht_dc_c;
698  dsp->vp8_idct_add = vp7_idct_add_c;
699  dsp->vp8_idct_dc_add = vp7_idct_dc_add_c;
700  dsp->vp8_idct_dc_add4y = vp7_idct_dc_add4y_c;
701  dsp->vp8_idct_dc_add4uv = vp7_idct_dc_add4uv_c;
702 
703  dsp->vp8_v_loop_filter16y = vp7_v_loop_filter16_c;
704  dsp->vp8_h_loop_filter16y = vp7_h_loop_filter16_c;
705  dsp->vp8_v_loop_filter8uv = vp7_v_loop_filter8uv_c;
706  dsp->vp8_h_loop_filter8uv = vp7_h_loop_filter8uv_c;
707 
708  dsp->vp8_v_loop_filter16y_inner = vp7_v_loop_filter16_inner_c;
709  dsp->vp8_h_loop_filter16y_inner = vp7_h_loop_filter16_inner_c;
710  dsp->vp8_v_loop_filter8uv_inner = vp7_v_loop_filter8uv_inner_c;
711  dsp->vp8_h_loop_filter8uv_inner = vp7_h_loop_filter8uv_inner_c;
712 
713  dsp->vp8_v_loop_filter_simple = vp7_v_loop_filter_simple_c;
714  dsp->vp8_h_loop_filter_simple = vp7_h_loop_filter_simple_c;
715 
716 #if ARCH_RISCV
718 #endif
719 }
720 #endif /* CONFIG_VP7_DECODER */
721 
722 #if CONFIG_VP8_DECODER
723 LOOP_FILTERS(vp8)
724 
726 {
727  dsp->vp8_luma_dc_wht = vp8_luma_dc_wht_c;
728  dsp->vp8_luma_dc_wht_dc = vp8_luma_dc_wht_dc_c;
729  dsp->vp8_idct_add = vp8_idct_add_c;
730  dsp->vp8_idct_dc_add = vp8_idct_dc_add_c;
731  dsp->vp8_idct_dc_add4y = vp8_idct_dc_add4y_c;
732  dsp->vp8_idct_dc_add4uv = vp8_idct_dc_add4uv_c;
733 
734  dsp->vp8_v_loop_filter16y = vp8_v_loop_filter16_c;
735  dsp->vp8_h_loop_filter16y = vp8_h_loop_filter16_c;
736  dsp->vp8_v_loop_filter8uv = vp8_v_loop_filter8uv_c;
737  dsp->vp8_h_loop_filter8uv = vp8_h_loop_filter8uv_c;
738 
739  dsp->vp8_v_loop_filter16y_inner = vp8_v_loop_filter16_inner_c;
740  dsp->vp8_h_loop_filter16y_inner = vp8_h_loop_filter16_inner_c;
741  dsp->vp8_v_loop_filter8uv_inner = vp8_v_loop_filter8uv_inner_c;
742  dsp->vp8_h_loop_filter8uv_inner = vp8_h_loop_filter8uv_inner_c;
743 
744  dsp->vp8_v_loop_filter_simple = vp8_v_loop_filter_simple_c;
745  dsp->vp8_h_loop_filter_simple = vp8_h_loop_filter_simple_c;
746 
747 #if ARCH_AARCH64
749 #elif ARCH_ARM
750  ff_vp8dsp_init_arm(dsp);
751 #elif ARCH_RISCV
753 #elif ARCH_X86
754  ff_vp8dsp_init_x86(dsp);
755 #elif ARCH_MIPS
756  ff_vp8dsp_init_mips(dsp);
757 #elif ARCH_LOONGARCH
759 #endif
760 }
761 #endif /* CONFIG_VP8_DECODER */
VP8DSPContext::vp8_h_loop_filter8uv
void(* vp8_h_loop_filter8uv)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:54
VP8DSPContext::vp8_h_loop_filter8uv_inner
void(* vp8_h_loop_filter8uv_inner)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:65
q1
static const uint8_t q1[256]
Definition: twofish.c:100
IS_VP7
#define IS_VP7
Definition: vp8dsp.h:103
filter_mbedge
static av_always_inline void filter_mbedge(uint8_t *p, ptrdiff_t stride)
Definition: vp8dsp.c:351
VP8DSPContext::vp8_v_loop_filter8uv
void(* vp8_v_loop_filter8uv)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:52
vp7_filter_common
static av_always_inline void vp7_filter_common(uint8_t *p, ptrdiff_t stride, int is4tap)
Definition: vp8dsp.c:299
MK_IDCT_DC_ADD4_C
#define MK_IDCT_DC_ADD4_C(name)
Definition: vp8dsp.c:36
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
w
uint8_t w
Definition: llviddspenc.c:38
ff_vp78dsp_init_aarch64
av_cold void ff_vp78dsp_init_aarch64(VP8DSPContext *dsp)
Definition: vp8dsp_init_aarch64.c:43
VP8DSPContext::vp8_v_loop_filter16y
void(* vp8_v_loop_filter16y)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:48
VP78_MC_FUNC
#define VP78_MC_FUNC(IDX, SIZE)
Definition: vp8dsp.c:646
ff_vp8dsp_init_riscv
av_cold void ff_vp8dsp_init_riscv(VP8DSPContext *c)
Definition: vp8dsp_init.c:123
c1
static const uint64_t c1
Definition: murmur3.c:52
ff_crop_tab
#define ff_crop_tab
Definition: motionpixels_tablegen.c:26
vp8_filter_common
static av_always_inline void vp8_filter_common(uint8_t *p, ptrdiff_t stride, int is4tap)
Definition: vp8dsp.c:305
ff_vp78dsp_init_riscv
av_cold void ff_vp78dsp_init_riscv(VP8DSPContext *c)
Definition: vp8dsp_init.c:46
IS_VP8
#define IS_VP8(avctx)
Definition: libvpxenc.c:53
b1
static double b1(void *priv, double x, double y)
Definition: vf_xfade.c:2034
val
static double val(void *priv, double ch)
Definition: aeval.c:77
a2
static double a2(void *priv, double x, double y)
Definition: vf_xfade.c:2030
ff_vp7dsp_init
void ff_vp7dsp_init(VP8DSPContext *c)
ff_vp8dsp_init
void ff_vp8dsp_init(VP8DSPContext *c)
av_cold
#define av_cold
Definition: attributes.h:90
VP8DSPContext::vp8_v_loop_filter16y_inner
void(* vp8_v_loop_filter16y_inner)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:58
VP78_BILINEAR_MC_FUNC
#define VP78_BILINEAR_MC_FUNC(IDX, SIZE)
Definition: vp8dsp.c:657
vp8dsp.h
ff_vp78dsp_init_x86
void ff_vp78dsp_init_x86(VP8DSPContext *c)
Definition: vp8dsp_init.c:283
intreadwrite.h
AV_ZERO64
#define AV_ZERO64(d)
Definition: intreadwrite.h:666
VP8_BILINEAR
#define VP8_BILINEAR(SIZE)
Definition: vp8dsp.c:587
VP8_EPEL_HV
#define VP8_EPEL_HV(SIZE, HTAPS, VTAPS)
Definition: vp8dsp.c:528
VP8DSPContext::vp8_h_loop_filter_simple
void(* vp8_h_loop_filter_simple)(uint8_t *dst, ptrdiff_t stride, int flim)
Definition: vp8dsp.h:70
q0
static const uint8_t q0[256]
Definition: twofish.c:81
VP8DSPContext::vp8_v_loop_filter_simple
void(* vp8_v_loop_filter_simple)(uint8_t *dst, ptrdiff_t stride, int flim)
Definition: vp8dsp.h:69
clip_int8
#define clip_int8(n)
Definition: vp8dsp.c:261
VP8_EPEL_H
#define VP8_EPEL_H(SIZE, TAPS)
Definition: vp8dsp.c:492
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
VP8DSPContext::vp8_h_loop_filter16y
void(* vp8_h_loop_filter16y)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:50
mathops.h
ff_vp8dsp_init_x86
void ff_vp8dsp_init_x86(VP8DSPContext *c)
Definition: vp8dsp_init.c:323
vp8_simple_limit
static av_always_inline int vp8_simple_limit(uint8_t *p, ptrdiff_t stride, int flim)
Definition: vp8dsp.c:318
VP8DSPContext::vp8_h_loop_filter16y_inner
void(* vp8_h_loop_filter16y_inner)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:60
VP8DSPContext::vp8_luma_dc_wht
void(* vp8_luma_dc_wht)(int16_t block[4][4][16], int16_t dc[16])
Definition: vp8dsp.h:38
VP8DSPContext
Definition: vp8dsp.h:37
ff_vp7dsp_init_riscv
av_cold void ff_vp7dsp_init_riscv(VP8DSPContext *c)
Definition: vp7dsp_init.c:42
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
NORMAL_LIMIT
#define NORMAL_LIMIT(vpn)
E - limit at the macroblock edge I - limit for interior difference.
Definition: vp8dsp.c:329
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:83
VP8_EPEL_V
#define VP8_EPEL_V(SIZE, TAPS)
Definition: vp8dsp.c:510
VP8DSPContext::vp8_idct_dc_add
void(* vp8_idct_dc_add)(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
Definition: vp8dsp.h:41
vp7_simple_limit
static av_always_inline int vp7_simple_limit(uint8_t *p, ptrdiff_t stride, int flim)
Definition: vp8dsp.c:311
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
a0
static double a0(void *priv, double x, double y)
Definition: vf_xfade.c:2028
VP8DSPContext::vp8_v_loop_filter8uv_inner
void(* vp8_v_loop_filter8uv_inner)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:62
ff_vp8dsp_init_mips
av_cold void ff_vp8dsp_init_mips(VP8DSPContext *dsp)
Definition: vp8dsp_init_mips.c:77
ff_vp8dsp_init_aarch64
av_cold void ff_vp8dsp_init_aarch64(VP8DSPContext *dsp)
Definition: vp8dsp_init_aarch64.c:101
filter_common
static av_always_inline void filter_common(uint8_t *p, ptrdiff_t stride, int is4tap, int is_vp7)
Definition: vp8dsp.c:263
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
hev
static av_always_inline int hev(uint8_t *p, ptrdiff_t stride, int thresh)
Definition: vp8dsp.c:345
common.h
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
stride
#define stride
Definition: h264pred_template.c:537
VP8DSPContext::vp8_idct_dc_add4uv
void(* vp8_idct_dc_add4uv)(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride)
Definition: vp8dsp.h:44
LOOP_FILTERS
#define LOOP_FILTERS(vpn)
Definition: vp8dsp.c:452
VP8DSPContext::vp8_idct_dc_add4y
void(* vp8_idct_dc_add4y)(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride)
Definition: vp8dsp.h:42
PUT_PIXELS
#define PUT_PIXELS(WIDTH)
Definition: vp8dsp.c:469
LOAD_PIXELS
#define LOAD_PIXELS
Definition: vp8dsp.c:251
cm
#define cm
Definition: dvbsubdec.c:40
ff_vp8dsp_init_arm
av_cold void ff_vp8dsp_init_arm(VP8DSPContext *dsp)
Definition: vp8dsp_init_arm.c:36
VP8DSPContext::vp8_luma_dc_wht_dc
void(* vp8_luma_dc_wht_dc)(int16_t block[4][4][16], int16_t dc[16])
Definition: vp8dsp.h:39
av_clip_uint8
#define av_clip_uint8
Definition: common.h:106
ff_vp78dsp_init_ppc
av_cold void ff_vp78dsp_init_ppc(VP8DSPContext *c)
Definition: vp8dsp_altivec.c:332
ff_vp78dsp_init_arm
av_cold void ff_vp78dsp_init_arm(VP8DSPContext *dsp)
Definition: vp8dsp_init_arm.c:26
VP8DSPContext::vp8_idct_add
void(* vp8_idct_add)(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
Definition: vp8dsp.h:40
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
a1
static double a1(void *priv, double x, double y)
Definition: vf_xfade.c:2029
ff_vp8dsp_init_loongarch
av_cold void ff_vp8dsp_init_loongarch(VP8DSPContext *dsp)
Definition: vp8dsp_init_loongarch.c:44
MAX_NEG_CROP
#define MAX_NEG_CROP
Definition: mathops.h:31
subpel_filters
static const uint8_t subpel_filters[7][6]
Definition: vp8dsp.c:459
ff_vp78dsp_init
av_cold void ff_vp78dsp_init(VP8DSPContext *dsp)
Definition: vp8dsp.c:668