FFmpeg
vp8.c
Go to the documentation of this file.
1 /*
2  * VP7/VP8 compatible video decoder
3  *
4  * Copyright (C) 2010 David Conrad
5  * Copyright (C) 2010 Ronald S. Bultje
6  * Copyright (C) 2010 Fiona Glaser
7  * Copyright (C) 2012 Daniel Kang
8  * Copyright (C) 2014 Peter Ross
9  *
10  * This file is part of FFmpeg.
11  *
12  * FFmpeg is free software; you can redistribute it and/or
13  * modify it under the terms of the GNU Lesser General Public
14  * License as published by the Free Software Foundation; either
15  * version 2.1 of the License, or (at your option) any later version.
16  *
17  * FFmpeg is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20  * Lesser General Public License for more details.
21  *
22  * You should have received a copy of the GNU Lesser General Public
23  * License along with FFmpeg; if not, write to the Free Software
24  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25  */
26 
27 #include "libavutil/imgutils.h"
28 
29 #include "avcodec.h"
30 #include "hwaccel.h"
31 #include "internal.h"
32 #include "mathops.h"
33 #include "rectangle.h"
34 #include "thread.h"
35 #include "vp8.h"
36 #include "vp8data.h"
37 
38 #if ARCH_ARM
39 # include "arm/vp8.h"
40 #endif
41 
42 #if CONFIG_VP7_DECODER && CONFIG_VP8_DECODER
43 #define VPX(vp7, f) (vp7 ? vp7_ ## f : vp8_ ## f)
44 #elif CONFIG_VP7_DECODER
45 #define VPX(vp7, f) vp7_ ## f
46 #else // CONFIG_VP8_DECODER
47 #define VPX(vp7, f) vp8_ ## f
48 #endif
49 
50 static void free_buffers(VP8Context *s)
51 {
52  int i;
53  if (s->thread_data)
54  for (i = 0; i < MAX_THREADS; i++) {
55 #if HAVE_THREADS
56  pthread_cond_destroy(&s->thread_data[i].cond);
58 #endif
60  }
61  av_freep(&s->thread_data);
64  av_freep(&s->top_nnz);
65  av_freep(&s->top_border);
66 
67  s->macroblocks = NULL;
68 }
69 
71 {
72  int ret;
73  if ((ret = ff_thread_get_buffer(s->avctx, &f->tf,
74  ref ? AV_GET_BUFFER_FLAG_REF : 0)) < 0)
75  return ret;
76  if (!(f->seg_map = av_buffer_allocz(s->mb_width * s->mb_height)))
77  goto fail;
78  if (s->avctx->hwaccel) {
79  const AVHWAccel *hwaccel = s->avctx->hwaccel;
80  if (hwaccel->frame_priv_data_size) {
82  if (!f->hwaccel_priv_buf)
83  goto fail;
85  }
86  }
87  return 0;
88 
89 fail:
92  return AVERROR(ENOMEM);
93 }
94 
96 {
101 }
102 
103 #if CONFIG_VP8_DECODER
104 static int vp8_ref_frame(VP8Context *s, VP8Frame *dst, VP8Frame *src)
105 {
106  int ret;
107 
108  vp8_release_frame(s, dst);
109 
110  if ((ret = ff_thread_ref_frame(&dst->tf, &src->tf)) < 0)
111  return ret;
112  if (src->seg_map &&
113  !(dst->seg_map = av_buffer_ref(src->seg_map))) {
114  vp8_release_frame(s, dst);
115  return AVERROR(ENOMEM);
116  }
117  if (src->hwaccel_picture_private) {
119  if (!dst->hwaccel_priv_buf)
120  return AVERROR(ENOMEM);
122  }
123 
124  return 0;
125 }
126 #endif /* CONFIG_VP8_DECODER */
127 
128 static void vp8_decode_flush_impl(AVCodecContext *avctx, int free_mem)
129 {
130  VP8Context *s = avctx->priv_data;
131  int i;
132 
133  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
134  vp8_release_frame(s, &s->frames[i]);
135  memset(s->framep, 0, sizeof(s->framep));
136 
137  if (free_mem)
138  free_buffers(s);
139 }
140 
141 static void vp8_decode_flush(AVCodecContext *avctx)
142 {
143  vp8_decode_flush_impl(avctx, 0);
144 }
145 
147 {
148  VP8Frame *frame = NULL;
149  int i;
150 
151  // find a free buffer
152  for (i = 0; i < 5; i++)
153  if (&s->frames[i] != s->framep[VP56_FRAME_CURRENT] &&
154  &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
155  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
156  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2]) {
157  frame = &s->frames[i];
158  break;
159  }
160  if (i == 5) {
161  av_log(s->avctx, AV_LOG_FATAL, "Ran out of free frames!\n");
162  abort();
163  }
164  if (frame->tf.f->buf[0])
165  vp8_release_frame(s, frame);
166 
167  return frame;
168 }
169 
171 {
172  enum AVPixelFormat pix_fmts[] = {
173 #if CONFIG_VP8_VAAPI_HWACCEL
175 #endif
176 #if CONFIG_VP8_NVDEC_HWACCEL
178 #endif
181  };
182 
183  return ff_get_format(s->avctx, pix_fmts);
184 }
185 
186 static av_always_inline
187 int update_dimensions(VP8Context *s, int width, int height, int is_vp7)
188 {
189  AVCodecContext *avctx = s->avctx;
190  int i, ret;
191 
192  if (width != s->avctx->width || ((width+15)/16 != s->mb_width || (height+15)/16 != s->mb_height) && s->macroblocks_base ||
193  height != s->avctx->height) {
195 
196  ret = ff_set_dimensions(s->avctx, width, height);
197  if (ret < 0)
198  return ret;
199  }
200 
201  if (!s->actually_webp && !is_vp7) {
202  s->pix_fmt = get_pixel_format(s);
203  if (s->pix_fmt < 0)
204  return AVERROR(EINVAL);
205  avctx->pix_fmt = s->pix_fmt;
206  }
207 
208  s->mb_width = (s->avctx->coded_width + 15) / 16;
209  s->mb_height = (s->avctx->coded_height + 15) / 16;
210 
211  s->mb_layout = is_vp7 || avctx->active_thread_type == FF_THREAD_SLICE &&
212  avctx->thread_count > 1;
213  if (!s->mb_layout) { // Frame threading and one thread
214  s->macroblocks_base = av_mallocz((s->mb_width + s->mb_height * 2 + 1) *
215  sizeof(*s->macroblocks));
217  } else // Sliced threading
218  s->macroblocks_base = av_mallocz((s->mb_width + 2) * (s->mb_height + 2) *
219  sizeof(*s->macroblocks));
220  s->top_nnz = av_mallocz(s->mb_width * sizeof(*s->top_nnz));
221  s->top_border = av_mallocz((s->mb_width + 1) * sizeof(*s->top_border));
223 
224  if (!s->macroblocks_base || !s->top_nnz || !s->top_border ||
225  !s->thread_data || (!s->intra4x4_pred_mode_top && !s->mb_layout)) {
226  free_buffers(s);
227  return AVERROR(ENOMEM);
228  }
229 
230  for (i = 0; i < MAX_THREADS; i++) {
232  av_mallocz(s->mb_width * sizeof(*s->thread_data[0].filter_strength));
233  if (!s->thread_data[i].filter_strength) {
234  free_buffers(s);
235  return AVERROR(ENOMEM);
236  }
237 #if HAVE_THREADS
238  pthread_mutex_init(&s->thread_data[i].lock, NULL);
239  pthread_cond_init(&s->thread_data[i].cond, NULL);
240 #endif
241  }
242 
243  s->macroblocks = s->macroblocks_base + 1;
244 
245  return 0;
246 }
247 
249 {
250  return update_dimensions(s, width, height, IS_VP7);
251 }
252 
254 {
255  return update_dimensions(s, width, height, IS_VP8);
256 }
257 
258 
260 {
261  VP56RangeCoder *c = &s->c;
262  int i;
263 
266 
269 
270  for (i = 0; i < 4; i++)
272 
273  for (i = 0; i < 4; i++)
275  }
276  if (s->segmentation.update_map)
277  for (i = 0; i < 3; i++)
278  s->prob->segmentid[i] = vp8_rac_get(c) ? vp8_rac_get_uint(c, 8) : 255;
279 }
280 
282 {
283  VP56RangeCoder *c = &s->c;
284  int i;
285 
286  for (i = 0; i < 4; i++) {
287  if (vp8_rac_get(c)) {
288  s->lf_delta.ref[i] = vp8_rac_get_uint(c, 6);
289 
290  if (vp8_rac_get(c))
291  s->lf_delta.ref[i] = -s->lf_delta.ref[i];
292  }
293  }
294 
295  for (i = MODE_I4x4; i <= VP8_MVMODE_SPLIT; i++) {
296  if (vp8_rac_get(c)) {
297  s->lf_delta.mode[i] = vp8_rac_get_uint(c, 6);
298 
299  if (vp8_rac_get(c))
300  s->lf_delta.mode[i] = -s->lf_delta.mode[i];
301  }
302  }
303 }
304 
305 static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
306 {
307  const uint8_t *sizes = buf;
308  int i;
309  int ret;
310 
311  s->num_coeff_partitions = 1 << vp8_rac_get_uint(&s->c, 2);
312 
313  buf += 3 * (s->num_coeff_partitions - 1);
314  buf_size -= 3 * (s->num_coeff_partitions - 1);
315  if (buf_size < 0)
316  return -1;
317 
318  for (i = 0; i < s->num_coeff_partitions - 1; i++) {
319  int size = AV_RL24(sizes + 3 * i);
320  if (buf_size - size < 0)
321  return -1;
323 
324  ret = ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, size);
325  if (ret < 0)
326  return ret;
327  buf += size;
328  buf_size -= size;
329  }
330 
331  s->coeff_partition_size[i] = buf_size;
332  ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, buf_size);
333 
334  return 0;
335 }
336 
337 static void vp7_get_quants(VP8Context *s)
338 {
339  VP56RangeCoder *c = &s->c;
340 
341  int yac_qi = vp8_rac_get_uint(c, 7);
342  int ydc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
343  int y2dc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
344  int y2ac_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
345  int uvdc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
346  int uvac_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
347 
348  s->qmat[0].luma_qmul[0] = vp7_ydc_qlookup[ydc_qi];
349  s->qmat[0].luma_qmul[1] = vp7_yac_qlookup[yac_qi];
350  s->qmat[0].luma_dc_qmul[0] = vp7_y2dc_qlookup[y2dc_qi];
351  s->qmat[0].luma_dc_qmul[1] = vp7_y2ac_qlookup[y2ac_qi];
352  s->qmat[0].chroma_qmul[0] = FFMIN(vp7_ydc_qlookup[uvdc_qi], 132);
353  s->qmat[0].chroma_qmul[1] = vp7_yac_qlookup[uvac_qi];
354 }
355 
356 static void vp8_get_quants(VP8Context *s)
357 {
358  VP56RangeCoder *c = &s->c;
359  int i, base_qi;
360 
361  s->quant.yac_qi = vp8_rac_get_uint(c, 7);
362  s->quant.ydc_delta = vp8_rac_get_sint(c, 4);
363  s->quant.y2dc_delta = vp8_rac_get_sint(c, 4);
364  s->quant.y2ac_delta = vp8_rac_get_sint(c, 4);
365  s->quant.uvdc_delta = vp8_rac_get_sint(c, 4);
366  s->quant.uvac_delta = vp8_rac_get_sint(c, 4);
367 
368  for (i = 0; i < 4; i++) {
369  if (s->segmentation.enabled) {
370  base_qi = s->segmentation.base_quant[i];
371  if (!s->segmentation.absolute_vals)
372  base_qi += s->quant.yac_qi;
373  } else
374  base_qi = s->quant.yac_qi;
375 
376  s->qmat[i].luma_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + s->quant.ydc_delta, 7)];
377  s->qmat[i].luma_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi, 7)];
378  s->qmat[i].luma_dc_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + s->quant.y2dc_delta, 7)] * 2;
379  /* 101581>>16 is equivalent to 155/100 */
380  s->qmat[i].luma_dc_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi + s->quant.y2ac_delta, 7)] * 101581 >> 16;
381  s->qmat[i].chroma_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + s->quant.uvdc_delta, 7)];
382  s->qmat[i].chroma_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi + s->quant.uvac_delta, 7)];
383 
384  s->qmat[i].luma_dc_qmul[1] = FFMAX(s->qmat[i].luma_dc_qmul[1], 8);
385  s->qmat[i].chroma_qmul[0] = FFMIN(s->qmat[i].chroma_qmul[0], 132);
386  }
387 }
388 
389 /**
390  * Determine which buffers golden and altref should be updated with after this frame.
391  * The spec isn't clear here, so I'm going by my understanding of what libvpx does
392  *
393  * Intra frames update all 3 references
394  * Inter frames update VP56_FRAME_PREVIOUS if the update_last flag is set
395  * If the update (golden|altref) flag is set, it's updated with the current frame
396  * if update_last is set, and VP56_FRAME_PREVIOUS otherwise.
397  * If the flag is not set, the number read means:
398  * 0: no update
399  * 1: VP56_FRAME_PREVIOUS
400  * 2: update golden with altref, or update altref with golden
401  */
403 {
404  VP56RangeCoder *c = &s->c;
405 
406  if (update)
407  return VP56_FRAME_CURRENT;
408 
409  switch (vp8_rac_get_uint(c, 2)) {
410  case 1:
411  return VP56_FRAME_PREVIOUS;
412  case 2:
414  }
415  return VP56_FRAME_NONE;
416 }
417 
419 {
420  int i, j;
421  for (i = 0; i < 4; i++)
422  for (j = 0; j < 16; j++)
423  memcpy(s->prob->token[i][j], vp8_token_default_probs[i][vp8_coeff_band[j]],
424  sizeof(s->prob->token[i][j]));
425 }
426 
428 {
429  VP56RangeCoder *c = &s->c;
430  int i, j, k, l, m;
431 
432  for (i = 0; i < 4; i++)
433  for (j = 0; j < 8; j++)
434  for (k = 0; k < 3; k++)
435  for (l = 0; l < NUM_DCT_TOKENS-1; l++)
437  int prob = vp8_rac_get_uint(c, 8);
438  for (m = 0; vp8_coeff_band_indexes[j][m] >= 0; m++)
439  s->prob->token[i][vp8_coeff_band_indexes[j][m]][k][l] = prob;
440  }
441 }
442 
443 #define VP7_MVC_SIZE 17
444 #define VP8_MVC_SIZE 19
445 
447  int mvc_size)
448 {
449  VP56RangeCoder *c = &s->c;
450  int i, j;
451 
452  if (vp8_rac_get(c))
453  for (i = 0; i < 4; i++)
454  s->prob->pred16x16[i] = vp8_rac_get_uint(c, 8);
455  if (vp8_rac_get(c))
456  for (i = 0; i < 3; i++)
457  s->prob->pred8x8c[i] = vp8_rac_get_uint(c, 8);
458 
459  // 17.2 MV probability update
460  for (i = 0; i < 2; i++)
461  for (j = 0; j < mvc_size; j++)
463  s->prob->mvc[i][j] = vp8_rac_get_nn(c);
464 }
465 
466 static void update_refs(VP8Context *s)
467 {
468  VP56RangeCoder *c = &s->c;
469 
470  int update_golden = vp8_rac_get(c);
471  int update_altref = vp8_rac_get(c);
472 
473  s->update_golden = ref_to_update(s, update_golden, VP56_FRAME_GOLDEN);
474  s->update_altref = ref_to_update(s, update_altref, VP56_FRAME_GOLDEN2);
475 }
476 
477 static void copy_chroma(AVFrame *dst, AVFrame *src, int width, int height)
478 {
479  int i, j;
480 
481  for (j = 1; j < 3; j++) {
482  for (i = 0; i < height / 2; i++)
483  memcpy(dst->data[j] + i * dst->linesize[j],
484  src->data[j] + i * src->linesize[j], width / 2);
485  }
486 }
487 
488 static void fade(uint8_t *dst, ptrdiff_t dst_linesize,
489  const uint8_t *src, ptrdiff_t src_linesize,
490  int width, int height,
491  int alpha, int beta)
492 {
493  int i, j;
494  for (j = 0; j < height; j++) {
495  const uint8_t *src2 = src + j * src_linesize;
496  uint8_t *dst2 = dst + j * dst_linesize;
497  for (i = 0; i < width; i++) {
498  uint8_t y = src2[i];
499  dst2[i] = av_clip_uint8(y + ((y * beta) >> 8) + alpha);
500  }
501  }
502 }
503 
504 static int vp7_fade_frame(VP8Context *s, int alpha, int beta)
505 {
506  int ret;
507 
508  if (!s->keyframe && (alpha || beta)) {
509  int width = s->mb_width * 16;
510  int height = s->mb_height * 16;
511  AVFrame *src, *dst;
512 
513  if (!s->framep[VP56_FRAME_PREVIOUS] ||
514  !s->framep[VP56_FRAME_GOLDEN]) {
515  av_log(s->avctx, AV_LOG_WARNING, "Discarding interframe without a prior keyframe!\n");
516  return AVERROR_INVALIDDATA;
517  }
518 
519  dst =
520  src = s->framep[VP56_FRAME_PREVIOUS]->tf.f;
521 
522  /* preserve the golden frame, write a new previous frame */
525  if ((ret = vp8_alloc_frame(s, s->framep[VP56_FRAME_PREVIOUS], 1)) < 0)
526  return ret;
527 
528  dst = s->framep[VP56_FRAME_PREVIOUS]->tf.f;
529 
530  copy_chroma(dst, src, width, height);
531  }
532 
533  fade(dst->data[0], dst->linesize[0],
534  src->data[0], src->linesize[0],
535  width, height, alpha, beta);
536  }
537 
538  return 0;
539 }
540 
541 static int vp7_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
542 {
543  VP56RangeCoder *c = &s->c;
544  int part1_size, hscale, vscale, i, j, ret;
545  int width = s->avctx->width;
546  int height = s->avctx->height;
547  int alpha = 0;
548  int beta = 0;
549 
550  if (buf_size < 4) {
551  return AVERROR_INVALIDDATA;
552  }
553 
554  s->profile = (buf[0] >> 1) & 7;
555  if (s->profile > 1) {
556  avpriv_request_sample(s->avctx, "Unknown profile %d", s->profile);
557  return AVERROR_INVALIDDATA;
558  }
559 
560  s->keyframe = !(buf[0] & 1);
561  s->invisible = 0;
562  part1_size = AV_RL24(buf) >> 4;
563 
564  if (buf_size < 4 - s->profile + part1_size) {
565  av_log(s->avctx, AV_LOG_ERROR, "Buffer size %d is too small, needed : %d\n", buf_size, 4 - s->profile + part1_size);
566  return AVERROR_INVALIDDATA;
567  }
568 
569  buf += 4 - s->profile;
570  buf_size -= 4 - s->profile;
571 
572  memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_epel_pixels_tab, sizeof(s->put_pixels_tab));
573 
574  ret = ff_vp56_init_range_decoder(c, buf, part1_size);
575  if (ret < 0)
576  return ret;
577  buf += part1_size;
578  buf_size -= part1_size;
579 
580  /* A. Dimension information (keyframes only) */
581  if (s->keyframe) {
582  width = vp8_rac_get_uint(c, 12);
583  height = vp8_rac_get_uint(c, 12);
584  hscale = vp8_rac_get_uint(c, 2);
585  vscale = vp8_rac_get_uint(c, 2);
586  if (hscale || vscale)
587  avpriv_request_sample(s->avctx, "Upscaling");
588 
592  sizeof(s->prob->pred16x16));
594  sizeof(s->prob->pred8x8c));
595  for (i = 0; i < 2; i++)
596  memcpy(s->prob->mvc[i], vp7_mv_default_prob[i],
597  sizeof(vp7_mv_default_prob[i]));
598  memset(&s->segmentation, 0, sizeof(s->segmentation));
599  memset(&s->lf_delta, 0, sizeof(s->lf_delta));
600  memcpy(s->prob[0].scan, ff_zigzag_scan, sizeof(s->prob[0].scan));
601  }
602 
603  if (s->keyframe || s->profile > 0)
604  memset(s->inter_dc_pred, 0 , sizeof(s->inter_dc_pred));
605 
606  /* B. Decoding information for all four macroblock-level features */
607  for (i = 0; i < 4; i++) {
608  s->feature_enabled[i] = vp8_rac_get(c);
609  if (s->feature_enabled[i]) {
611 
612  for (j = 0; j < 3; j++)
613  s->feature_index_prob[i][j] =
614  vp8_rac_get(c) ? vp8_rac_get_uint(c, 8) : 255;
615 
616  if (vp7_feature_value_size[s->profile][i])
617  for (j = 0; j < 4; j++)
618  s->feature_value[i][j] =
620  }
621  }
622 
623  s->segmentation.enabled = 0;
624  s->segmentation.update_map = 0;
625  s->lf_delta.enabled = 0;
626 
627  s->num_coeff_partitions = 1;
628  ret = ff_vp56_init_range_decoder(&s->coeff_partition[0], buf, buf_size);
629  if (ret < 0)
630  return ret;
631 
632  if (!s->macroblocks_base || /* first frame */
633  width != s->avctx->width || height != s->avctx->height ||
634  (width + 15) / 16 != s->mb_width || (height + 15) / 16 != s->mb_height) {
635  if ((ret = vp7_update_dimensions(s, width, height)) < 0)
636  return ret;
637  }
638 
639  /* C. Dequantization indices */
640  vp7_get_quants(s);
641 
642  /* D. Golden frame update flag (a Flag) for interframes only */
643  if (!s->keyframe) {
646  }
647 
648  s->update_last = 1;
649  s->update_probabilities = 1;
650  s->fade_present = 1;
651 
652  if (s->profile > 0) {
654  if (!s->update_probabilities)
655  s->prob[1] = s->prob[0];
656 
657  if (!s->keyframe)
658  s->fade_present = vp8_rac_get(c);
659  }
660 
661  if (vpX_rac_is_end(c))
662  return AVERROR_INVALIDDATA;
663  /* E. Fading information for previous frame */
664  if (s->fade_present && vp8_rac_get(c)) {
665  alpha = (int8_t) vp8_rac_get_uint(c, 8);
666  beta = (int8_t) vp8_rac_get_uint(c, 8);
667  }
668 
669  /* F. Loop filter type */
670  if (!s->profile)
671  s->filter.simple = vp8_rac_get(c);
672 
673  /* G. DCT coefficient ordering specification */
674  if (vp8_rac_get(c))
675  for (i = 1; i < 16; i++)
676  s->prob[0].scan[i] = ff_zigzag_scan[vp8_rac_get_uint(c, 4)];
677 
678  /* H. Loop filter levels */
679  if (s->profile > 0)
680  s->filter.simple = vp8_rac_get(c);
681  s->filter.level = vp8_rac_get_uint(c, 6);
682  s->filter.sharpness = vp8_rac_get_uint(c, 3);
683 
684  /* I. DCT coefficient probability update; 13.3 Token Probability Updates */
686 
687  s->mbskip_enabled = 0;
688 
689  /* J. The remaining frame header data occurs ONLY FOR INTERFRAMES */
690  if (!s->keyframe) {
691  s->prob->intra = vp8_rac_get_uint(c, 8);
692  s->prob->last = vp8_rac_get_uint(c, 8);
694  }
695 
696  if (vpX_rac_is_end(c))
697  return AVERROR_INVALIDDATA;
698 
699  if ((ret = vp7_fade_frame(s, alpha, beta)) < 0)
700  return ret;
701 
702  return 0;
703 }
704 
705 static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
706 {
707  VP56RangeCoder *c = &s->c;
708  int header_size, hscale, vscale, ret;
709  int width = s->avctx->width;
710  int height = s->avctx->height;
711 
712  if (buf_size < 3) {
713  av_log(s->avctx, AV_LOG_ERROR, "Insufficent data (%d) for header\n", buf_size);
714  return AVERROR_INVALIDDATA;
715  }
716 
717  s->keyframe = !(buf[0] & 1);
718  s->profile = (buf[0]>>1) & 7;
719  s->invisible = !(buf[0] & 0x10);
720  header_size = AV_RL24(buf) >> 5;
721  buf += 3;
722  buf_size -= 3;
723 
724  s->header_partition_size = header_size;
725 
726  if (s->profile > 3)
727  av_log(s->avctx, AV_LOG_WARNING, "Unknown profile %d\n", s->profile);
728 
729  if (!s->profile)
731  sizeof(s->put_pixels_tab));
732  else // profile 1-3 use bilinear, 4+ aren't defined so whatever
734  sizeof(s->put_pixels_tab));
735 
736  if (header_size > buf_size - 7 * s->keyframe) {
737  av_log(s->avctx, AV_LOG_ERROR, "Header size larger than data provided\n");
738  return AVERROR_INVALIDDATA;
739  }
740 
741  if (s->keyframe) {
742  if (AV_RL24(buf) != 0x2a019d) {
744  "Invalid start code 0x%x\n", AV_RL24(buf));
745  return AVERROR_INVALIDDATA;
746  }
747  width = AV_RL16(buf + 3) & 0x3fff;
748  height = AV_RL16(buf + 5) & 0x3fff;
749  hscale = buf[4] >> 6;
750  vscale = buf[6] >> 6;
751  buf += 7;
752  buf_size -= 7;
753 
754  if (hscale || vscale)
755  avpriv_request_sample(s->avctx, "Upscaling");
756 
760  sizeof(s->prob->pred16x16));
762  sizeof(s->prob->pred8x8c));
763  memcpy(s->prob->mvc, vp8_mv_default_prob,
764  sizeof(s->prob->mvc));
765  memset(&s->segmentation, 0, sizeof(s->segmentation));
766  memset(&s->lf_delta, 0, sizeof(s->lf_delta));
767  }
768 
769  ret = ff_vp56_init_range_decoder(c, buf, header_size);
770  if (ret < 0)
771  return ret;
772  buf += header_size;
773  buf_size -= header_size;
774 
775  if (s->keyframe) {
776  s->colorspace = vp8_rac_get(c);
777  if (s->colorspace)
778  av_log(s->avctx, AV_LOG_WARNING, "Unspecified colorspace\n");
779  s->fullrange = vp8_rac_get(c);
780  }
781 
782  if ((s->segmentation.enabled = vp8_rac_get(c)))
784  else
785  s->segmentation.update_map = 0; // FIXME: move this to some init function?
786 
787  s->filter.simple = vp8_rac_get(c);
788  s->filter.level = vp8_rac_get_uint(c, 6);
789  s->filter.sharpness = vp8_rac_get_uint(c, 3);
790 
791  if ((s->lf_delta.enabled = vp8_rac_get(c))) {
792  s->lf_delta.update = vp8_rac_get(c);
793  if (s->lf_delta.update)
794  update_lf_deltas(s);
795  }
796 
797  if (setup_partitions(s, buf, buf_size)) {
798  av_log(s->avctx, AV_LOG_ERROR, "Invalid partitions\n");
799  return AVERROR_INVALIDDATA;
800  }
801 
802  if (!s->macroblocks_base || /* first frame */
803  width != s->avctx->width || height != s->avctx->height ||
804  (width+15)/16 != s->mb_width || (height+15)/16 != s->mb_height)
805  if ((ret = vp8_update_dimensions(s, width, height)) < 0)
806  return ret;
807 
808  vp8_get_quants(s);
809 
810  if (!s->keyframe) {
811  update_refs(s);
813  s->sign_bias[VP56_FRAME_GOLDEN2 /* altref */] = vp8_rac_get(c);
814  }
815 
816  // if we aren't saving this frame's probabilities for future frames,
817  // make a copy of the current probabilities
818  if (!(s->update_probabilities = vp8_rac_get(c)))
819  s->prob[1] = s->prob[0];
820 
821  s->update_last = s->keyframe || vp8_rac_get(c);
822 
824 
825  if ((s->mbskip_enabled = vp8_rac_get(c)))
826  s->prob->mbskip = vp8_rac_get_uint(c, 8);
827 
828  if (!s->keyframe) {
829  s->prob->intra = vp8_rac_get_uint(c, 8);
830  s->prob->last = vp8_rac_get_uint(c, 8);
831  s->prob->golden = vp8_rac_get_uint(c, 8);
833  }
834 
835  // Record the entropy coder state here so that hwaccels can use it.
836  s->c.code_word = vp56_rac_renorm(&s->c);
837  s->coder_state_at_header_end.input = s->c.buffer - (-s->c.bits / 8);
841 
842  return 0;
843 }
844 
845 static av_always_inline
846 void clamp_mv(VP8mvbounds *s, VP56mv *dst, const VP56mv *src)
847 {
848  dst->x = av_clip(src->x, av_clip(s->mv_min.x, INT16_MIN, INT16_MAX),
849  av_clip(s->mv_max.x, INT16_MIN, INT16_MAX));
850  dst->y = av_clip(src->y, av_clip(s->mv_min.y, INT16_MIN, INT16_MAX),
851  av_clip(s->mv_max.y, INT16_MIN, INT16_MAX));
852 }
853 
854 /**
855  * Motion vector coding, 17.1.
856  */
858 {
859  int bit, x = 0;
860 
861  if (vp56_rac_get_prob_branchy(c, p[0])) {
862  int i;
863 
864  for (i = 0; i < 3; i++)
865  x += vp56_rac_get_prob(c, p[9 + i]) << i;
866  for (i = (vp7 ? 7 : 9); i > 3; i--)
867  x += vp56_rac_get_prob(c, p[9 + i]) << i;
868  if (!(x & (vp7 ? 0xF0 : 0xFFF0)) || vp56_rac_get_prob(c, p[12]))
869  x += 8;
870  } else {
871  // small_mvtree
872  const uint8_t *ps = p + 2;
873  bit = vp56_rac_get_prob(c, *ps);
874  ps += 1 + 3 * bit;
875  x += 4 * bit;
876  bit = vp56_rac_get_prob(c, *ps);
877  ps += 1 + bit;
878  x += 2 * bit;
879  x += vp56_rac_get_prob(c, *ps);
880  }
881 
882  return (x && vp56_rac_get_prob(c, p[1])) ? -x : x;
883 }
884 
886 {
887  return read_mv_component(c, p, 1);
888 }
889 
891 {
892  return read_mv_component(c, p, 0);
893 }
894 
895 static av_always_inline
896 const uint8_t *get_submv_prob(uint32_t left, uint32_t top, int is_vp7)
897 {
898  if (is_vp7)
899  return vp7_submv_prob;
900 
901  if (left == top)
902  return vp8_submv_prob[4 - !!left];
903  if (!top)
904  return vp8_submv_prob[2];
905  return vp8_submv_prob[1 - !!left];
906 }
907 
908 /**
909  * Split motion vector prediction, 16.4.
910  * @returns the number of motion vectors parsed (2, 4 or 16)
911  */
912 static av_always_inline
914  int layout, int is_vp7)
915 {
916  int part_idx;
917  int n, num;
918  VP8Macroblock *top_mb;
919  VP8Macroblock *left_mb = &mb[-1];
920  const uint8_t *mbsplits_left = vp8_mbsplits[left_mb->partitioning];
921  const uint8_t *mbsplits_top, *mbsplits_cur, *firstidx;
922  VP56mv *top_mv;
923  VP56mv *left_mv = left_mb->bmv;
924  VP56mv *cur_mv = mb->bmv;
925 
926  if (!layout) // layout is inlined, s->mb_layout is not
927  top_mb = &mb[2];
928  else
929  top_mb = &mb[-s->mb_width - 1];
930  mbsplits_top = vp8_mbsplits[top_mb->partitioning];
931  top_mv = top_mb->bmv;
932 
936  else
937  part_idx = VP8_SPLITMVMODE_8x8;
938  } else {
939  part_idx = VP8_SPLITMVMODE_4x4;
940  }
941 
942  num = vp8_mbsplit_count[part_idx];
943  mbsplits_cur = vp8_mbsplits[part_idx],
944  firstidx = vp8_mbfirstidx[part_idx];
945  mb->partitioning = part_idx;
946 
947  for (n = 0; n < num; n++) {
948  int k = firstidx[n];
949  uint32_t left, above;
950  const uint8_t *submv_prob;
951 
952  if (!(k & 3))
953  left = AV_RN32A(&left_mv[mbsplits_left[k + 3]]);
954  else
955  left = AV_RN32A(&cur_mv[mbsplits_cur[k - 1]]);
956  if (k <= 3)
957  above = AV_RN32A(&top_mv[mbsplits_top[k + 12]]);
958  else
959  above = AV_RN32A(&cur_mv[mbsplits_cur[k - 4]]);
960 
961  submv_prob = get_submv_prob(left, above, is_vp7);
962 
963  if (vp56_rac_get_prob_branchy(c, submv_prob[0])) {
964  if (vp56_rac_get_prob_branchy(c, submv_prob[1])) {
965  if (vp56_rac_get_prob_branchy(c, submv_prob[2])) {
966  mb->bmv[n].y = mb->mv.y +
967  read_mv_component(c, s->prob->mvc[0], is_vp7);
968  mb->bmv[n].x = mb->mv.x +
969  read_mv_component(c, s->prob->mvc[1], is_vp7);
970  } else {
971  AV_ZERO32(&mb->bmv[n]);
972  }
973  } else {
974  AV_WN32A(&mb->bmv[n], above);
975  }
976  } else {
977  AV_WN32A(&mb->bmv[n], left);
978  }
979  }
980 
981  return num;
982 }
983 
984 /**
985  * The vp7 reference decoder uses a padding macroblock column (added to right
986  * edge of the frame) to guard against illegal macroblock offsets. The
987  * algorithm has bugs that permit offsets to straddle the padding column.
988  * This function replicates those bugs.
989  *
990  * @param[out] edge_x macroblock x address
991  * @param[out] edge_y macroblock y address
992  *
993  * @return macroblock offset legal (boolean)
994  */
995 static int vp7_calculate_mb_offset(int mb_x, int mb_y, int mb_width,
996  int xoffset, int yoffset, int boundary,
997  int *edge_x, int *edge_y)
998 {
999  int vwidth = mb_width + 1;
1000  int new = (mb_y + yoffset) * vwidth + mb_x + xoffset;
1001  if (new < boundary || new % vwidth == vwidth - 1)
1002  return 0;
1003  *edge_y = new / vwidth;
1004  *edge_x = new % vwidth;
1005  return 1;
1006 }
1007 
1008 static const VP56mv *get_bmv_ptr(const VP8Macroblock *mb, int subblock)
1009 {
1010  return &mb->bmv[mb->mode == VP8_MVMODE_SPLIT ? vp8_mbsplits[mb->partitioning][subblock] : 0];
1011 }
1012 
1013 static av_always_inline
1015  int mb_x, int mb_y, int layout)
1016 {
1017  VP8Macroblock *mb_edge[12];
1018  enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR };
1019  enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
1020  int idx = CNT_ZERO;
1021  VP56mv near_mv[3];
1022  uint8_t cnt[3] = { 0 };
1023  VP56RangeCoder *c = &s->c;
1024  int i;
1025 
1026  AV_ZERO32(&near_mv[0]);
1027  AV_ZERO32(&near_mv[1]);
1028  AV_ZERO32(&near_mv[2]);
1029 
1030  for (i = 0; i < VP7_MV_PRED_COUNT; i++) {
1031  const VP7MVPred * pred = &vp7_mv_pred[i];
1032  int edge_x, edge_y;
1033 
1034  if (vp7_calculate_mb_offset(mb_x, mb_y, s->mb_width, pred->xoffset,
1035  pred->yoffset, !s->profile, &edge_x, &edge_y)) {
1036  VP8Macroblock *edge = mb_edge[i] = (s->mb_layout == 1)
1037  ? s->macroblocks_base + 1 + edge_x +
1038  (s->mb_width + 1) * (edge_y + 1)
1039  : s->macroblocks + edge_x +
1040  (s->mb_height - edge_y - 1) * 2;
1041  uint32_t mv = AV_RN32A(get_bmv_ptr(edge, vp7_mv_pred[i].subblock));
1042  if (mv) {
1043  if (AV_RN32A(&near_mv[CNT_NEAREST])) {
1044  if (mv == AV_RN32A(&near_mv[CNT_NEAREST])) {
1045  idx = CNT_NEAREST;
1046  } else if (AV_RN32A(&near_mv[CNT_NEAR])) {
1047  if (mv != AV_RN32A(&near_mv[CNT_NEAR]))
1048  continue;
1049  idx = CNT_NEAR;
1050  } else {
1051  AV_WN32A(&near_mv[CNT_NEAR], mv);
1052  idx = CNT_NEAR;
1053  }
1054  } else {
1055  AV_WN32A(&near_mv[CNT_NEAREST], mv);
1056  idx = CNT_NEAREST;
1057  }
1058  } else {
1059  idx = CNT_ZERO;
1060  }
1061  } else {
1062  idx = CNT_ZERO;
1063  }
1064  cnt[idx] += vp7_mv_pred[i].score;
1065  }
1066 
1068 
1069  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_ZERO]][0])) {
1070  mb->mode = VP8_MVMODE_MV;
1071 
1072  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAREST]][1])) {
1073 
1074  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAR]][2])) {
1075 
1076  if (cnt[CNT_NEAREST] > cnt[CNT_NEAR])
1077  AV_WN32A(&mb->mv, cnt[CNT_ZERO] > cnt[CNT_NEAREST] ? 0 : AV_RN32A(&near_mv[CNT_NEAREST]));
1078  else
1079  AV_WN32A(&mb->mv, cnt[CNT_ZERO] > cnt[CNT_NEAR] ? 0 : AV_RN32A(&near_mv[CNT_NEAR]));
1080 
1081  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAR]][3])) {
1082  mb->mode = VP8_MVMODE_SPLIT;
1083  mb->mv = mb->bmv[decode_splitmvs(s, c, mb, layout, IS_VP7) - 1];
1084  } else {
1085  mb->mv.y += vp7_read_mv_component(c, s->prob->mvc[0]);
1086  mb->mv.x += vp7_read_mv_component(c, s->prob->mvc[1]);
1087  mb->bmv[0] = mb->mv;
1088  }
1089  } else {
1090  mb->mv = near_mv[CNT_NEAR];
1091  mb->bmv[0] = mb->mv;
1092  }
1093  } else {
1094  mb->mv = near_mv[CNT_NEAREST];
1095  mb->bmv[0] = mb->mv;
1096  }
1097  } else {
1098  mb->mode = VP8_MVMODE_ZERO;
1099  AV_ZERO32(&mb->mv);
1100  mb->bmv[0] = mb->mv;
1101  }
1102 }
1103 
1104 static av_always_inline
1106  int mb_x, int mb_y, int layout)
1107 {
1108  VP8Macroblock *mb_edge[3] = { 0 /* top */,
1109  mb - 1 /* left */,
1110  0 /* top-left */ };
1111  enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };
1112  enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
1113  int idx = CNT_ZERO;
1114  int cur_sign_bias = s->sign_bias[mb->ref_frame];
1115  int8_t *sign_bias = s->sign_bias;
1116  VP56mv near_mv[4];
1117  uint8_t cnt[4] = { 0 };
1118  VP56RangeCoder *c = &s->c;
1119 
1120  if (!layout) { // layout is inlined (s->mb_layout is not)
1121  mb_edge[0] = mb + 2;
1122  mb_edge[2] = mb + 1;
1123  } else {
1124  mb_edge[0] = mb - s->mb_width - 1;
1125  mb_edge[2] = mb - s->mb_width - 2;
1126  }
1127 
1128  AV_ZERO32(&near_mv[0]);
1129  AV_ZERO32(&near_mv[1]);
1130  AV_ZERO32(&near_mv[2]);
1131 
1132  /* Process MB on top, left and top-left */
1133 #define MV_EDGE_CHECK(n) \
1134  { \
1135  VP8Macroblock *edge = mb_edge[n]; \
1136  int edge_ref = edge->ref_frame; \
1137  if (edge_ref != VP56_FRAME_CURRENT) { \
1138  uint32_t mv = AV_RN32A(&edge->mv); \
1139  if (mv) { \
1140  if (cur_sign_bias != sign_bias[edge_ref]) { \
1141  /* SWAR negate of the values in mv. */ \
1142  mv = ~mv; \
1143  mv = ((mv & 0x7fff7fff) + \
1144  0x00010001) ^ (mv & 0x80008000); \
1145  } \
1146  if (!n || mv != AV_RN32A(&near_mv[idx])) \
1147  AV_WN32A(&near_mv[++idx], mv); \
1148  cnt[idx] += 1 + (n != 2); \
1149  } else \
1150  cnt[CNT_ZERO] += 1 + (n != 2); \
1151  } \
1152  }
1153 
1154  MV_EDGE_CHECK(0)
1155  MV_EDGE_CHECK(1)
1156  MV_EDGE_CHECK(2)
1157 
1159  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_ZERO]][0])) {
1160  mb->mode = VP8_MVMODE_MV;
1161 
1162  /* If we have three distinct MVs, merge first and last if they're the same */
1163  if (cnt[CNT_SPLITMV] &&
1164  AV_RN32A(&near_mv[1 + VP8_EDGE_TOP]) == AV_RN32A(&near_mv[1 + VP8_EDGE_TOPLEFT]))
1165  cnt[CNT_NEAREST] += 1;
1166 
1167  /* Swap near and nearest if necessary */
1168  if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
1169  FFSWAP(uint8_t, cnt[CNT_NEAREST], cnt[CNT_NEAR]);
1170  FFSWAP( VP56mv, near_mv[CNT_NEAREST], near_mv[CNT_NEAR]);
1171  }
1172 
1173  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAREST]][1])) {
1174  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAR]][2])) {
1175  /* Choose the best mv out of 0,0 and the nearest mv */
1176  clamp_mv(mv_bounds, &mb->mv, &near_mv[CNT_ZERO + (cnt[CNT_NEAREST] >= cnt[CNT_ZERO])]);
1177  cnt[CNT_SPLITMV] = ((mb_edge[VP8_EDGE_LEFT]->mode == VP8_MVMODE_SPLIT) +
1178  (mb_edge[VP8_EDGE_TOP]->mode == VP8_MVMODE_SPLIT)) * 2 +
1179  (mb_edge[VP8_EDGE_TOPLEFT]->mode == VP8_MVMODE_SPLIT);
1180 
1181  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_SPLITMV]][3])) {
1182  mb->mode = VP8_MVMODE_SPLIT;
1183  mb->mv = mb->bmv[decode_splitmvs(s, c, mb, layout, IS_VP8) - 1];
1184  } else {
1185  mb->mv.y += vp8_read_mv_component(c, s->prob->mvc[0]);
1186  mb->mv.x += vp8_read_mv_component(c, s->prob->mvc[1]);
1187  mb->bmv[0] = mb->mv;
1188  }
1189  } else {
1190  clamp_mv(mv_bounds, &mb->mv, &near_mv[CNT_NEAR]);
1191  mb->bmv[0] = mb->mv;
1192  }
1193  } else {
1194  clamp_mv(mv_bounds, &mb->mv, &near_mv[CNT_NEAREST]);
1195  mb->bmv[0] = mb->mv;
1196  }
1197  } else {
1198  mb->mode = VP8_MVMODE_ZERO;
1199  AV_ZERO32(&mb->mv);
1200  mb->bmv[0] = mb->mv;
1201  }
1202 }
1203 
1204 static av_always_inline
1206  int mb_x, int keyframe, int layout)
1207 {
1208  uint8_t *intra4x4 = mb->intra4x4_pred_mode_mb;
1209 
1210  if (layout) {
1211  VP8Macroblock *mb_top = mb - s->mb_width - 1;
1212  memcpy(mb->intra4x4_pred_mode_top, mb_top->intra4x4_pred_mode_top, 4);
1213  }
1214  if (keyframe) {
1215  int x, y;
1216  uint8_t *top;
1217  uint8_t *const left = s->intra4x4_pred_mode_left;
1218  if (layout)
1219  top = mb->intra4x4_pred_mode_top;
1220  else
1221  top = s->intra4x4_pred_mode_top + 4 * mb_x;
1222  for (y = 0; y < 4; y++) {
1223  for (x = 0; x < 4; x++) {
1224  const uint8_t *ctx;
1225  ctx = vp8_pred4x4_prob_intra[top[x]][left[y]];
1226  *intra4x4 = vp8_rac_get_tree(c, vp8_pred4x4_tree, ctx);
1227  left[y] = top[x] = *intra4x4;
1228  intra4x4++;
1229  }
1230  }
1231  } else {
1232  int i;
1233  for (i = 0; i < 16; i++)
1234  intra4x4[i] = vp8_rac_get_tree(c, vp8_pred4x4_tree,
1236  }
1237 }
1238 
1239 static av_always_inline
1241  VP8Macroblock *mb, int mb_x, int mb_y,
1242  uint8_t *segment, uint8_t *ref, int layout, int is_vp7)
1243 {
1244  VP56RangeCoder *c = &s->c;
1245  static const char * const vp7_feature_name[] = { "q-index",
1246  "lf-delta",
1247  "partial-golden-update",
1248  "blit-pitch" };
1249  if (is_vp7) {
1250  int i;
1251  *segment = 0;
1252  for (i = 0; i < 4; i++) {
1253  if (s->feature_enabled[i]) {
1256  s->feature_index_prob[i]);
1258  "Feature %s present in macroblock (value 0x%x)\n",
1259  vp7_feature_name[i], s->feature_value[i][index]);
1260  }
1261  }
1262  }
1263  } else if (s->segmentation.update_map) {
1264  int bit = vp56_rac_get_prob(c, s->prob->segmentid[0]);
1265  *segment = vp56_rac_get_prob(c, s->prob->segmentid[1+bit]) + 2*bit;
1266  } else if (s->segmentation.enabled)
1267  *segment = ref ? *ref : *segment;
1268  mb->segment = *segment;
1269 
1270  mb->skip = s->mbskip_enabled ? vp56_rac_get_prob(c, s->prob->mbskip) : 0;
1271 
1272  if (s->keyframe) {
1275 
1276  if (mb->mode == MODE_I4x4) {
1277  decode_intra4x4_modes(s, c, mb, mb_x, 1, layout);
1278  } else {
1279  const uint32_t modes = (is_vp7 ? vp7_pred4x4_mode
1280  : vp8_pred4x4_mode)[mb->mode] * 0x01010101u;
1281  if (s->mb_layout)
1282  AV_WN32A(mb->intra4x4_pred_mode_top, modes);
1283  else
1284  AV_WN32A(s->intra4x4_pred_mode_top + 4 * mb_x, modes);
1285  AV_WN32A(s->intra4x4_pred_mode_left, modes);
1286  }
1287 
1291  } else if (vp56_rac_get_prob_branchy(c, s->prob->intra)) {
1292  // inter MB, 16.2
1293  if (vp56_rac_get_prob_branchy(c, s->prob->last))
1294  mb->ref_frame =
1295  (!is_vp7 && vp56_rac_get_prob(c, s->prob->golden)) ? VP56_FRAME_GOLDEN2 /* altref */
1297  else
1299  s->ref_count[mb->ref_frame - 1]++;
1300 
1301  // motion vectors, 16.3
1302  if (is_vp7)
1303  vp7_decode_mvs(s, mb, mb_x, mb_y, layout);
1304  else
1305  vp8_decode_mvs(s, mv_bounds, mb, mb_x, mb_y, layout);
1306  } else {
1307  // intra MB, 16.1
1309 
1310  if (mb->mode == MODE_I4x4)
1311  decode_intra4x4_modes(s, c, mb, mb_x, 0, layout);
1312 
1314  s->prob->pred8x8c);
1317  AV_ZERO32(&mb->bmv[0]);
1318  }
1319 }
1320 
1321 /**
1322  * @param r arithmetic bitstream reader context
1323  * @param block destination for block coefficients
1324  * @param probs probabilities to use when reading trees from the bitstream
1325  * @param i initial coeff index, 0 unless a separate DC block is coded
1326  * @param qmul array holding the dc/ac dequant factor at position 0/1
1327  *
1328  * @return 0 if no coeffs were decoded
1329  * otherwise, the index of the last coeff decoded plus one
1330  */
1331 static av_always_inline
1333  uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
1334  int i, uint8_t *token_prob, int16_t qmul[2],
1335  const uint8_t scan[16], int vp7)
1336 {
1337  VP56RangeCoder c = *r;
1338  goto skip_eob;
1339  do {
1340  int coeff;
1341 restart:
1342  if (!vp56_rac_get_prob_branchy(&c, token_prob[0])) // DCT_EOB
1343  break;
1344 
1345 skip_eob:
1346  if (!vp56_rac_get_prob_branchy(&c, token_prob[1])) { // DCT_0
1347  if (++i == 16)
1348  break; // invalid input; blocks should end with EOB
1349  token_prob = probs[i][0];
1350  if (vp7)
1351  goto restart;
1352  goto skip_eob;
1353  }
1354 
1355  if (!vp56_rac_get_prob_branchy(&c, token_prob[2])) { // DCT_1
1356  coeff = 1;
1357  token_prob = probs[i + 1][1];
1358  } else {
1359  if (!vp56_rac_get_prob_branchy(&c, token_prob[3])) { // DCT 2,3,4
1360  coeff = vp56_rac_get_prob_branchy(&c, token_prob[4]);
1361  if (coeff)
1362  coeff += vp56_rac_get_prob(&c, token_prob[5]);
1363  coeff += 2;
1364  } else {
1365  // DCT_CAT*
1366  if (!vp56_rac_get_prob_branchy(&c, token_prob[6])) {
1367  if (!vp56_rac_get_prob_branchy(&c, token_prob[7])) { // DCT_CAT1
1368  coeff = 5 + vp56_rac_get_prob(&c, vp8_dct_cat1_prob[0]);
1369  } else { // DCT_CAT2
1370  coeff = 7;
1371  coeff += vp56_rac_get_prob(&c, vp8_dct_cat2_prob[0]) << 1;
1372  coeff += vp56_rac_get_prob(&c, vp8_dct_cat2_prob[1]);
1373  }
1374  } else { // DCT_CAT3 and up
1375  int a = vp56_rac_get_prob(&c, token_prob[8]);
1376  int b = vp56_rac_get_prob(&c, token_prob[9 + a]);
1377  int cat = (a << 1) + b;
1378  coeff = 3 + (8 << cat);
1379  coeff += vp8_rac_get_coeff(&c, ff_vp8_dct_cat_prob[cat]);
1380  }
1381  }
1382  token_prob = probs[i + 1][2];
1383  }
1384  block[scan[i]] = (vp8_rac_get(&c) ? -coeff : coeff) * qmul[!!i];
1385  } while (++i < 16);
1386 
1387  *r = c;
1388  return i;
1389 }
1390 
1391 static av_always_inline
1392 int inter_predict_dc(int16_t block[16], int16_t pred[2])
1393 {
1394  int16_t dc = block[0];
1395  int ret = 0;
1396 
1397  if (pred[1] > 3) {
1398  dc += pred[0];
1399  ret = 1;
1400  }
1401 
1402  if (!pred[0] | !dc | ((int32_t)pred[0] ^ (int32_t)dc) >> 31) {
1403  block[0] = pred[0] = dc;
1404  pred[1] = 0;
1405  } else {
1406  if (pred[0] == dc)
1407  pred[1]++;
1408  block[0] = pred[0] = dc;
1409  }
1410 
1411  return ret;
1412 }
1413 
1415  int16_t block[16],
1416  uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
1417  int i, uint8_t *token_prob,
1418  int16_t qmul[2],
1419  const uint8_t scan[16])
1420 {
1421  return decode_block_coeffs_internal(r, block, probs, i,
1422  token_prob, qmul, scan, IS_VP7);
1423 }
1424 
1425 #ifndef vp8_decode_block_coeffs_internal
1427  int16_t block[16],
1428  uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
1429  int i, uint8_t *token_prob,
1430  int16_t qmul[2])
1431 {
1432  return decode_block_coeffs_internal(r, block, probs, i,
1433  token_prob, qmul, ff_zigzag_scan, IS_VP8);
1434 }
1435 #endif
1436 
1437 /**
1438  * @param c arithmetic bitstream reader context
1439  * @param block destination for block coefficients
1440  * @param probs probabilities to use when reading trees from the bitstream
1441  * @param i initial coeff index, 0 unless a separate DC block is coded
1442  * @param zero_nhood the initial prediction context for number of surrounding
1443  * all-zero blocks (only left/top, so 0-2)
1444  * @param qmul array holding the dc/ac dequant factor at position 0/1
1445  * @param scan scan pattern (VP7 only)
1446  *
1447  * @return 0 if no coeffs were decoded
1448  * otherwise, the index of the last coeff decoded plus one
1449  */
1450 static av_always_inline
1452  uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
1453  int i, int zero_nhood, int16_t qmul[2],
1454  const uint8_t scan[16], int vp7)
1455 {
1456  uint8_t *token_prob = probs[i][zero_nhood];
1457  if (!vp56_rac_get_prob_branchy(c, token_prob[0])) // DCT_EOB
1458  return 0;
1459  return vp7 ? vp7_decode_block_coeffs_internal(c, block, probs, i,
1460  token_prob, qmul, scan)
1461  : vp8_decode_block_coeffs_internal(c, block, probs, i,
1462  token_prob, qmul);
1463 }
1464 
1465 static av_always_inline
1467  VP8Macroblock *mb, uint8_t t_nnz[9], uint8_t l_nnz[9],
1468  int is_vp7)
1469 {
1470  int i, x, y, luma_start = 0, luma_ctx = 3;
1471  int nnz_pred, nnz, nnz_total = 0;
1472  int segment = mb->segment;
1473  int block_dc = 0;
1474 
1475  if (mb->mode != MODE_I4x4 && (is_vp7 || mb->mode != VP8_MVMODE_SPLIT)) {
1476  nnz_pred = t_nnz[8] + l_nnz[8];
1477 
1478  // decode DC values and do hadamard
1479  nnz = decode_block_coeffs(c, td->block_dc, s->prob->token[1], 0,
1480  nnz_pred, s->qmat[segment].luma_dc_qmul,
1481  ff_zigzag_scan, is_vp7);
1482  l_nnz[8] = t_nnz[8] = !!nnz;
1483 
1484  if (is_vp7 && mb->mode > MODE_I4x4) {
1485  nnz |= inter_predict_dc(td->block_dc,
1486  s->inter_dc_pred[mb->ref_frame - 1]);
1487  }
1488 
1489  if (nnz) {
1490  nnz_total += nnz;
1491  block_dc = 1;
1492  if (nnz == 1)
1493  s->vp8dsp.vp8_luma_dc_wht_dc(td->block, td->block_dc);
1494  else
1495  s->vp8dsp.vp8_luma_dc_wht(td->block, td->block_dc);
1496  }
1497  luma_start = 1;
1498  luma_ctx = 0;
1499  }
1500 
1501  // luma blocks
1502  for (y = 0; y < 4; y++)
1503  for (x = 0; x < 4; x++) {
1504  nnz_pred = l_nnz[y] + t_nnz[x];
1505  nnz = decode_block_coeffs(c, td->block[y][x],
1506  s->prob->token[luma_ctx],
1507  luma_start, nnz_pred,
1508  s->qmat[segment].luma_qmul,
1509  s->prob[0].scan, is_vp7);
1510  /* nnz+block_dc may be one more than the actual last index,
1511  * but we don't care */
1512  td->non_zero_count_cache[y][x] = nnz + block_dc;
1513  t_nnz[x] = l_nnz[y] = !!nnz;
1514  nnz_total += nnz;
1515  }
1516 
1517  // chroma blocks
1518  // TODO: what to do about dimensions? 2nd dim for luma is x,
1519  // but for chroma it's (y<<1)|x
1520  for (i = 4; i < 6; i++)
1521  for (y = 0; y < 2; y++)
1522  for (x = 0; x < 2; x++) {
1523  nnz_pred = l_nnz[i + 2 * y] + t_nnz[i + 2 * x];
1524  nnz = decode_block_coeffs(c, td->block[i][(y << 1) + x],
1525  s->prob->token[2], 0, nnz_pred,
1526  s->qmat[segment].chroma_qmul,
1527  s->prob[0].scan, is_vp7);
1528  td->non_zero_count_cache[i][(y << 1) + x] = nnz;
1529  t_nnz[i + 2 * x] = l_nnz[i + 2 * y] = !!nnz;
1530  nnz_total += nnz;
1531  }
1532 
1533  // if there were no coded coeffs despite the macroblock not being marked skip,
1534  // we MUST not do the inner loop filter and should not do IDCT
1535  // Since skip isn't used for bitstream prediction, just manually set it.
1536  if (!nnz_total)
1537  mb->skip = 1;
1538 }
1539 
1540 static av_always_inline
1541 void backup_mb_border(uint8_t *top_border, uint8_t *src_y,
1542  uint8_t *src_cb, uint8_t *src_cr,
1543  ptrdiff_t linesize, ptrdiff_t uvlinesize, int simple)
1544 {
1545  AV_COPY128(top_border, src_y + 15 * linesize);
1546  if (!simple) {
1547  AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
1548  AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
1549  }
1550 }
1551 
1552 static av_always_inline
1553 void xchg_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb,
1554  uint8_t *src_cr, ptrdiff_t linesize, ptrdiff_t uvlinesize, int mb_x,
1555  int mb_y, int mb_width, int simple, int xchg)
1556 {
1557  uint8_t *top_border_m1 = top_border - 32; // for TL prediction
1558  src_y -= linesize;
1559  src_cb -= uvlinesize;
1560  src_cr -= uvlinesize;
1561 
1562 #define XCHG(a, b, xchg) \
1563  do { \
1564  if (xchg) \
1565  AV_SWAP64(b, a); \
1566  else \
1567  AV_COPY64(b, a); \
1568  } while (0)
1569 
1570  XCHG(top_border_m1 + 8, src_y - 8, xchg);
1571  XCHG(top_border, src_y, xchg);
1572  XCHG(top_border + 8, src_y + 8, 1);
1573  if (mb_x < mb_width - 1)
1574  XCHG(top_border + 32, src_y + 16, 1);
1575 
1576  // only copy chroma for normal loop filter
1577  // or to initialize the top row to 127
1578  if (!simple || !mb_y) {
1579  XCHG(top_border_m1 + 16, src_cb - 8, xchg);
1580  XCHG(top_border_m1 + 24, src_cr - 8, xchg);
1581  XCHG(top_border + 16, src_cb, 1);
1582  XCHG(top_border + 24, src_cr, 1);
1583  }
1584 }
1585 
1586 static av_always_inline
1587 int check_dc_pred8x8_mode(int mode, int mb_x, int mb_y)
1588 {
1589  if (!mb_x)
1590  return mb_y ? TOP_DC_PRED8x8 : DC_128_PRED8x8;
1591  else
1592  return mb_y ? mode : LEFT_DC_PRED8x8;
1593 }
1594 
1595 static av_always_inline
1596 int check_tm_pred8x8_mode(int mode, int mb_x, int mb_y, int vp7)
1597 {
1598  if (!mb_x)
1599  return mb_y ? VERT_PRED8x8 : (vp7 ? DC_128_PRED8x8 : DC_129_PRED8x8);
1600  else
1601  return mb_y ? mode : HOR_PRED8x8;
1602 }
1603 
1604 static av_always_inline
1605 int check_intra_pred8x8_mode_emuedge(int mode, int mb_x, int mb_y, int vp7)
1606 {
1607  switch (mode) {
1608  case DC_PRED8x8:
1609  return check_dc_pred8x8_mode(mode, mb_x, mb_y);
1610  case VERT_PRED8x8:
1611  return !mb_y ? (vp7 ? DC_128_PRED8x8 : DC_127_PRED8x8) : mode;
1612  case HOR_PRED8x8:
1613  return !mb_x ? (vp7 ? DC_128_PRED8x8 : DC_129_PRED8x8) : mode;
1614  case PLANE_PRED8x8: /* TM */
1615  return check_tm_pred8x8_mode(mode, mb_x, mb_y, vp7);
1616  }
1617  return mode;
1618 }
1619 
1620 static av_always_inline
1621 int check_tm_pred4x4_mode(int mode, int mb_x, int mb_y, int vp7)
1622 {
1623  if (!mb_x) {
1624  return mb_y ? VERT_VP8_PRED : (vp7 ? DC_128_PRED : DC_129_PRED);
1625  } else {
1626  return mb_y ? mode : HOR_VP8_PRED;
1627  }
1628 }
1629 
1630 static av_always_inline
1631 int check_intra_pred4x4_mode_emuedge(int mode, int mb_x, int mb_y,
1632  int *copy_buf, int vp7)
1633 {
1634  switch (mode) {
1635  case VERT_PRED:
1636  if (!mb_x && mb_y) {
1637  *copy_buf = 1;
1638  return mode;
1639  }
1640  /* fall-through */
1641  case DIAG_DOWN_LEFT_PRED:
1642  case VERT_LEFT_PRED:
1643  return !mb_y ? (vp7 ? DC_128_PRED : DC_127_PRED) : mode;
1644  case HOR_PRED:
1645  if (!mb_y) {
1646  *copy_buf = 1;
1647  return mode;
1648  }
1649  /* fall-through */
1650  case HOR_UP_PRED:
1651  return !mb_x ? (vp7 ? DC_128_PRED : DC_129_PRED) : mode;
1652  case TM_VP8_PRED:
1653  return check_tm_pred4x4_mode(mode, mb_x, mb_y, vp7);
1654  case DC_PRED: /* 4x4 DC doesn't use the same "H.264-style" exceptions
1655  * as 16x16/8x8 DC */
1656  case DIAG_DOWN_RIGHT_PRED:
1657  case VERT_RIGHT_PRED:
1658  case HOR_DOWN_PRED:
1659  if (!mb_y || !mb_x)
1660  *copy_buf = 1;
1661  return mode;
1662  }
1663  return mode;
1664 }
1665 
1666 static av_always_inline
1668  VP8Macroblock *mb, int mb_x, int mb_y, int is_vp7)
1669 {
1670  int x, y, mode, nnz;
1671  uint32_t tr;
1672 
1673  /* for the first row, we need to run xchg_mb_border to init the top edge
1674  * to 127 otherwise, skip it if we aren't going to deblock */
1675  if (mb_y && (s->deblock_filter || !mb_y) && td->thread_nr == 0)
1676  xchg_mb_border(s->top_border[mb_x + 1], dst[0], dst[1], dst[2],
1677  s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
1678  s->filter.simple, 1);
1679 
1680  if (mb->mode < MODE_I4x4) {
1681  mode = check_intra_pred8x8_mode_emuedge(mb->mode, mb_x, mb_y, is_vp7);
1682  s->hpc.pred16x16[mode](dst[0], s->linesize);
1683  } else {
1684  uint8_t *ptr = dst[0];
1685  uint8_t *intra4x4 = mb->intra4x4_pred_mode_mb;
1686  const uint8_t lo = is_vp7 ? 128 : 127;
1687  const uint8_t hi = is_vp7 ? 128 : 129;
1688  uint8_t tr_top[4] = { lo, lo, lo, lo };
1689 
1690  // all blocks on the right edge of the macroblock use bottom edge
1691  // the top macroblock for their topright edge
1692  uint8_t *tr_right = ptr - s->linesize + 16;
1693 
1694  // if we're on the right edge of the frame, said edge is extended
1695  // from the top macroblock
1696  if (mb_y && mb_x == s->mb_width - 1) {
1697  tr = tr_right[-1] * 0x01010101u;
1698  tr_right = (uint8_t *) &tr;
1699  }
1700 
1701  if (mb->skip)
1703 
1704  for (y = 0; y < 4; y++) {
1705  uint8_t *topright = ptr + 4 - s->linesize;
1706  for (x = 0; x < 4; x++) {
1707  int copy = 0;
1708  ptrdiff_t linesize = s->linesize;
1709  uint8_t *dst = ptr + 4 * x;
1710  LOCAL_ALIGNED(4, uint8_t, copy_dst, [5 * 8]);
1711 
1712  if ((y == 0 || x == 3) && mb_y == 0) {
1713  topright = tr_top;
1714  } else if (x == 3)
1715  topright = tr_right;
1716 
1717  mode = check_intra_pred4x4_mode_emuedge(intra4x4[x], mb_x + x,
1718  mb_y + y, &copy, is_vp7);
1719  if (copy) {
1720  dst = copy_dst + 12;
1721  linesize = 8;
1722  if (!(mb_y + y)) {
1723  copy_dst[3] = lo;
1724  AV_WN32A(copy_dst + 4, lo * 0x01010101U);
1725  } else {
1726  AV_COPY32(copy_dst + 4, ptr + 4 * x - s->linesize);
1727  if (!(mb_x + x)) {
1728  copy_dst[3] = hi;
1729  } else {
1730  copy_dst[3] = ptr[4 * x - s->linesize - 1];
1731  }
1732  }
1733  if (!(mb_x + x)) {
1734  copy_dst[11] =
1735  copy_dst[19] =
1736  copy_dst[27] =
1737  copy_dst[35] = hi;
1738  } else {
1739  copy_dst[11] = ptr[4 * x - 1];
1740  copy_dst[19] = ptr[4 * x + s->linesize - 1];
1741  copy_dst[27] = ptr[4 * x + s->linesize * 2 - 1];
1742  copy_dst[35] = ptr[4 * x + s->linesize * 3 - 1];
1743  }
1744  }
1745  s->hpc.pred4x4[mode](dst, topright, linesize);
1746  if (copy) {
1747  AV_COPY32(ptr + 4 * x, copy_dst + 12);
1748  AV_COPY32(ptr + 4 * x + s->linesize, copy_dst + 20);
1749  AV_COPY32(ptr + 4 * x + s->linesize * 2, copy_dst + 28);
1750  AV_COPY32(ptr + 4 * x + s->linesize * 3, copy_dst + 36);
1751  }
1752 
1753  nnz = td->non_zero_count_cache[y][x];
1754  if (nnz) {
1755  if (nnz == 1)
1756  s->vp8dsp.vp8_idct_dc_add(ptr + 4 * x,
1757  td->block[y][x], s->linesize);
1758  else
1759  s->vp8dsp.vp8_idct_add(ptr + 4 * x,
1760  td->block[y][x], s->linesize);
1761  }
1762  topright += 4;
1763  }
1764 
1765  ptr += 4 * s->linesize;
1766  intra4x4 += 4;
1767  }
1768  }
1769 
1771  mb_x, mb_y, is_vp7);
1772  s->hpc.pred8x8[mode](dst[1], s->uvlinesize);
1773  s->hpc.pred8x8[mode](dst[2], s->uvlinesize);
1774 
1775  if (mb_y && (s->deblock_filter || !mb_y) && td->thread_nr == 0)
1776  xchg_mb_border(s->top_border[mb_x + 1], dst[0], dst[1], dst[2],
1777  s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
1778  s->filter.simple, 0);
1779 }
1780 
1781 static const uint8_t subpel_idx[3][8] = {
1782  { 0, 1, 2, 1, 2, 1, 2, 1 }, // nr. of left extra pixels,
1783  // also function pointer index
1784  { 0, 3, 5, 3, 5, 3, 5, 3 }, // nr. of extra pixels required
1785  { 0, 2, 3, 2, 3, 2, 3, 2 }, // nr. of right extra pixels
1786 };
1787 
1788 /**
1789  * luma MC function
1790  *
1791  * @param s VP8 decoding context
1792  * @param dst target buffer for block data at block position
1793  * @param ref reference picture buffer at origin (0, 0)
1794  * @param mv motion vector (relative to block position) to get pixel data from
1795  * @param x_off horizontal position of block from origin (0, 0)
1796  * @param y_off vertical position of block from origin (0, 0)
1797  * @param block_w width of block (16, 8 or 4)
1798  * @param block_h height of block (always same as block_w)
1799  * @param width width of src/dst plane data
1800  * @param height height of src/dst plane data
1801  * @param linesize size of a single line of plane data, including padding
1802  * @param mc_func motion compensation function pointers (bilinear or sixtap MC)
1803  */
1804 static av_always_inline
1806  ThreadFrame *ref, const VP56mv *mv,
1807  int x_off, int y_off, int block_w, int block_h,
1808  int width, int height, ptrdiff_t linesize,
1809  vp8_mc_func mc_func[3][3])
1810 {
1811  uint8_t *src = ref->f->data[0];
1812 
1813  if (AV_RN32A(mv)) {
1814  ptrdiff_t src_linesize = linesize;
1815 
1816  int mx = (mv->x * 2) & 7, mx_idx = subpel_idx[0][mx];
1817  int my = (mv->y * 2) & 7, my_idx = subpel_idx[0][my];
1818 
1819  x_off += mv->x >> 2;
1820  y_off += mv->y >> 2;
1821 
1822  // edge emulation
1823  ff_thread_await_progress(ref, (3 + y_off + block_h + subpel_idx[2][my]) >> 4, 0);
1824  src += y_off * linesize + x_off;
1825  if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
1826  y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
1828  src - my_idx * linesize - mx_idx,
1829  EDGE_EMU_LINESIZE, linesize,
1830  block_w + subpel_idx[1][mx],
1831  block_h + subpel_idx[1][my],
1832  x_off - mx_idx, y_off - my_idx,
1833  width, height);
1834  src = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
1835  src_linesize = EDGE_EMU_LINESIZE;
1836  }
1837  mc_func[my_idx][mx_idx](dst, linesize, src, src_linesize, block_h, mx, my);
1838  } else {
1839  ff_thread_await_progress(ref, (3 + y_off + block_h) >> 4, 0);
1840  mc_func[0][0](dst, linesize, src + y_off * linesize + x_off,
1841  linesize, block_h, 0, 0);
1842  }
1843 }
1844 
1845 /**
1846  * chroma MC function
1847  *
1848  * @param s VP8 decoding context
1849  * @param dst1 target buffer for block data at block position (U plane)
1850  * @param dst2 target buffer for block data at block position (V plane)
1851  * @param ref reference picture buffer at origin (0, 0)
1852  * @param mv motion vector (relative to block position) to get pixel data from
1853  * @param x_off horizontal position of block from origin (0, 0)
1854  * @param y_off vertical position of block from origin (0, 0)
1855  * @param block_w width of block (16, 8 or 4)
1856  * @param block_h height of block (always same as block_w)
1857  * @param width width of src/dst plane data
1858  * @param height height of src/dst plane data
1859  * @param linesize size of a single line of plane data, including padding
1860  * @param mc_func motion compensation function pointers (bilinear or sixtap MC)
1861  */
1862 static av_always_inline
1864  uint8_t *dst2, ThreadFrame *ref, const VP56mv *mv,
1865  int x_off, int y_off, int block_w, int block_h,
1866  int width, int height, ptrdiff_t linesize,
1867  vp8_mc_func mc_func[3][3])
1868 {
1869  uint8_t *src1 = ref->f->data[1], *src2 = ref->f->data[2];
1870 
1871  if (AV_RN32A(mv)) {
1872  int mx = mv->x & 7, mx_idx = subpel_idx[0][mx];
1873  int my = mv->y & 7, my_idx = subpel_idx[0][my];
1874 
1875  x_off += mv->x >> 3;
1876  y_off += mv->y >> 3;
1877 
1878  // edge emulation
1879  src1 += y_off * linesize + x_off;
1880  src2 += y_off * linesize + x_off;
1881  ff_thread_await_progress(ref, (3 + y_off + block_h + subpel_idx[2][my]) >> 3, 0);
1882  if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
1883  y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
1885  src1 - my_idx * linesize - mx_idx,
1886  EDGE_EMU_LINESIZE, linesize,
1887  block_w + subpel_idx[1][mx],
1888  block_h + subpel_idx[1][my],
1889  x_off - mx_idx, y_off - my_idx, width, height);
1890  src1 = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
1891  mc_func[my_idx][mx_idx](dst1, linesize, src1, EDGE_EMU_LINESIZE, block_h, mx, my);
1892 
1894  src2 - my_idx * linesize - mx_idx,
1895  EDGE_EMU_LINESIZE, linesize,
1896  block_w + subpel_idx[1][mx],
1897  block_h + subpel_idx[1][my],
1898  x_off - mx_idx, y_off - my_idx, width, height);
1899  src2 = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
1900  mc_func[my_idx][mx_idx](dst2, linesize, src2, EDGE_EMU_LINESIZE, block_h, mx, my);
1901  } else {
1902  mc_func[my_idx][mx_idx](dst1, linesize, src1, linesize, block_h, mx, my);
1903  mc_func[my_idx][mx_idx](dst2, linesize, src2, linesize, block_h, mx, my);
1904  }
1905  } else {
1906  ff_thread_await_progress(ref, (3 + y_off + block_h) >> 3, 0);
1907  mc_func[0][0](dst1, linesize, src1 + y_off * linesize + x_off, linesize, block_h, 0, 0);
1908  mc_func[0][0](dst2, linesize, src2 + y_off * linesize + x_off, linesize, block_h, 0, 0);
1909  }
1910 }
1911 
1912 static av_always_inline
1914  ThreadFrame *ref_frame, int x_off, int y_off,
1915  int bx_off, int by_off, int block_w, int block_h,
1916  int width, int height, VP56mv *mv)
1917 {
1918  VP56mv uvmv = *mv;
1919 
1920  /* Y */
1921  vp8_mc_luma(s, td, dst[0] + by_off * s->linesize + bx_off,
1922  ref_frame, mv, x_off + bx_off, y_off + by_off,
1923  block_w, block_h, width, height, s->linesize,
1924  s->put_pixels_tab[block_w == 8]);
1925 
1926  /* U/V */
1927  if (s->profile == 3) {
1928  /* this block only applies VP8; it is safe to check
1929  * only the profile, as VP7 profile <= 1 */
1930  uvmv.x &= ~7;
1931  uvmv.y &= ~7;
1932  }
1933  x_off >>= 1;
1934  y_off >>= 1;
1935  bx_off >>= 1;
1936  by_off >>= 1;
1937  width >>= 1;
1938  height >>= 1;
1939  block_w >>= 1;
1940  block_h >>= 1;
1941  vp8_mc_chroma(s, td, dst[1] + by_off * s->uvlinesize + bx_off,
1942  dst[2] + by_off * s->uvlinesize + bx_off, ref_frame,
1943  &uvmv, x_off + bx_off, y_off + by_off,
1944  block_w, block_h, width, height, s->uvlinesize,
1945  s->put_pixels_tab[1 + (block_w == 4)]);
1946 }
1947 
1948 /* Fetch pixels for estimated mv 4 macroblocks ahead.
1949  * Optimized for 64-byte cache lines. Inspired by ffh264 prefetch_motion. */
1950 static av_always_inline
1951 void prefetch_motion(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y,
1952  int mb_xy, int ref)
1953 {
1954  /* Don't prefetch refs that haven't been used very often this frame. */
1955  if (s->ref_count[ref - 1] > (mb_xy >> 5)) {
1956  int x_off = mb_x << 4, y_off = mb_y << 4;
1957  int mx = (mb->mv.x >> 2) + x_off + 8;
1958  int my = (mb->mv.y >> 2) + y_off;
1959  uint8_t **src = s->framep[ref]->tf.f->data;
1960  int off = mx + (my + (mb_x & 3) * 4) * s->linesize + 64;
1961  /* For threading, a ff_thread_await_progress here might be useful, but
1962  * it actually slows down the decoder. Since a bad prefetch doesn't
1963  * generate bad decoder output, we don't run it here. */
1964  s->vdsp.prefetch(src[0] + off, s->linesize, 4);
1965  off = (mx >> 1) + ((my >> 1) + (mb_x & 7)) * s->uvlinesize + 64;
1966  s->vdsp.prefetch(src[1] + off, src[2] - src[1], 2);
1967  }
1968 }
1969 
1970 /**
1971  * Apply motion vectors to prediction buffer, chapter 18.
1972  */
1973 static av_always_inline
1975  VP8Macroblock *mb, int mb_x, int mb_y)
1976 {
1977  int x_off = mb_x << 4, y_off = mb_y << 4;
1978  int width = 16 * s->mb_width, height = 16 * s->mb_height;
1979  ThreadFrame *ref = &s->framep[mb->ref_frame]->tf;
1980  VP56mv *bmv = mb->bmv;
1981 
1982  switch (mb->partitioning) {
1983  case VP8_SPLITMVMODE_NONE:
1984  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1985  0, 0, 16, 16, width, height, &mb->mv);
1986  break;
1987  case VP8_SPLITMVMODE_4x4: {
1988  int x, y;
1989  VP56mv uvmv;
1990 
1991  /* Y */
1992  for (y = 0; y < 4; y++) {
1993  for (x = 0; x < 4; x++) {
1994  vp8_mc_luma(s, td, dst[0] + 4 * y * s->linesize + x * 4,
1995  ref, &bmv[4 * y + x],
1996  4 * x + x_off, 4 * y + y_off, 4, 4,
1997  width, height, s->linesize,
1998  s->put_pixels_tab[2]);
1999  }
2000  }
2001 
2002  /* U/V */
2003  x_off >>= 1;
2004  y_off >>= 1;
2005  width >>= 1;
2006  height >>= 1;
2007  for (y = 0; y < 2; y++) {
2008  for (x = 0; x < 2; x++) {
2009  uvmv.x = mb->bmv[2 * y * 4 + 2 * x ].x +
2010  mb->bmv[2 * y * 4 + 2 * x + 1].x +
2011  mb->bmv[(2 * y + 1) * 4 + 2 * x ].x +
2012  mb->bmv[(2 * y + 1) * 4 + 2 * x + 1].x;
2013  uvmv.y = mb->bmv[2 * y * 4 + 2 * x ].y +
2014  mb->bmv[2 * y * 4 + 2 * x + 1].y +
2015  mb->bmv[(2 * y + 1) * 4 + 2 * x ].y +
2016  mb->bmv[(2 * y + 1) * 4 + 2 * x + 1].y;
2017  uvmv.x = (uvmv.x + 2 + FF_SIGNBIT(uvmv.x)) >> 2;
2018  uvmv.y = (uvmv.y + 2 + FF_SIGNBIT(uvmv.y)) >> 2;
2019  if (s->profile == 3) {
2020  uvmv.x &= ~7;
2021  uvmv.y &= ~7;
2022  }
2023  vp8_mc_chroma(s, td, dst[1] + 4 * y * s->uvlinesize + x * 4,
2024  dst[2] + 4 * y * s->uvlinesize + x * 4, ref,
2025  &uvmv, 4 * x + x_off, 4 * y + y_off, 4, 4,
2026  width, height, s->uvlinesize,
2027  s->put_pixels_tab[2]);
2028  }
2029  }
2030  break;
2031  }
2032  case VP8_SPLITMVMODE_16x8:
2033  vp8_mc_part(s, td, dst, ref, x_off, y_off,
2034  0, 0, 16, 8, width, height, &bmv[0]);
2035  vp8_mc_part(s, td, dst, ref, x_off, y_off,
2036  0, 8, 16, 8, width, height, &bmv[1]);
2037  break;
2038  case VP8_SPLITMVMODE_8x16:
2039  vp8_mc_part(s, td, dst, ref, x_off, y_off,
2040  0, 0, 8, 16, width, height, &bmv[0]);
2041  vp8_mc_part(s, td, dst, ref, x_off, y_off,
2042  8, 0, 8, 16, width, height, &bmv[1]);
2043  break;
2044  case VP8_SPLITMVMODE_8x8:
2045  vp8_mc_part(s, td, dst, ref, x_off, y_off,
2046  0, 0, 8, 8, width, height, &bmv[0]);
2047  vp8_mc_part(s, td, dst, ref, x_off, y_off,
2048  8, 0, 8, 8, width, height, &bmv[1]);
2049  vp8_mc_part(s, td, dst, ref, x_off, y_off,
2050  0, 8, 8, 8, width, height, &bmv[2]);
2051  vp8_mc_part(s, td, dst, ref, x_off, y_off,
2052  8, 8, 8, 8, width, height, &bmv[3]);
2053  break;
2054  }
2055 }
2056 
2057 static av_always_inline
2059 {
2060  int x, y, ch;
2061 
2062  if (mb->mode != MODE_I4x4) {
2063  uint8_t *y_dst = dst[0];
2064  for (y = 0; y < 4; y++) {
2065  uint32_t nnz4 = AV_RL32(td->non_zero_count_cache[y]);
2066  if (nnz4) {
2067  if (nnz4 & ~0x01010101) {
2068  for (x = 0; x < 4; x++) {
2069  if ((uint8_t) nnz4 == 1)
2070  s->vp8dsp.vp8_idct_dc_add(y_dst + 4 * x,
2071  td->block[y][x],
2072  s->linesize);
2073  else if ((uint8_t) nnz4 > 1)
2074  s->vp8dsp.vp8_idct_add(y_dst + 4 * x,
2075  td->block[y][x],
2076  s->linesize);
2077  nnz4 >>= 8;
2078  if (!nnz4)
2079  break;
2080  }
2081  } else {
2082  s->vp8dsp.vp8_idct_dc_add4y(y_dst, td->block[y], s->linesize);
2083  }
2084  }
2085  y_dst += 4 * s->linesize;
2086  }
2087  }
2088 
2089  for (ch = 0; ch < 2; ch++) {
2090  uint32_t nnz4 = AV_RL32(td->non_zero_count_cache[4 + ch]);
2091  if (nnz4) {
2092  uint8_t *ch_dst = dst[1 + ch];
2093  if (nnz4 & ~0x01010101) {
2094  for (y = 0; y < 2; y++) {
2095  for (x = 0; x < 2; x++) {
2096  if ((uint8_t) nnz4 == 1)
2097  s->vp8dsp.vp8_idct_dc_add(ch_dst + 4 * x,
2098  td->block[4 + ch][(y << 1) + x],
2099  s->uvlinesize);
2100  else if ((uint8_t) nnz4 > 1)
2101  s->vp8dsp.vp8_idct_add(ch_dst + 4 * x,
2102  td->block[4 + ch][(y << 1) + x],
2103  s->uvlinesize);
2104  nnz4 >>= 8;
2105  if (!nnz4)
2106  goto chroma_idct_end;
2107  }
2108  ch_dst += 4 * s->uvlinesize;
2109  }
2110  } else {
2111  s->vp8dsp.vp8_idct_dc_add4uv(ch_dst, td->block[4 + ch], s->uvlinesize);
2112  }
2113  }
2114 chroma_idct_end:
2115  ;
2116  }
2117 }
2118 
2119 static av_always_inline
2121  VP8FilterStrength *f, int is_vp7)
2122 {
2123  int interior_limit, filter_level;
2124 
2125  if (s->segmentation.enabled) {
2126  filter_level = s->segmentation.filter_level[mb->segment];
2127  if (!s->segmentation.absolute_vals)
2128  filter_level += s->filter.level;
2129  } else
2130  filter_level = s->filter.level;
2131 
2132  if (s->lf_delta.enabled) {
2133  filter_level += s->lf_delta.ref[mb->ref_frame];
2134  filter_level += s->lf_delta.mode[mb->mode];
2135  }
2136 
2137  filter_level = av_clip_uintp2(filter_level, 6);
2138 
2139  interior_limit = filter_level;
2140  if (s->filter.sharpness) {
2141  interior_limit >>= (s->filter.sharpness + 3) >> 2;
2142  interior_limit = FFMIN(interior_limit, 9 - s->filter.sharpness);
2143  }
2144  interior_limit = FFMAX(interior_limit, 1);
2145 
2146  f->filter_level = filter_level;
2147  f->inner_limit = interior_limit;
2148  f->inner_filter = is_vp7 || !mb->skip || mb->mode == MODE_I4x4 ||
2149  mb->mode == VP8_MVMODE_SPLIT;
2150 }
2151 
2152 static av_always_inline
2154  int mb_x, int mb_y, int is_vp7)
2155 {
2156  int mbedge_lim, bedge_lim_y, bedge_lim_uv, hev_thresh;
2157  int filter_level = f->filter_level;
2158  int inner_limit = f->inner_limit;
2159  int inner_filter = f->inner_filter;
2160  ptrdiff_t linesize = s->linesize;
2161  ptrdiff_t uvlinesize = s->uvlinesize;
2162  static const uint8_t hev_thresh_lut[2][64] = {
2163  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
2164  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2165  3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
2166  3, 3, 3, 3 },
2167  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
2168  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2169  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2170  2, 2, 2, 2 }
2171  };
2172 
2173  if (!filter_level)
2174  return;
2175 
2176  if (is_vp7) {
2177  bedge_lim_y = filter_level;
2178  bedge_lim_uv = filter_level * 2;
2179  mbedge_lim = filter_level + 2;
2180  } else {
2181  bedge_lim_y =
2182  bedge_lim_uv = filter_level * 2 + inner_limit;
2183  mbedge_lim = bedge_lim_y + 4;
2184  }
2185 
2186  hev_thresh = hev_thresh_lut[s->keyframe][filter_level];
2187 
2188  if (mb_x) {
2189  s->vp8dsp.vp8_h_loop_filter16y(dst[0], linesize,
2190  mbedge_lim, inner_limit, hev_thresh);
2191  s->vp8dsp.vp8_h_loop_filter8uv(dst[1], dst[2], uvlinesize,
2192  mbedge_lim, inner_limit, hev_thresh);
2193  }
2194 
2195 #define H_LOOP_FILTER_16Y_INNER(cond) \
2196  if (cond && inner_filter) { \
2197  s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 4, linesize, \
2198  bedge_lim_y, inner_limit, \
2199  hev_thresh); \
2200  s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 8, linesize, \
2201  bedge_lim_y, inner_limit, \
2202  hev_thresh); \
2203  s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 12, linesize, \
2204  bedge_lim_y, inner_limit, \
2205  hev_thresh); \
2206  s->vp8dsp.vp8_h_loop_filter8uv_inner(dst[1] + 4, dst[2] + 4, \
2207  uvlinesize, bedge_lim_uv, \
2208  inner_limit, hev_thresh); \
2209  }
2210 
2211  H_LOOP_FILTER_16Y_INNER(!is_vp7)
2212 
2213  if (mb_y) {
2214  s->vp8dsp.vp8_v_loop_filter16y(dst[0], linesize,
2215  mbedge_lim, inner_limit, hev_thresh);
2216  s->vp8dsp.vp8_v_loop_filter8uv(dst[1], dst[2], uvlinesize,
2217  mbedge_lim, inner_limit, hev_thresh);
2218  }
2219 
2220  if (inner_filter) {
2221  s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0] + 4 * linesize,
2222  linesize, bedge_lim_y,
2223  inner_limit, hev_thresh);
2224  s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0] + 8 * linesize,
2225  linesize, bedge_lim_y,
2226  inner_limit, hev_thresh);
2227  s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0] + 12 * linesize,
2228  linesize, bedge_lim_y,
2229  inner_limit, hev_thresh);
2230  s->vp8dsp.vp8_v_loop_filter8uv_inner(dst[1] + 4 * uvlinesize,
2231  dst[2] + 4 * uvlinesize,
2232  uvlinesize, bedge_lim_uv,
2233  inner_limit, hev_thresh);
2234  }
2235 
2236  H_LOOP_FILTER_16Y_INNER(is_vp7)
2237 }
2238 
2239 static av_always_inline
2241  int mb_x, int mb_y)
2242 {
2243  int mbedge_lim, bedge_lim;
2244  int filter_level = f->filter_level;
2245  int inner_limit = f->inner_limit;
2246  int inner_filter = f->inner_filter;
2247  ptrdiff_t linesize = s->linesize;
2248 
2249  if (!filter_level)
2250  return;
2251 
2252  bedge_lim = 2 * filter_level + inner_limit;
2253  mbedge_lim = bedge_lim + 4;
2254 
2255  if (mb_x)
2256  s->vp8dsp.vp8_h_loop_filter_simple(dst, linesize, mbedge_lim);
2257  if (inner_filter) {
2258  s->vp8dsp.vp8_h_loop_filter_simple(dst + 4, linesize, bedge_lim);
2259  s->vp8dsp.vp8_h_loop_filter_simple(dst + 8, linesize, bedge_lim);
2260  s->vp8dsp.vp8_h_loop_filter_simple(dst + 12, linesize, bedge_lim);
2261  }
2262 
2263  if (mb_y)
2264  s->vp8dsp.vp8_v_loop_filter_simple(dst, linesize, mbedge_lim);
2265  if (inner_filter) {
2266  s->vp8dsp.vp8_v_loop_filter_simple(dst + 4 * linesize, linesize, bedge_lim);
2267  s->vp8dsp.vp8_v_loop_filter_simple(dst + 8 * linesize, linesize, bedge_lim);
2268  s->vp8dsp.vp8_v_loop_filter_simple(dst + 12 * linesize, linesize, bedge_lim);
2269  }
2270 }
2271 
2272 #define MARGIN (16 << 2)
2273 static av_always_inline
2275  VP8Frame *prev_frame, int is_vp7)
2276 {
2277  VP8Context *s = avctx->priv_data;
2278  int mb_x, mb_y;
2279 
2280  s->mv_bounds.mv_min.y = -MARGIN;
2281  s->mv_bounds.mv_max.y = ((s->mb_height - 1) << 6) + MARGIN;
2282  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
2284  ((s->mb_width + 1) * (mb_y + 1) + 1);
2285  int mb_xy = mb_y * s->mb_width;
2286 
2287  AV_WN32A(s->intra4x4_pred_mode_left, DC_PRED * 0x01010101);
2288 
2289  s->mv_bounds.mv_min.x = -MARGIN;
2290  s->mv_bounds.mv_max.x = ((s->mb_width - 1) << 6) + MARGIN;
2291 
2292  if (vpX_rac_is_end(&s->c)) {
2293  return AVERROR_INVALIDDATA;
2294  }
2295  for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
2296  if (mb_y == 0)
2297  AV_WN32A((mb - s->mb_width - 1)->intra4x4_pred_mode_top,
2298  DC_PRED * 0x01010101);
2299  decode_mb_mode(s, &s->mv_bounds, mb, mb_x, mb_y, curframe->seg_map->data + mb_xy,
2300  prev_frame && prev_frame->seg_map ?
2301  prev_frame->seg_map->data + mb_xy : NULL, 1, is_vp7);
2302  s->mv_bounds.mv_min.x -= 64;
2303  s->mv_bounds.mv_max.x -= 64;
2304  }
2305  s->mv_bounds.mv_min.y -= 64;
2306  s->mv_bounds.mv_max.y -= 64;
2307  }
2308  return 0;
2309 }
2310 
2311 static int vp7_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame,
2312  VP8Frame *prev_frame)
2313 {
2314  return vp78_decode_mv_mb_modes(avctx, cur_frame, prev_frame, IS_VP7);
2315 }
2316 
2317 static int vp8_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame,
2318  VP8Frame *prev_frame)
2319 {
2320  return vp78_decode_mv_mb_modes(avctx, cur_frame, prev_frame, IS_VP8);
2321 }
2322 
2323 #if HAVE_THREADS
2324 #define check_thread_pos(td, otd, mb_x_check, mb_y_check) \
2325  do { \
2326  int tmp = (mb_y_check << 16) | (mb_x_check & 0xFFFF); \
2327  if (atomic_load(&otd->thread_mb_pos) < tmp) { \
2328  pthread_mutex_lock(&otd->lock); \
2329  atomic_store(&td->wait_mb_pos, tmp); \
2330  do { \
2331  if (atomic_load(&otd->thread_mb_pos) >= tmp) \
2332  break; \
2333  pthread_cond_wait(&otd->cond, &otd->lock); \
2334  } while (1); \
2335  atomic_store(&td->wait_mb_pos, INT_MAX); \
2336  pthread_mutex_unlock(&otd->lock); \
2337  } \
2338  } while (0)
2339 
2340 #define update_pos(td, mb_y, mb_x) \
2341  do { \
2342  int pos = (mb_y << 16) | (mb_x & 0xFFFF); \
2343  int sliced_threading = (avctx->active_thread_type == FF_THREAD_SLICE) && \
2344  (num_jobs > 1); \
2345  int is_null = !next_td || !prev_td; \
2346  int pos_check = (is_null) ? 1 : \
2347  (next_td != td && pos >= atomic_load(&next_td->wait_mb_pos)) || \
2348  (prev_td != td && pos >= atomic_load(&prev_td->wait_mb_pos)); \
2349  atomic_store(&td->thread_mb_pos, pos); \
2350  if (sliced_threading && pos_check) { \
2351  pthread_mutex_lock(&td->lock); \
2352  pthread_cond_broadcast(&td->cond); \
2353  pthread_mutex_unlock(&td->lock); \
2354  } \
2355  } while (0)
2356 #else
2357 #define check_thread_pos(td, otd, mb_x_check, mb_y_check) while(0)
2358 #define update_pos(td, mb_y, mb_x) while(0)
2359 #endif
2360 
2362  int jobnr, int threadnr, int is_vp7)
2363 {
2364  VP8Context *s = avctx->priv_data;
2365  VP8ThreadData *prev_td, *next_td, *td = &s->thread_data[threadnr];
2366  int mb_y = atomic_load(&td->thread_mb_pos) >> 16;
2367  int mb_x, mb_xy = mb_y * s->mb_width;
2368  int num_jobs = s->num_jobs;
2369  VP8Frame *curframe = s->curframe, *prev_frame = s->prev_frame;
2370  VP56RangeCoder *c = &s->coeff_partition[mb_y & (s->num_coeff_partitions - 1)];
2371  VP8Macroblock *mb;
2372  uint8_t *dst[3] = {
2373  curframe->tf.f->data[0] + 16 * mb_y * s->linesize,
2374  curframe->tf.f->data[1] + 8 * mb_y * s->uvlinesize,
2375  curframe->tf.f->data[2] + 8 * mb_y * s->uvlinesize
2376  };
2377 
2378  if (vpX_rac_is_end(c))
2379  return AVERROR_INVALIDDATA;
2380 
2381  if (mb_y == 0)
2382  prev_td = td;
2383  else
2384  prev_td = &s->thread_data[(jobnr + num_jobs - 1) % num_jobs];
2385  if (mb_y == s->mb_height - 1)
2386  next_td = td;
2387  else
2388  next_td = &s->thread_data[(jobnr + 1) % num_jobs];
2389  if (s->mb_layout == 1)
2390  mb = s->macroblocks_base + ((s->mb_width + 1) * (mb_y + 1) + 1);
2391  else {
2392  // Make sure the previous frame has read its segmentation map,
2393  // if we re-use the same map.
2394  if (prev_frame && s->segmentation.enabled &&
2396  ff_thread_await_progress(&prev_frame->tf, mb_y, 0);
2397  mb = s->macroblocks + (s->mb_height - mb_y - 1) * 2;
2398  memset(mb - 1, 0, sizeof(*mb)); // zero left macroblock
2399  AV_WN32A(s->intra4x4_pred_mode_left, DC_PRED * 0x01010101);
2400  }
2401 
2402  if (!is_vp7 || mb_y == 0)
2403  memset(td->left_nnz, 0, sizeof(td->left_nnz));
2404 
2405  td->mv_bounds.mv_min.x = -MARGIN;
2406  td->mv_bounds.mv_max.x = ((s->mb_width - 1) << 6) + MARGIN;
2407 
2408  for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
2409  if (vpX_rac_is_end(c))
2410  return AVERROR_INVALIDDATA;
2411  // Wait for previous thread to read mb_x+2, and reach mb_y-1.
2412  if (prev_td != td) {
2413  if (threadnr != 0) {
2414  check_thread_pos(td, prev_td,
2415  mb_x + (is_vp7 ? 2 : 1),
2416  mb_y - (is_vp7 ? 2 : 1));
2417  } else {
2418  check_thread_pos(td, prev_td,
2419  mb_x + (is_vp7 ? 2 : 1) + s->mb_width + 3,
2420  mb_y - (is_vp7 ? 2 : 1));
2421  }
2422  }
2423 
2424  s->vdsp.prefetch(dst[0] + (mb_x & 3) * 4 * s->linesize + 64,
2425  s->linesize, 4);
2426  s->vdsp.prefetch(dst[1] + (mb_x & 7) * s->uvlinesize + 64,
2427  dst[2] - dst[1], 2);
2428 
2429  if (!s->mb_layout)
2430  decode_mb_mode(s, &td->mv_bounds, mb, mb_x, mb_y, curframe->seg_map->data + mb_xy,
2431  prev_frame && prev_frame->seg_map ?
2432  prev_frame->seg_map->data + mb_xy : NULL, 0, is_vp7);
2433 
2434  prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_PREVIOUS);
2435 
2436  if (!mb->skip)
2437  decode_mb_coeffs(s, td, c, mb, s->top_nnz[mb_x], td->left_nnz, is_vp7);
2438 
2439  if (mb->mode <= MODE_I4x4)
2440  intra_predict(s, td, dst, mb, mb_x, mb_y, is_vp7);
2441  else
2442  inter_predict(s, td, dst, mb, mb_x, mb_y);
2443 
2444  prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN);
2445 
2446  if (!mb->skip) {
2447  idct_mb(s, td, dst, mb);
2448  } else {
2449  AV_ZERO64(td->left_nnz);
2450  AV_WN64(s->top_nnz[mb_x], 0); // array of 9, so unaligned
2451 
2452  /* Reset DC block predictors if they would exist
2453  * if the mb had coefficients */
2454  if (mb->mode != MODE_I4x4 && mb->mode != VP8_MVMODE_SPLIT) {
2455  td->left_nnz[8] = 0;
2456  s->top_nnz[mb_x][8] = 0;
2457  }
2458  }
2459 
2460  if (s->deblock_filter)
2461  filter_level_for_mb(s, mb, &td->filter_strength[mb_x], is_vp7);
2462 
2463  if (s->deblock_filter && num_jobs != 1 && threadnr == num_jobs - 1) {
2464  if (s->filter.simple)
2465  backup_mb_border(s->top_border[mb_x + 1], dst[0],
2466  NULL, NULL, s->linesize, 0, 1);
2467  else
2468  backup_mb_border(s->top_border[mb_x + 1], dst[0],
2469  dst[1], dst[2], s->linesize, s->uvlinesize, 0);
2470  }
2471 
2472  prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN2);
2473 
2474  dst[0] += 16;
2475  dst[1] += 8;
2476  dst[2] += 8;
2477  td->mv_bounds.mv_min.x -= 64;
2478  td->mv_bounds.mv_max.x -= 64;
2479 
2480  if (mb_x == s->mb_width + 1) {
2481  update_pos(td, mb_y, s->mb_width + 3);
2482  } else {
2483  update_pos(td, mb_y, mb_x);
2484  }
2485  }
2486  return 0;
2487 }
2488 
2489 static int vp7_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata,
2490  int jobnr, int threadnr)
2491 {
2492  return decode_mb_row_no_filter(avctx, tdata, jobnr, threadnr, 1);
2493 }
2494 
2495 static int vp8_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata,
2496  int jobnr, int threadnr)
2497 {
2498  return decode_mb_row_no_filter(avctx, tdata, jobnr, threadnr, 0);
2499 }
2500 
2501 static av_always_inline void filter_mb_row(AVCodecContext *avctx, void *tdata,
2502  int jobnr, int threadnr, int is_vp7)
2503 {
2504  VP8Context *s = avctx->priv_data;
2505  VP8ThreadData *td = &s->thread_data[threadnr];
2506  int mb_x, mb_y = atomic_load(&td->thread_mb_pos) >> 16, num_jobs = s->num_jobs;
2507  AVFrame *curframe = s->curframe->tf.f;
2508  VP8Macroblock *mb;
2509  VP8ThreadData *prev_td, *next_td;
2510  uint8_t *dst[3] = {
2511  curframe->data[0] + 16 * mb_y * s->linesize,
2512  curframe->data[1] + 8 * mb_y * s->uvlinesize,
2513  curframe->data[2] + 8 * mb_y * s->uvlinesize
2514  };
2515 
2516  if (s->mb_layout == 1)
2517  mb = s->macroblocks_base + ((s->mb_width + 1) * (mb_y + 1) + 1);
2518  else
2519  mb = s->macroblocks + (s->mb_height - mb_y - 1) * 2;
2520 
2521  if (mb_y == 0)
2522  prev_td = td;
2523  else
2524  prev_td = &s->thread_data[(jobnr + num_jobs - 1) % num_jobs];
2525  if (mb_y == s->mb_height - 1)
2526  next_td = td;
2527  else
2528  next_td = &s->thread_data[(jobnr + 1) % num_jobs];
2529 
2530  for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb++) {
2531  VP8FilterStrength *f = &td->filter_strength[mb_x];
2532  if (prev_td != td)
2533  check_thread_pos(td, prev_td,
2534  (mb_x + 1) + (s->mb_width + 3), mb_y - 1);
2535  if (next_td != td)
2536  if (next_td != &s->thread_data[0])
2537  check_thread_pos(td, next_td, mb_x + 1, mb_y + 1);
2538 
2539  if (num_jobs == 1) {
2540  if (s->filter.simple)
2541  backup_mb_border(s->top_border[mb_x + 1], dst[0],
2542  NULL, NULL, s->linesize, 0, 1);
2543  else
2544  backup_mb_border(s->top_border[mb_x + 1], dst[0],
2545  dst[1], dst[2], s->linesize, s->uvlinesize, 0);
2546  }
2547 
2548  if (s->filter.simple)
2549  filter_mb_simple(s, dst[0], f, mb_x, mb_y);
2550  else
2551  filter_mb(s, dst, f, mb_x, mb_y, is_vp7);
2552  dst[0] += 16;
2553  dst[1] += 8;
2554  dst[2] += 8;
2555 
2556  update_pos(td, mb_y, (s->mb_width + 3) + mb_x);
2557  }
2558 }
2559 
2560 static void vp7_filter_mb_row(AVCodecContext *avctx, void *tdata,
2561  int jobnr, int threadnr)
2562 {
2563  filter_mb_row(avctx, tdata, jobnr, threadnr, 1);
2564 }
2565 
2566 static void vp8_filter_mb_row(AVCodecContext *avctx, void *tdata,
2567  int jobnr, int threadnr)
2568 {
2569  filter_mb_row(avctx, tdata, jobnr, threadnr, 0);
2570 }
2571 
2572 static av_always_inline
2573 int vp78_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr,
2574  int threadnr, int is_vp7)
2575 {
2576  VP8Context *s = avctx->priv_data;
2577  VP8ThreadData *td = &s->thread_data[jobnr];
2578  VP8ThreadData *next_td = NULL, *prev_td = NULL;
2579  VP8Frame *curframe = s->curframe;
2580  int mb_y, num_jobs = s->num_jobs;
2581  int ret;
2582 
2583  td->thread_nr = threadnr;
2584  td->mv_bounds.mv_min.y = -MARGIN - 64 * threadnr;
2585  td->mv_bounds.mv_max.y = ((s->mb_height - 1) << 6) + MARGIN - 64 * threadnr;
2586  for (mb_y = jobnr; mb_y < s->mb_height; mb_y += num_jobs) {
2587  atomic_store(&td->thread_mb_pos, mb_y << 16);
2588  ret = s->decode_mb_row_no_filter(avctx, tdata, jobnr, threadnr);
2589  if (ret < 0) {
2590  update_pos(td, s->mb_height, INT_MAX & 0xFFFF);
2591  return ret;
2592  }
2593  if (s->deblock_filter)
2594  s->filter_mb_row(avctx, tdata, jobnr, threadnr);
2595  update_pos(td, mb_y, INT_MAX & 0xFFFF);
2596 
2597  td->mv_bounds.mv_min.y -= 64 * num_jobs;
2598  td->mv_bounds.mv_max.y -= 64 * num_jobs;
2599 
2600  if (avctx->active_thread_type == FF_THREAD_FRAME)
2601  ff_thread_report_progress(&curframe->tf, mb_y, 0);
2602  }
2603 
2604  return 0;
2605 }
2606 
2607 static int vp7_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata,
2608  int jobnr, int threadnr)
2609 {
2610  return vp78_decode_mb_row_sliced(avctx, tdata, jobnr, threadnr, IS_VP7);
2611 }
2612 
2613 static int vp8_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata,
2614  int jobnr, int threadnr)
2615 {
2616  return vp78_decode_mb_row_sliced(avctx, tdata, jobnr, threadnr, IS_VP8);
2617 }
2618 
2619 static av_always_inline
2620 int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
2621  AVPacket *avpkt, int is_vp7)
2622 {
2623  VP8Context *s = avctx->priv_data;
2624  int ret, i, referenced, num_jobs;
2625  enum AVDiscard skip_thresh;
2626  VP8Frame *av_uninit(curframe), *prev_frame;
2627 
2628  if (is_vp7)
2629  ret = vp7_decode_frame_header(s, avpkt->data, avpkt->size);
2630  else
2631  ret = vp8_decode_frame_header(s, avpkt->data, avpkt->size);
2632 
2633  if (ret < 0)
2634  goto err;
2635 
2636  if (s->actually_webp) {
2637  // avctx->pix_fmt already set in caller.
2638  } else if (!is_vp7 && s->pix_fmt == AV_PIX_FMT_NONE) {
2639  s->pix_fmt = get_pixel_format(s);
2640  if (s->pix_fmt < 0) {
2641  ret = AVERROR(EINVAL);
2642  goto err;
2643  }
2644  avctx->pix_fmt = s->pix_fmt;
2645  }
2646 
2647  prev_frame = s->framep[VP56_FRAME_CURRENT];
2648 
2649  referenced = s->update_last || s->update_golden == VP56_FRAME_CURRENT ||
2651 
2652  skip_thresh = !referenced ? AVDISCARD_NONREF
2653  : !s->keyframe ? AVDISCARD_NONKEY
2654  : AVDISCARD_ALL;
2655 
2656  if (avctx->skip_frame >= skip_thresh) {
2657  s->invisible = 1;
2658  memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4);
2659  goto skip_decode;
2660  }
2661  s->deblock_filter = s->filter.level && avctx->skip_loop_filter < skip_thresh;
2662 
2663  // release no longer referenced frames
2664  for (i = 0; i < 5; i++)
2665  if (s->frames[i].tf.f->buf[0] &&
2666  &s->frames[i] != prev_frame &&
2667  &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
2668  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
2669  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2])
2670  vp8_release_frame(s, &s->frames[i]);
2671 
2672  curframe = s->framep[VP56_FRAME_CURRENT] = vp8_find_free_buffer(s);
2673 
2674  if (!s->colorspace)
2675  avctx->colorspace = AVCOL_SPC_BT470BG;
2676  if (s->fullrange)
2677  avctx->color_range = AVCOL_RANGE_JPEG;
2678  else
2679  avctx->color_range = AVCOL_RANGE_MPEG;
2680 
2681  /* Given that arithmetic probabilities are updated every frame, it's quite
2682  * likely that the values we have on a random interframe are complete
2683  * junk if we didn't start decode on a keyframe. So just don't display
2684  * anything rather than junk. */
2685  if (!s->keyframe && (!s->framep[VP56_FRAME_PREVIOUS] ||
2686  !s->framep[VP56_FRAME_GOLDEN] ||
2687  !s->framep[VP56_FRAME_GOLDEN2])) {
2688  av_log(avctx, AV_LOG_WARNING,
2689  "Discarding interframe without a prior keyframe!\n");
2690  ret = AVERROR_INVALIDDATA;
2691  goto err;
2692  }
2693 
2694  curframe->tf.f->key_frame = s->keyframe;
2695  curframe->tf.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I
2697  if ((ret = vp8_alloc_frame(s, curframe, referenced)) < 0)
2698  goto err;
2699 
2700  // check if golden and altref are swapped
2701  if (s->update_altref != VP56_FRAME_NONE)
2703  else
2705 
2706  if (s->update_golden != VP56_FRAME_NONE)
2708  else
2710 
2711  if (s->update_last)
2712  s->next_framep[VP56_FRAME_PREVIOUS] = curframe;
2713  else
2715 
2716  s->next_framep[VP56_FRAME_CURRENT] = curframe;
2717 
2718  ff_thread_finish_setup(avctx);
2719 
2720  if (avctx->hwaccel) {
2721  ret = avctx->hwaccel->start_frame(avctx, avpkt->data, avpkt->size);
2722  if (ret < 0)
2723  goto err;
2724 
2725  ret = avctx->hwaccel->decode_slice(avctx, avpkt->data, avpkt->size);
2726  if (ret < 0)
2727  goto err;
2728 
2729  ret = avctx->hwaccel->end_frame(avctx);
2730  if (ret < 0)
2731  goto err;
2732 
2733  } else {
2734  s->linesize = curframe->tf.f->linesize[0];
2735  s->uvlinesize = curframe->tf.f->linesize[1];
2736 
2737  memset(s->top_nnz, 0, s->mb_width * sizeof(*s->top_nnz));
2738  /* Zero macroblock structures for top/top-left prediction
2739  * from outside the frame. */
2740  if (!s->mb_layout)
2741  memset(s->macroblocks + s->mb_height * 2 - 1, 0,
2742  (s->mb_width + 1) * sizeof(*s->macroblocks));
2743  if (!s->mb_layout && s->keyframe)
2744  memset(s->intra4x4_pred_mode_top, DC_PRED, s->mb_width * 4);
2745 
2746  memset(s->ref_count, 0, sizeof(s->ref_count));
2747 
2748  if (s->mb_layout == 1) {
2749  // Make sure the previous frame has read its segmentation map,
2750  // if we re-use the same map.
2751  if (prev_frame && s->segmentation.enabled &&
2753  ff_thread_await_progress(&prev_frame->tf, 1, 0);
2754  if (is_vp7)
2755  ret = vp7_decode_mv_mb_modes(avctx, curframe, prev_frame);
2756  else
2757  ret = vp8_decode_mv_mb_modes(avctx, curframe, prev_frame);
2758  if (ret < 0)
2759  goto err;
2760  }
2761 
2762  if (avctx->active_thread_type == FF_THREAD_FRAME)
2763  num_jobs = 1;
2764  else
2765  num_jobs = FFMIN(s->num_coeff_partitions, avctx->thread_count);
2766  s->num_jobs = num_jobs;
2767  s->curframe = curframe;
2768  s->prev_frame = prev_frame;
2769  s->mv_bounds.mv_min.y = -MARGIN;
2770  s->mv_bounds.mv_max.y = ((s->mb_height - 1) << 6) + MARGIN;
2771  for (i = 0; i < MAX_THREADS; i++) {
2772  VP8ThreadData *td = &s->thread_data[i];
2773  atomic_init(&td->thread_mb_pos, 0);
2774  atomic_init(&td->wait_mb_pos, INT_MAX);
2775  }
2776  if (is_vp7)
2778  num_jobs);
2779  else
2781  num_jobs);
2782  }
2783 
2784  ff_thread_report_progress(&curframe->tf, INT_MAX, 0);
2785  memcpy(&s->framep[0], &s->next_framep[0], sizeof(s->framep[0]) * 4);
2786 
2787 skip_decode:
2788  // if future frames don't use the updated probabilities,
2789  // reset them to the values we saved
2790  if (!s->update_probabilities)
2791  s->prob[0] = s->prob[1];
2792 
2793  if (!s->invisible) {
2794  if ((ret = av_frame_ref(data, curframe->tf.f)) < 0)
2795  return ret;
2796  *got_frame = 1;
2797  }
2798 
2799  return avpkt->size;
2800 err:
2801  memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4);
2802  return ret;
2803 }
2804 
2805 int ff_vp8_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
2806  AVPacket *avpkt)
2807 {
2808  return vp78_decode_frame(avctx, data, got_frame, avpkt, IS_VP8);
2809 }
2810 
2811 #if CONFIG_VP7_DECODER
2812 static int vp7_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
2813  AVPacket *avpkt)
2814 {
2815  return vp78_decode_frame(avctx, data, got_frame, avpkt, IS_VP7);
2816 }
2817 #endif /* CONFIG_VP7_DECODER */
2818 
2820 {
2821  VP8Context *s = avctx->priv_data;
2822  int i;
2823 
2824  if (!s)
2825  return 0;
2826 
2827  vp8_decode_flush_impl(avctx, 1);
2828  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
2829  av_frame_free(&s->frames[i].tf.f);
2830 
2831  return 0;
2832 }
2833 
2835 {
2836  int i;
2837  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++) {
2838  s->frames[i].tf.f = av_frame_alloc();
2839  if (!s->frames[i].tf.f)
2840  return AVERROR(ENOMEM);
2841  }
2842  return 0;
2843 }
2844 
2845 static av_always_inline
2846 int vp78_decode_init(AVCodecContext *avctx, int is_vp7)
2847 {
2848  VP8Context *s = avctx->priv_data;
2849  int ret;
2850 
2851  s->avctx = avctx;
2852  s->vp7 = avctx->codec->id == AV_CODEC_ID_VP7;
2853  s->pix_fmt = AV_PIX_FMT_NONE;
2854  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
2855  avctx->internal->allocate_progress = 1;
2856 
2857  ff_videodsp_init(&s->vdsp, 8);
2858 
2859  ff_vp78dsp_init(&s->vp8dsp);
2860  if (CONFIG_VP7_DECODER && is_vp7) {
2862  ff_vp7dsp_init(&s->vp8dsp);
2865  } else if (CONFIG_VP8_DECODER && !is_vp7) {
2867  ff_vp8dsp_init(&s->vp8dsp);
2870  }
2871 
2872  /* does not change for VP8 */
2873  memcpy(s->prob[0].scan, ff_zigzag_scan, sizeof(s->prob[0].scan));
2874 
2875  if ((ret = vp8_init_frames(s)) < 0) {
2876  ff_vp8_decode_free(avctx);
2877  return ret;
2878  }
2879 
2880  return 0;
2881 }
2882 
2883 #if CONFIG_VP7_DECODER
2884 static int vp7_decode_init(AVCodecContext *avctx)
2885 {
2886  return vp78_decode_init(avctx, IS_VP7);
2887 }
2888 #endif /* CONFIG_VP7_DECODER */
2889 
2891 {
2892  return vp78_decode_init(avctx, IS_VP8);
2893 }
2894 
2895 #if CONFIG_VP8_DECODER
2896 #if HAVE_THREADS
2897 static av_cold int vp8_decode_init_thread_copy(AVCodecContext *avctx)
2898 {
2899  VP8Context *s = avctx->priv_data;
2900  int ret;
2901 
2902  s->avctx = avctx;
2903 
2904  if ((ret = vp8_init_frames(s)) < 0) {
2905  ff_vp8_decode_free(avctx);
2906  return ret;
2907  }
2908 
2909  return 0;
2910 }
2911 
2912 #define REBASE(pic) ((pic) ? (pic) - &s_src->frames[0] + &s->frames[0] : NULL)
2913 
2914 static int vp8_decode_update_thread_context(AVCodecContext *dst,
2915  const AVCodecContext *src)
2916 {
2917  VP8Context *s = dst->priv_data, *s_src = src->priv_data;
2918  int i;
2919 
2920  if (s->macroblocks_base &&
2921  (s_src->mb_width != s->mb_width || s_src->mb_height != s->mb_height)) {
2922  free_buffers(s);
2923  s->mb_width = s_src->mb_width;
2924  s->mb_height = s_src->mb_height;
2925  }
2926 
2927  s->pix_fmt = s_src->pix_fmt;
2928  s->prob[0] = s_src->prob[!s_src->update_probabilities];
2929  s->segmentation = s_src->segmentation;
2930  s->lf_delta = s_src->lf_delta;
2931  memcpy(s->sign_bias, s_src->sign_bias, sizeof(s->sign_bias));
2932 
2933  for (i = 0; i < FF_ARRAY_ELEMS(s_src->frames); i++) {
2934  if (s_src->frames[i].tf.f->buf[0]) {
2935  int ret = vp8_ref_frame(s, &s->frames[i], &s_src->frames[i]);
2936  if (ret < 0)
2937  return ret;
2938  }
2939  }
2940 
2941  s->framep[0] = REBASE(s_src->next_framep[0]);
2942  s->framep[1] = REBASE(s_src->next_framep[1]);
2943  s->framep[2] = REBASE(s_src->next_framep[2]);
2944  s->framep[3] = REBASE(s_src->next_framep[3]);
2945 
2946  return 0;
2947 }
2948 #endif /* HAVE_THREADS */
2949 #endif /* CONFIG_VP8_DECODER */
2950 
2951 #if CONFIG_VP7_DECODER
2953  .name = "vp7",
2954  .long_name = NULL_IF_CONFIG_SMALL("On2 VP7"),
2955  .type = AVMEDIA_TYPE_VIDEO,
2956  .id = AV_CODEC_ID_VP7,
2957  .priv_data_size = sizeof(VP8Context),
2958  .init = vp7_decode_init,
2959  .close = ff_vp8_decode_free,
2960  .decode = vp7_decode_frame,
2961  .capabilities = AV_CODEC_CAP_DR1,
2963 };
2964 #endif /* CONFIG_VP7_DECODER */
2965 
2966 #if CONFIG_VP8_DECODER
2968  .name = "vp8",
2969  .long_name = NULL_IF_CONFIG_SMALL("On2 VP8"),
2970  .type = AVMEDIA_TYPE_VIDEO,
2971  .id = AV_CODEC_ID_VP8,
2972  .priv_data_size = sizeof(VP8Context),
2974  .close = ff_vp8_decode_free,
2976  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
2979  .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp8_decode_init_thread_copy),
2980  .update_thread_context = ONLY_IF_THREADS_ENABLED(vp8_decode_update_thread_context),
2981  .hw_configs = (const AVCodecHWConfigInternal*[]) {
2982 #if CONFIG_VP8_VAAPI_HWACCEL
2983  HWACCEL_VAAPI(vp8),
2984 #endif
2985 #if CONFIG_VP8_NVDEC_HWACCEL
2986  HWACCEL_NVDEC(vp8),
2987 #endif
2988  NULL
2989  },
2990 };
2991 #endif /* CONFIG_VP7_DECODER */
const uint8_t * input
Definition: vp8.h:255
uint8_t golden
Definition: vp8.h:274
atomic_int wait_mb_pos
Definition: vp8.h:130
uint8_t inner_limit
Definition: vp8.h:77
#define VERT_PRED8x8
Definition: h264pred.h:70
VP8Macroblock * macroblocks
Definition: vp8.h:192
static const uint8_t vp8_dc_qlookup[VP8_MAX_QUANT+1]
Definition: vp8data.h:711
static av_always_inline void intra_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb, int mb_x, int mb_y, int is_vp7)
Definition: vp8.c:1667
static const uint8_t vp8_submv_prob[5][3]
Definition: vp8data.h:153
static const uint16_t vp7_ydc_qlookup[]
Definition: vp8data.h:778
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
#define NULL
Definition: coverity.c:32
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1370
const struct AVCodec * codec
Definition: avcodec.h:1577
discard all frames except keyframes
Definition: avcodec.h:813
void(* vp8_idct_dc_add)(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
Definition: vp8dsp.h:41
Definition: vp9.h:47
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static const uint8_t vp7_mv_default_prob[2][17]
Definition: vp8data.h:744
#define DC_128_PRED8x8
Definition: h264pred.h:76
static av_always_inline int pthread_mutex_destroy(pthread_mutex_t *mutex)
Definition: os2threads.h:108
static void copy(const float *p1, float *p2, const int length)
(only used in prediction) no split MVs
Definition: vp8.h:72
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:125
unsigned int code_word
Definition: vp56.h:91
void ff_vp7dsp_init(VP8DSPContext *c)
static void update_lf_deltas(VP8Context *s)
Definition: vp8.c:281
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
#define atomic_store(object, desired)
Definition: stdatomic.h:85
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
static void flush(AVCodecContext *avctx)
static int vp8_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2495
static const uint8_t vp7_pred4x4_mode[]
Definition: vp8data.h:33
int8_t sign_bias[4]
one state [0, 1] per ref frame type
Definition: vp8.h:169
#define HWACCEL_NVDEC(codec)
Definition: hwaccel.h:71
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:1756
static av_always_inline int inter_predict_dc(int16_t block[16], int16_t pred[2])
Definition: vp8.c:1392
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
void(* vp8_idct_dc_add4y)(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride)
Definition: vp8dsp.h:42
#define VP7_MV_PRED_COUNT
Definition: vp8data.h:68
AVFrame * f
Definition: thread.h:35
static av_always_inline int vp8_rac_get_tree(VP56RangeCoder *c, const int8_t(*tree)[2], const uint8_t *probs)
Definition: vp56.h:394
uint8_t feature_value[4][4]
Definition: vp8.h:341
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:104
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:486
uint32_t value
Definition: vp8.h:257
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
ptrdiff_t linesize
Definition: vp8.h:160
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have add an init_thread_copy() which re-allocates them for other threads.Add AV_CODEC_CAP_FRAME_THREADS to the codec capabilities.There will be very little speed gain at this point but it should work.If there are inter-frame dependencies
struct VP8Context::@189 quant
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 ...
Definition: pixfmt.h:503
#define avpriv_request_sample(...)
uint8_t * intra4x4_pred_mode_top
Definition: vp8.h:194
uint8_t mbskip_enabled
Definition: vp8.h:165
static VP56Frame ref_to_update(VP8Context *s, int update, VP56Frame ref)
Determine which buffers golden and altref should be updated with after this frame.
Definition: vp8.c:402
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2203
static int vp7_decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, uint8_t *token_prob, int16_t qmul[2], const uint8_t scan[16])
Definition: vp8.c:1414
uint8_t token[4][16][3][NUM_DCT_TOKENS-1]
Definition: vp8.h:277
uint8_t scan[16]
Definition: vp8.h:279
int size
Definition: avcodec.h:1481
int uvac_delta
Definition: vp8.h:216
static void vp8_decode_flush(AVCodecContext *avctx)
Definition: vp8.c:141
#define MARGIN
Definition: vp8.c:2272
vp8_mc_func put_vp8_bilinear_pixels_tab[3][3][3]
Definition: vp8dsp.h:81
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:36
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1778
VP56mv bmv[16]
Definition: vp8.h:93
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:411
struct VP8Context::@188 qmat[4]
Macroblocks can have one of 4 different quants in a frame when segmentation is enabled.
void(* vp8_v_loop_filter8uv_inner)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:62
av_cold void ff_vp78dsp_init(VP8DSPContext *dsp)
Definition: vp8dsp.c:666
uint8_t inner_filter
Definition: vp8.h:78
static const int8_t vp8_pred8x8c_tree[3][2]
Definition: vp8data.h:180
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:236
uint8_t segmentid[3]
Definition: vp8.h:270
static const uint16_t vp7_y2dc_qlookup[]
Definition: vp8data.h:803
discard all
Definition: avcodec.h:814
void(* prefetch)(uint8_t *buf, ptrdiff_t stride, int h)
Prefetch memory into cache (if supported by hardware).
Definition: videodsp.h:76
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:2732
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them.reget_buffer() and buffer age optimizations no longer work.*The contents of buffers must not be written to after ff_thread_report_progress() has been called on them.This includes draw_edges().Porting codecs to frame threading
static void copy_chroma(AVFrame *dst, AVFrame *src, int width, int height)
Definition: vp8.c:477
#define src
Definition: vp8dsp.c:254
#define HOR_PRED8x8
Definition: h264pred.h:69
AVCodec.
Definition: avcodec.h:3492
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_RL16
Definition: bytestream.h:87
uint8_t sharpness
Definition: vp8.h:189
#define AV_WN32A(p, v)
Definition: intreadwrite.h:538
2 16x8 blocks (vertical)
Definition: vp8.h:68
#define AV_COPY32(d, s)
Definition: intreadwrite.h:601
static av_always_inline int pthread_cond_destroy(pthread_cond_t *cond)
Definition: os2threads.h:140
int update_probabilities
If this flag is not set, all the probability updates are discarded after this frame is decoded...
Definition: vp8.h:292
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
VP8intmv mv_min
Definition: vp8.h:102
VP8Frame * framep[4]
Definition: vp8.h:153
static int vp8_decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, uint8_t *token_prob, int16_t qmul[2])
Definition: vp8.c:1426
static void vp7_filter_mb_row(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2560
#define VP7_MVC_SIZE
Definition: vp8.c:443
static int vp7_read_mv_component(VP56RangeCoder *c, const uint8_t *p)
Definition: vp8.c:885
vp8_mc_func put_vp8_epel_pixels_tab[3][3][3]
first dimension: 4-log2(width) second dimension: 0 if no vertical interpolation is needed; 1 4-tap ve...
Definition: vp8dsp.h:80
static av_always_inline const uint8_t * get_submv_prob(uint32_t left, uint32_t top, int is_vp7)
Definition: vp8.c:896
static const uint8_t vp8_pred8x8c_prob_inter[3]
Definition: vp8data.h:189
static av_always_inline int decode_block_coeffs(VP56RangeCoder *c, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, int zero_nhood, int16_t qmul[2], const uint8_t scan[16], int vp7)
Definition: vp8.c:1451
uint8_t(* top_nnz)[9]
Definition: vp8.h:245
int num_jobs
Definition: vp8.h:310
static const uint8_t vp8_mbsplits[5][16]
Definition: vp8data.h:127
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:3043
#define AV_RN32A(p)
Definition: intreadwrite.h:526
uint8_t pred16x16[4]
Definition: vp8.h:275
static const int8_t vp8_pred16x16_tree_intra[4][2]
Definition: vp8data.h:47
uint8_t update_map
Definition: vp8.h:180
#define PLANE_PRED8x8
Definition: h264pred.h:71
uint16_t mb_height
Definition: vp8.h:159
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:63
int16_t y
Definition: vp56.h:68
int update_golden
VP56_FRAME_NONE if not updated, or which frame to copy if so.
Definition: vp8.h:285
static av_always_inline void filter_mb_row(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr, int is_vp7)
Definition: vp8.c:2501
uint8_t intra4x4_pred_mode_top[4]
Definition: vp8.h:91
static enum AVPixelFormat get_pixel_format(VP8Context *s)
Definition: vp8.c:170
static av_always_inline void clamp_mv(VP8mvbounds *s, VP56mv *dst, const VP56mv *src)
Definition: vp8.c:846
The exact code depends on how similar the blocks are and how related they are to the block
uint8_t
static int vp7_update_dimensions(VP8Context *s, int width, int height)
Definition: vp8.c:248
#define av_cold
Definition: attributes.h:82
ptrdiff_t uvlinesize
Definition: vp8.h:161
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:189
#define mb
#define DC_PRED8x8
Definition: h264pred.h:68
int fade_present
Fade bit present in bitstream (VP7)
Definition: vp8.h:326
static av_always_inline void vp7_decode_mvs(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int layout)
Definition: vp8.c:1014
static VP8Frame * vp8_find_free_buffer(VP8Context *s)
Definition: vp8.c:146
uint8_t ref_frame
Definition: vp8.h:86
static av_always_inline int check_intra_pred4x4_mode_emuedge(int mode, int mb_x, int mb_y, int *copy_buf, int vp7)
Definition: vp8.c:1631
#define f(width, name)
Definition: cbs_vp9.c:255
void(* vp8_luma_dc_wht_dc)(int16_t block[4][4][16], int16_t dc[16])
Definition: vp8dsp.h:39
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
void(* vp8_idct_dc_add4uv)(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride)
Definition: vp8dsp.h:44
Multithreading support functions.
Definition: vp9.h:46
#define TOP_DC_PRED8x8
Definition: h264pred.h:75
int ff_vp8_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: vp8.c:2805
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:443
uint8_t mvc[2][19]
Definition: vp8.h:278
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:252
VP56mv mv
Definition: vp8.h:92
int8_t base_quant[4]
Definition: vp8.h:182
static const uint8_t vp8_mv_update_prob[2][19]
Definition: vp8data.h:733
void(* pred8x8[4+3+4])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:97
int update_last
update VP56_FRAME_PREVIOUS with the current one
Definition: vp8.h:284
#define height
uint8_t * data
Definition: avcodec.h:1480
int8_t yoffset
Definition: vp8data.h:62
int ydc_delta
Definition: vp8.h:212
int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
Definition: utils.c:1821
static av_always_inline int vpX_rac_is_end(VP56RangeCoder *c)
vp5689 returns 1 if the end of the stream has been reached, 0 otherwise.
Definition: vp56.h:237
static void parse_segment_info(VP8Context *s)
Definition: vp8.c:259
ptrdiff_t size
Definition: opengl_enc.c:100
VP8Frame * prev_frame
Definition: vp8.h:156
int num_coeff_partitions
All coefficients are contained in separate arith coding contexts.
Definition: vp8.h:298
static const uint8_t vp8_token_default_probs[4][8][3][NUM_DCT_TOKENS-1]
Definition: vp8data.h:369
vp8_mc_func put_pixels_tab[3][3][3]
Definition: vp8.h:304
void(* pred4x4[9+3+3])(uint8_t *src, const uint8_t *topright, ptrdiff_t stride)
Definition: h264pred.h:93
void(* vp8_v_loop_filter8uv)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:52
#define AV_COPY64(d, s)
Definition: intreadwrite.h:605
uint8_t feature_index_prob[4][3]
Definition: vp8.h:340
uint8_t intra4x4_pred_mode_mb[16]
Definition: vp8.h:90
#define av_log(a,...)
static av_always_inline int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt, int is_vp7)
Definition: vp8.c:2620
#define prob(name, subs,...)
Definition: cbs_vp9.c:374
uint8_t intra4x4_pred_mode_left[4]
Definition: vp8.h:195
#define VERT_VP8_PRED
for VP8, VERT_PRED is the average of
Definition: h264pred.h:60
struct VP8Context::@191 coder_state_at_header_end
uint32_t range
Definition: vp8.h:256
uint8_t colorspace
0 is the only value allowed (meaning bt601)
Definition: vp8.h:307
static const VP56mv * get_bmv_ptr(const VP8Macroblock *mb, int subblock)
Definition: vp8.c:1008
enum AVCodecID id
Definition: avcodec.h:3506
static const uint8_t vp8_mbsplit_count[4]
Definition: vp8data.h:142
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
static const int8_t vp8_coeff_band_indexes[8][10]
Definition: vp8data.h:331
#define td
Definition: regdef.h:70
H264PredContext hpc
Definition: vp8.h:303
Definition: vp8.h:138
static const uint8_t vp8_pred4x4_mode[]
Definition: vp8data.h:40
static av_always_inline void prefetch_motion(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int mb_xy, int ref)
Definition: vp8.c:1951
uint8_t absolute_vals
Definition: vp8.h:179
uint16_t mb_width
Definition: vp8.h:158
static const uint8_t vp8_dct_cat2_prob[]
Definition: vp8data.h:345
static const uint8_t vp8_mv_default_prob[2][19]
Definition: vp8data.h:755
int y2dc_delta
Definition: vp8.h:213
#define atomic_load(object)
Definition: stdatomic.h:93
#define FF_SIGNBIT(x)
Definition: internal.h:88
static av_always_inline void update(SilenceDetectContext *s, AVFrame *insamples, int is_silence, int current_sample, int64_t nb_samples_notify, AVRational time_base)
uint8_t last
Definition: vp8.h:273
static const int sizes[][2]
Definition: img2dec.c:53
static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
Definition: vp8.c:705
static void fade(uint8_t *dst, ptrdiff_t dst_linesize, const uint8_t *src, ptrdiff_t src_linesize, int width, int height, int alpha, int beta)
Definition: vp8.c:488
uint8_t mode
Definition: vp8.h:85
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
static av_always_inline int check_tm_pred8x8_mode(int mode, int mb_x, int mb_y, int vp7)
Definition: vp8.c:1596
static int vp8_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2613
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:2846
const char * r
Definition: vf_curves.c:114
void(* vp8_idct_add)(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
Definition: vp8dsp.h:40
const uint8_t ff_zigzag_scan[16+1]
Definition: mathtables.c:109
VP8 compatible video decoder.
static const uint8_t vp8_mbfirstidx[4][16]
Definition: vp8data.h:135
AVCodecContext * avctx
Definition: vp8.h:149
VP8mvbounds mv_bounds
Definition: vp8.h:167
#define EDGE_EMU_LINESIZE
Definition: vp8.h:132
uint16_t inter_dc_pred[2][2]
Interframe DC prediction (VP7) [0] VP56_FRAME_PREVIOUS [1] VP56_FRAME_GOLDEN.
Definition: vp8.h:333
VideoDSPContext vdsp
Definition: vp8.h:301
const char * name
Name of the codec implementation.
Definition: avcodec.h:3499
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_RL24
Definition: bytestream.h:87
VP8Macroblock * macroblocks_base
Definition: vp8.h:282
static av_always_inline void vp8_mc_part(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], ThreadFrame *ref_frame, int x_off, int y_off, int bx_off, int by_off, int block_w, int block_h, int width, int height, VP56mv *mv)
Definition: vp8.c:1913
static av_always_inline void decode_mb_mode(VP8Context *s, VP8mvbounds *mv_bounds, VP8Macroblock *mb, int mb_x, int mb_y, uint8_t *segment, uint8_t *ref, int layout, int is_vp7)
Definition: vp8.c:1240
static const uint8_t vp8_pred4x4_prob_inter[9]
Definition: vp8data.h:192
uint8_t edge_emu_buffer[21 *EDGE_EMU_LINESIZE]
Definition: vp8.h:133
int16_t block[6][4][16]
Definition: vp8.h:107
static av_always_inline int decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, uint8_t *token_prob, int16_t qmul[2], const uint8_t scan[16], int vp7)
Definition: vp8.c:1332
static const int vp7_mode_contexts[31][4]
Definition: vp8data.h:84
int yac_qi
Definition: vp8.h:211
static void vp8_filter_mb_row(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2566
static void vp7_get_quants(VP8Context *s)
Definition: vp8.c:337
#define FFMAX(a, b)
Definition: common.h:94
uint8_t keyframe
Definition: vp8.h:163
#define fail()
Definition: checkasm.h:122
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: avcodec.h:1040
int x
Definition: vp8.h:97
static int vp8_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame, VP8Frame *prev_frame)
Definition: vp8.c:2317
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:225
VP56Frame
Definition: vp56.h:40
int16_t luma_qmul[2]
Definition: vp8.h:204
static const uint8_t vp8_pred16x16_prob_inter[4]
Definition: vp8data.h:164
Definition: hls.c:68
useful rectangle filling function
int ff_vp56_init_range_decoder(VP56RangeCoder *c, const uint8_t *buf, int buf_size)
Definition: vp56rac.c:40
#define MAX_THREADS
#define b
Definition: input.c:41
4x4 blocks of 4x4px each
Definition: vp8.h:71
uint8_t deblock_filter
Definition: vp8.h:164
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:2838
#define H_LOOP_FILTER_16Y_INNER(cond)
#define FFMIN(a, b)
Definition: common.h:96
uint8_t feature_present_prob[4]
Definition: vp8.h:339
static av_always_inline void vp8_mc_chroma(VP8Context *s, VP8ThreadData *td, uint8_t *dst1, uint8_t *dst2, ThreadFrame *ref, const VP56mv *mv, int x_off, int y_off, int block_w, int block_h, int width, int height, ptrdiff_t linesize, vp8_mc_func mc_func[3][3])
chroma MC function
Definition: vp8.c:1863
#define width
uint8_t fullrange
whether we can skip clamping in dsp functions
Definition: vp8.h:308
int16_t block_dc[16]
Definition: vp8.h:108
static av_unused int vp8_rac_get_sint(VP56RangeCoder *c, int bits)
Definition: vp56.h:350
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards.If some code can't be moved
int width
picture width / height.
Definition: avcodec.h:1741
uint8_t mbskip
Definition: vp8.h:271
int8_t ref[4]
filter strength adjustment for macroblocks that reference: [0] - intra / VP56_FRAME_CURRENT [1] - VP5...
Definition: vp8.h:241
static int vp7_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2489
static av_cold int vp8_init_frames(VP8Context *s)
Definition: vp8.c:2834
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
static void free_buffers(VP8Context *s)
Definition: vp8.c:50
int32_t
AVFormatContext * ctx
Definition: movenc.c:48
#define check_thread_pos(td, otd, mb_x_check, mb_y_check)
Definition: vp8.c:2357
static int vp7_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame, VP8Frame *prev_frame)
Definition: vp8.c:2311
static av_always_inline int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
Definition: os2threads.h:100
int y2ac_delta
Definition: vp8.h:214
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
static int vp8_read_mv_component(VP56RangeCoder *c, const uint8_t *p)
Definition: vp8.c:890
#define s(width, name)
Definition: cbs_vp9.c:257
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:2839
static av_always_inline int vp78_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *curframe, VP8Frame *prev_frame, int is_vp7)
Definition: vp8.c:2274
int16_t luma_dc_qmul[2]
luma dc-only block quant
Definition: vp8.h:205
int16_t chroma_qmul[2]
Definition: vp8.h:206
static const uint8_t vp8_pred4x4_prob_intra[10][10][9]
Definition: vp8data.h:196
VP8mvbounds mv_bounds
Definition: vp8.h:135
uint8_t(* top_border)[16+8+8]
Definition: vp8.h:244
int n
Definition: avisynth_c.h:760
ThreadFrame tf
Definition: vp8.h:139
static av_always_inline int decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr, int is_vp7)
Definition: vp8.c:2361
struct VP8Context::@186 segmentation
Base parameters for segmentation, i.e.
static av_always_inline void filter_level_for_mb(VP8Context *s, VP8Macroblock *mb, VP8FilterStrength *f, int is_vp7)
Definition: vp8.c:2120
static const int8_t vp7_feature_index_tree[4][2]
Definition: vp8data.h:771
static const uint8_t vp7_feature_value_size[2][4]
Definition: vp8data.h:766
#define vp56_rac_get_prob
Definition: vp56.h:268
static void vp8_decode_flush_impl(AVCodecContext *avctx, int free_mem)
Definition: vp8.c:128
static av_always_inline void decode_mb_coeffs(VP8Context *s, VP8ThreadData *td, VP56RangeCoder *c, VP8Macroblock *mb, uint8_t t_nnz[9], uint8_t l_nnz[9], int is_vp7)
Definition: vp8.c:1466
HW acceleration through CUDA.
Definition: pixfmt.h:235
int bit_count
Definition: vp8.h:258
#define LEFT_DC_PRED8x8
Definition: h264pred.h:74
#define cat(a, bpp, b)
Definition: vp9dsp_init.h:29
uint8_t segment
Definition: vp8.h:89
#define FF_ARRAY_ELEMS(a)
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:2827
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:523
if(ret)
int8_t xoffset
Definition: vp8data.h:63
static const float pred[4]
Definition: siprdata.h:259
static int vp7_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2607
#define IS_VP8
Definition: vp8dsp.h:106
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: avcodec.h:1044
static const int8_t mv[256][2]
Definition: 4xm.c:77
static av_always_inline int check_intra_pred8x8_mode_emuedge(int mode, int mb_x, int mb_y, int vp7)
Definition: vp8.c:1605
static av_always_inline int vp56_rac_get_prob_branchy(VP56RangeCoder *c, int prob)
Definition: vp56.h:285
int coeff_partition_size[8]
Definition: vp8.h:300
int(* execute2)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count)
The codec may call this to execute several independent things.
Definition: avcodec.h:2887
#define src1
Definition: h264pred.c:139
static av_always_inline void inter_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb, int mb_x, int mb_y)
Apply motion vectors to prediction buffer, chapter 18.
Definition: vp8.c:1974
VP8Frame * curframe
Definition: vp8.h:155
uint8_t simple
Definition: vp8.h:187
VP8Frame frames[5]
Definition: vp8.h:305
Libavcodec external API header.
static const uint8_t vp8_pred8x8c_prob_intra[3]
Definition: vp8data.h:186
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
Definition: avcodec.h:3761
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have add an so the codec calls ff_thread_report set AVCodecInternal allocate_progress The frames must then be freed with ff_thread_release_buffer().Otherwise leave it at zero and decode directly into the user-supplied frames.Call ff_thread_report_progress() after some part of the current picture has decoded.A good place to put this is where draw_horiz_band() is called-add this if it isn't called anywhere
void(* vp8_mc_func)(uint8_t *dst, ptrdiff_t dstStride, uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: vp8dsp.h:33
uint8_t level
Definition: vp8.h:188
void(* vp8_v_loop_filter16y)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:48
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
static void vp8_release_frame(VP8Context *s, VP8Frame *f)
Definition: vp8.c:95
AVBufferRef * seg_map
Definition: vp8.h:140
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
static const int16_t alpha[]
Definition: ilbcdata.h:55
static const uint16_t vp7_yac_qlookup[]
Definition: vp8data.h:790
main external API structure.
Definition: avcodec.h:1568
static av_always_inline unsigned int vp56_rac_renorm(VP56RangeCoder *c)
Definition: vp56.h:244
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2]...the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so...,+,-,+,-,+,+,-,+,-,+,...hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32-hcoeff[1]-hcoeff[2]-...a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2}an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||.........intra?||||:Block01:yes no||||:Block02:.................||||:Block03::y DC::ref index:||||:Block04::cb DC::motion x:||||.........:cr DC::motion y:||||.................|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------------------------------|||Y subbands||Cb subbands||Cr subbands||||------||------||------|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||------||------||------||||------||------||------|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||------||------||------||||------||------||------|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||------||------||------||||------||------||------|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------------------------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction------------|\Dequantization-------------------\||Reference frames|\IDWT|--------------|Motion\|||Frame 0||Frame 1||Compensation.OBMC v-------|--------------|--------------.\------> Frame n output Frame Frame<----------------------------------/|...|-------------------Range Coder:============Binary Range Coder:-------------------The implemented range coder is an adapted version based upon"Range encoding: an algorithm for removing redundancy from a digitised message."by G.N.N.Martin.The symbols encoded by the Snow range coder are bits(0|1).The associated probabilities are not fix but change depending on the symbol mix seen so far.bit seen|new state---------+-----------------------------------------------0|256-state_transition_table[256-old_state];1|state_transition_table[old_state];state_transition_table={0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:-------------------------FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1.the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff)*mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
uint8_t * data
The data buffer.
Definition: buffer.h:89
VP8Frame * next_framep[4]
Definition: vp8.h:154
int mb_layout
This describes the macroblock memory layout.
Definition: vp8.h:316
uint8_t left_nnz[9]
For coeff decode, we need to know whether the above block had non-zero coefficients.
Definition: vp8.h:123
static const uint8_t vp8_mbsplit_prob[3]
Definition: vp8data.h:145
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2]...the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so...,+,-,+,-,+,+,-,+,-,+,...hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32-hcoeff[1]-hcoeff[2]-...a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2}an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||.........intra?||||:Block01:yes no||||:Block02:.................||||:Block03::y DC::ref index:||||:Block04::cb DC::motion x:||||.........:cr DC::motion y:||||.................|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------------------------------|||Y subbands||Cb subbands||Cr subbands||||------||------||------|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||------||------||------||||------||------||------|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||------||------||------||||------||------||------|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||------||------||------||||------||------||------|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------------------------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction------------|\Dequantization-------------------\||Reference frames|\IDWT|--------------|Motion\|||Frame 0||Frame 1||Compensation.OBMC v-------|--------------|--------------.\------> Frame n output Frame Frame<----------------------------------/|...|-------------------Range Coder:============Binary Range Coder:-------------------The implemented range coder is an adapted version based upon"Range encoding: an algorithm for removing redundancy from a digitised message."by G.N.N.Martin.The symbols encoded by the Snow range coder are bits(0|1).The associated probabilities are not fix but change depending on the symbol mix seen so far.bit seen|new state---------+-----------------------------------------------0|256-state_transition_table[256-old_state];1|state_transition_table[old_state];state_transition_table={0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:-------------------------FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1.the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:206
struct VP8Context::@192 prob[2]
These are all of the updatable probabilities for binary decisions.
VP56RangeCoder c
header context, includes mb modes and motion vectors
Definition: vp8.h:247
void * buf
Definition: avisynth_c.h:766
int y
Definition: vp8.h:98
int bits
Definition: vp56.h:87
void(* pred16x16[4+3+2])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:98
VP56RangeCoder coeff_partition[8]
Definition: vp8.h:299
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:83
uint8_t update
Definition: vp8.h:221
static const int8_t vp8_pred16x16_tree_inter[4][2]
Definition: vp8data.h:54
int vp7
Definition: vp8.h:321
AVBufferRef * hwaccel_priv_buf
Definition: vp8.h:142
static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
Definition: vp8.c:305
int coded_height
Definition: avcodec.h:1756
static int vp8_update_dimensions(VP8Context *s, int width, int height)
Definition: vp8.c:253
int index
Definition: gxfenc.c:89
VP8FilterStrength * filter_strength
Definition: vp8.h:134
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:2196
void(* filter_mb_row)(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.h:319
static av_always_inline int check_dc_pred8x8_mode(int mode, int mb_x, int mb_y)
Definition: vp8.c:1587
static void vp78_update_probability_tables(VP8Context *s)
Definition: vp8.c:427
#define MV_EDGE_CHECK(n)
static const int8_t vp8_pred4x4_tree[9][2]
Definition: vp8data.h:168
uint8_t enabled
whether each mb can have a different strength based on mode/ref
Definition: vp8.h:178
static av_always_inline void idct_mb(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb)
Definition: vp8.c:2058
static void vp78_update_pred16x16_pred8x8_mvc_probabilities(VP8Context *s, int mvc_size)
Definition: vp8.c:446
static av_always_inline int read_mv_component(VP56RangeCoder *c, const uint8_t *p, int vp7)
Motion vector coding, 17.1.
Definition: vp8.c:857
static const uint8_t subpel_idx[3][8]
Definition: vp8.c:1781
static void update_refs(VP8Context *s)
Definition: vp8.c:466
static av_always_inline int vp8_rac_get_coeff(VP56RangeCoder *c, const uint8_t *prob)
Definition: vp56.h:407
static const uint8_t vp8_coeff_band[16]
Definition: vp8data.h:325
void(* vp8_h_loop_filter8uv)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:54
int allocate_progress
Whether to allocate progress for frame threading.
Definition: internal.h:151
static const uint16_t vp8_ac_qlookup[VP8_MAX_QUANT+1]
Definition: vp8data.h:722
static const uint8_t vp8_pred16x16_prob_intra[4]
Definition: vp8data.h:161
uint8_t score
Definition: vp8data.h:65
int actually_webp
Definition: vp8.h:151
int header_partition_size
Definition: vp8.h:261
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi-0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64,*(const int64_t *) pi *(1.0f/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64,*(const int64_t *) pi *(1.0/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(UINT64_C(1)<< 63)))#define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64),};static void cpy1(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, len);}static void cpy2(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 2 *len);}static void cpy4(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 4 *len);}static void cpy8(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 8 *len);}AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags){AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);}ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map){switch(av_get_bytes_per_sample(in_fmt)){case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;}}if(HAVE_X86ASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);return ctx;}void swri_audio_convert_free(AudioConvert **ctx){av_freep(ctx);}int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len){int ch;int off=0;const int os=(out->planar?1:out->ch_count)*out->bps;unsigned misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask){int planes=in->planar?in->ch_count:1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;}if(ctx->out_simd_align_mask){int planes=out->planar?out->ch_count:1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;}if(ctx->simd_f &&!ctx->ch_map &&!misaligned){off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){if(out->planar==in->planar){int planes=out->planar?out->ch_count:1;for(ch=0;ch< planes;ch++){ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
uint8_t update_feature_data
Definition: vp8.h:181
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
mfxU16 profile
Definition: qsvenc.c:44
static av_always_inline void decode_intra4x4_modes(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb, int mb_x, int keyframe, int layout)
Definition: vp8.c:1205
static int vp8_rac_get_uint(VP56RangeCoder *c, int bits)
Definition: vp56.h:338
#define DC_127_PRED8x8
Definition: h264pred.h:85
#define HWACCEL_VAAPI(codec)
Definition: hwaccel.h:73
Definition: vp56.h:66
av_cold int ff_vp8_decode_init(AVCodecContext *avctx)
Definition: vp8.c:2890
int update_altref
Definition: vp8.h:286
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:3750
void * hwaccel_picture_private
Definition: vp8.h:143
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
uint8_t feature_enabled[4]
Macroblock features (VP7)
Definition: vp8.h:338
int8_t mode[VP8_MVMODE_SPLIT+1]
filter strength adjustment for the following macroblock modes: [0-3] - i16x16 (always zero) [4] - i4x...
Definition: vp8.h:232
2 8x16 blocks (horizontal)
Definition: vp8.h:69
av_cold int ff_vp8_decode_free(AVCodecContext *avctx)
Definition: vp8.c:2819
Definition: vp9.h:48
#define AV_ZERO128(d)
Definition: intreadwrite.h:637
uint8_t pred8x8c[3]
Definition: vp8.h:276
the normal 219*2^(n-8) "MPEG" YUV ranges
Definition: pixfmt.h:522
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: avcodec.h:3722
void(* vp8_luma_dc_wht)(int16_t block[4][4][16], int16_t dc[16])
Definition: vp8dsp.h:38
void(* vp8_v_loop_filter_simple)(uint8_t *dst, ptrdiff_t stride, int flim)
Definition: vp8dsp.h:69
discard all non reference
Definition: avcodec.h:810
void(* vp8_h_loop_filter_simple)(uint8_t *dst, ptrdiff_t stride, int flim)
Definition: vp8dsp.h:70
uint8_t partitioning
Definition: vp8.h:87
#define AV_ZERO64(d)
Definition: intreadwrite.h:633
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
int16_t x
Definition: vp56.h:67
common internal api header.
static void vp8_get_quants(VP8Context *s)
Definition: vp8.c:356
void(* vp8_v_loop_filter16y_inner)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:58
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
#define LOCAL_ALIGNED(a, t, v,...)
Definition: internal.h:114
#define AV_COPY128(d, s)
Definition: intreadwrite.h:609
static int vp8_alloc_frame(VP8Context *s, VP8Frame *f, int ref)
Definition: vp8.c:70
uint8_t chroma_pred_mode
Definition: vp8.h:88
#define bit(string, value)
Definition: cbs_mpeg2.c:58
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
VP8intmv mv_max
Definition: vp8.h:103
static av_always_inline void backup_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, ptrdiff_t linesize, ptrdiff_t uvlinesize, int simple)
Definition: vp8.c:1541
static av_always_inline int pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr)
Definition: os2threads.h:129
#define DC_129_PRED8x8
Definition: h264pred.h:86
int uvdc_delta
Definition: vp8.h:215
struct VP8Context::@190 lf_delta
enum AVDiscard skip_loop_filter
Skip loop filtering for selected frames.
Definition: avcodec.h:3029
static av_always_inline int vp8_rac_get(VP56RangeCoder *c)
Definition: vp56.h:322
int invisible
Definition: vp8.h:283
static av_always_inline int decode_splitmvs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb, int layout, int is_vp7)
Split motion vector prediction, 16.4.
Definition: vp8.c:913
static const SiprModeParam modes[MODE_COUNT]
Definition: sipr.c:69
int ref_count[3]
Definition: vp8.h:170
void * priv_data
Definition: avcodec.h:1595
static av_always_inline int check_tm_pred4x4_mode(int mode, int mb_x, int mb_y, int vp7)
Definition: vp8.c:1621
#define MODE_I4x4
Definition: vp8.h:59
static int vp7_calculate_mb_offset(int mb_x, int mb_y, int mb_width, int xoffset, int yoffset, int boundary, int *edge_x, int *edge_y)
The vp7 reference decoder uses a padding macroblock column (added to right edge of the frame) to guar...
Definition: vp8.c:995
#define XCHG(a, b, xchg)
#define update_pos(td, mb_y, mb_x)
Definition: vp8.c:2358
int frame_priv_data_size
Size of per-frame hardware accelerator private data.
Definition: avcodec.h:3770
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1603
#define HOR_VP8_PRED
unaveraged version of HOR_PRED, see
Definition: h264pred.h:63
VP8DSPContext vp8dsp
Definition: vp8.h:302
static av_always_inline int update_dimensions(VP8Context *s, int width, int height, int is_vp7)
Definition: vp8.c:187
int thread_nr
Definition: vp8.h:124
static av_always_inline void xchg_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, ptrdiff_t linesize, ptrdiff_t uvlinesize, int mb_x, int mb_y, int mb_width, int simple, int xchg)
Definition: vp8.c:1553
#define AV_ZERO32(d)
Definition: intreadwrite.h:629
static av_always_inline int vp78_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr, int is_vp7)
Definition: vp8.c:2573
static const double coeff[2][5]
Definition: vf_owdenoise.c:72
AVDiscard
Definition: avcodec.h:805
static av_unused int vp8_rac_get_nn(VP56RangeCoder *c)
Definition: vp56.h:372
#define av_uninit(x)
Definition: attributes.h:148
const uint8_t * buffer
Definition: vp56.h:89
static av_always_inline void vp8_mc_luma(VP8Context *s, VP8ThreadData *td, uint8_t *dst, ThreadFrame *ref, const VP56mv *mv, int x_off, int y_off, int block_w, int block_h, int width, int height, ptrdiff_t linesize, vp8_mc_func mc_func[3][3])
luma MC function
Definition: vp8.c:1805
static const uint8_t vp8_token_update_probs[4][8][3][NUM_DCT_TOKENS-1]
Definition: vp8data.h:540
static av_always_inline void filter_mb(VP8Context *s, uint8_t *dst[3], VP8FilterStrength *f, int mb_x, int mb_y, int is_vp7)
Definition: vp8.c:2153
#define atomic_init(obj, value)
Definition: stdatomic.h:33
#define av_freep(p)
#define IS_VP7
Definition: vp8dsp.h:105
#define av_always_inline
Definition: attributes.h:39
int8_t filter_level[4]
base loop filter level
Definition: vp8.h:183
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
static const int vp8_mode_contexts[6][4]
Definition: vp8data.h:118
int high
Definition: vp56.h:86
static const uint8_t vp8_dct_cat1_prob[]
Definition: vp8data.h:342
#define FFSWAP(type, a, b)
Definition: common.h:99
uint8_t intra
Definition: vp8.h:272
void(* vp8_h_loop_filter16y)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:50
static int vp7_fade_frame(VP8Context *s, int alpha, int beta)
Definition: vp8.c:504
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
AVCodec ff_vp7_decoder
static av_always_inline void vp8_decode_mvs(VP8Context *s, VP8mvbounds *mv_bounds, VP8Macroblock *mb, int mb_x, int mb_y, int layout)
Definition: vp8.c:1105
uint8_t non_zero_count_cache[6][4]
This is the index plus one of the last non-zero coeff for each of the blocks in the current macrobloc...
Definition: vp8.h:116
uint8_t skip
Definition: vp8.h:82
atomic_int thread_mb_pos
Definition: vp8.h:129
void ff_vp8dsp_init(VP8DSPContext *c)
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:87
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
static void vp78_reset_probability_tables(VP8Context *s)
Definition: vp8.c:418
This structure stores compressed data.
Definition: avcodec.h:1457
#define VP8_MVC_SIZE
Definition: vp8.c:444
static int vp7_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
Definition: vp8.c:541
AVCodec ff_vp8_decoder
uint8_t profile
Definition: vp8.h:166
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:1179
const uint8_t *const ff_vp8_dct_cat_prob[]
Definition: vp8data.h:362
mode
Use these values in ebur128_init (or&#39;ed).
Definition: ebur128.h:83
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:984
VP8ThreadData * thread_data
Definition: vp8.h:148
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel layout
enum AVPixelFormat pix_fmt
Definition: vp8.h:150
struct VP8Context::@187 filter
Predicted.
Definition: avutil.h:275
2x2 blocks of 8x8px each
Definition: vp8.h:70
static av_always_inline void filter_mb_simple(VP8Context *s, uint8_t *dst, VP8FilterStrength *f, int mb_x, int mb_y)
Definition: vp8.c:2240
static const VP7MVPred vp7_mv_pred[VP7_MV_PRED_COUNT]
Definition: vp8data.h:69
static const uint16_t vp7_y2ac_qlookup[]
Definition: vp8data.h:816
static const uint8_t vp7_submv_prob[3]
Definition: vp8data.h:149
static av_always_inline int vp78_decode_init(AVCodecContext *avctx, int is_vp7)
Definition: vp8.c:2846
int(* decode_mb_row_no_filter)(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.h:318
#define AV_WN64(p, v)
Definition: intreadwrite.h:380
uint8_t filter_level
Definition: vp8.h:76