vp8.c
1 /*
2  * VP7/VP8 compatible video decoder
3  *
4  * Copyright (C) 2010 David Conrad
5  * Copyright (C) 2010 Ronald S. Bultje
6  * Copyright (C) 2010 Fiona Glaser
7  * Copyright (C) 2012 Daniel Kang
8  * Copyright (C) 2014 Peter Ross
9  *
10  * This file is part of FFmpeg.
11  *
12  * FFmpeg is free software; you can redistribute it and/or
13  * modify it under the terms of the GNU Lesser General Public
14  * License as published by the Free Software Foundation; either
15  * version 2.1 of the License, or (at your option) any later version.
16  *
17  * FFmpeg is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20  * Lesser General Public License for more details.
21  *
22  * You should have received a copy of the GNU Lesser General Public
23  * License along with FFmpeg; if not, write to the Free Software
24  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25  */
26 
27 #include "libavutil/imgutils.h"
28 
29 #include "avcodec.h"
30 #include "internal.h"
31 #include "mathops.h"
32 #include "rectangle.h"
33 #include "thread.h"
34 #include "vp8.h"
35 #include "vp8data.h"
36 
37 #if ARCH_ARM
38 # include "arm/vp8.h"
39 #endif
40 
41 #if CONFIG_VP7_DECODER && CONFIG_VP8_DECODER
42 #define VPX(vp7, f) (vp7 ? vp7_ ## f : vp8_ ## f)
43 #elif CONFIG_VP7_DECODER
44 #define VPX(vp7, f) vp7_ ## f
45 #else // CONFIG_VP8_DECODER
46 #define VPX(vp7, f) vp8_ ## f
47 #endif
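/* Illustrative note (not part of the original file): a call such as
 * VPX(is_vp7, decode_frame_header)(s, buf, buf_size) resolves to
 * vp7_decode_frame_header() or vp8_decode_frame_header(); when only one of the
 * two decoders is configured in, the macro never references the other's symbols. */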
48 
49 static void free_buffers(VP8Context *s)
50 {
51  int i;
52  if (s->thread_data)
53  for (i = 0; i < MAX_THREADS; i++) {
54 #if HAVE_THREADS
55  pthread_cond_destroy(&s->thread_data[i].cond);
 56  pthread_mutex_destroy(&s->thread_data[i].lock);
 57 #endif
 58  av_freep(&s->thread_data[i].filter_strength);
59  }
60  av_freep(&s->thread_data);
 61  av_freep(&s->macroblocks_base);
 62  av_freep(&s->intra4x4_pred_mode_top);
63  av_freep(&s->top_nnz);
64  av_freep(&s->top_border);
65 
66  s->macroblocks = NULL;
67 }
68 
69 static int vp8_alloc_frame(VP8Context *s, VP8Frame *f, int ref)
70 {
71  int ret;
72  if ((ret = ff_thread_get_buffer(s->avctx, &f->tf,
73  ref ? AV_GET_BUFFER_FLAG_REF : 0)) < 0)
74  return ret;
75  if (!(f->seg_map = av_buffer_allocz(s->mb_width * s->mb_height))) {
 76  ff_thread_release_buffer(s->avctx, &f->tf);
77  return AVERROR(ENOMEM);
78  }
79  return 0;
80 }
81 
 82 static void vp8_release_frame(VP8Context *s, VP8Frame *f)
 83 {
 84  av_buffer_unref(&f->seg_map);
 85  ff_thread_release_buffer(s->avctx, &f->tf);
 86 }
87 
88 #if CONFIG_VP8_DECODER
89 static int vp8_ref_frame(VP8Context *s, VP8Frame *dst, VP8Frame *src)
90 {
91  int ret;
92 
93  vp8_release_frame(s, dst);
94 
95  if ((ret = ff_thread_ref_frame(&dst->tf, &src->tf)) < 0)
96  return ret;
97  if (src->seg_map &&
98  !(dst->seg_map = av_buffer_ref(src->seg_map))) {
99  vp8_release_frame(s, dst);
100  return AVERROR(ENOMEM);
101  }
102 
103  return 0;
104 }
105 #endif /* CONFIG_VP8_DECODER */
106 
107 static void vp8_decode_flush_impl(AVCodecContext *avctx, int free_mem)
108 {
109  VP8Context *s = avctx->priv_data;
110  int i;
111 
112  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
113  vp8_release_frame(s, &s->frames[i]);
114  memset(s->framep, 0, sizeof(s->framep));
115 
116  if (free_mem)
117  free_buffers(s);
118 }
119 
120 static void vp8_decode_flush(AVCodecContext *avctx)
121 {
122  vp8_decode_flush_impl(avctx, 0);
123 }
124 
 125 static VP8Frame *vp8_find_free_buffer(VP8Context *s)
 126 {
127  VP8Frame *frame = NULL;
128  int i;
129 
130  // find a free buffer
131  for (i = 0; i < 5; i++)
132  if (&s->frames[i] != s->framep[VP56_FRAME_CURRENT] &&
133  &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
134  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
135  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2]) {
136  frame = &s->frames[i];
137  break;
138  }
139  if (i == 5) {
140  av_log(s->avctx, AV_LOG_FATAL, "Ran out of free frames!\n");
141  abort();
142  }
143  if (frame->tf.f->data[0])
144  vp8_release_frame(s, frame);
145 
146  return frame;
147 }
148 
149 static av_always_inline
150 int update_dimensions(VP8Context *s, int width, int height, int is_vp7)
151 {
152  AVCodecContext *avctx = s->avctx;
153  int i, ret;
154 
155  if (width != s->avctx->width || ((width+15)/16 != s->mb_width || (height+15)/16 != s->mb_height) && s->macroblocks_base ||
156  height != s->avctx->height) {
 157  vp8_decode_flush_impl(s->avctx, 1);
158 
159  ret = ff_set_dimensions(s->avctx, width, height);
160  if (ret < 0)
161  return ret;
162  }
163 
164  s->mb_width = (s->avctx->coded_width + 15) / 16;
165  s->mb_height = (s->avctx->coded_height + 15) / 16;
166 
167  s->mb_layout = is_vp7 || avctx->active_thread_type == FF_THREAD_SLICE &&
168  avctx->thread_count > 1;
169  if (!s->mb_layout) { // Frame threading and one thread
170  s->macroblocks_base = av_mallocz((s->mb_width + s->mb_height * 2 + 1) *
171  sizeof(*s->macroblocks));
 172  s->intra4x4_pred_mode_top = av_mallocz(s->mb_width * 4);
173  } else // Sliced threading
174  s->macroblocks_base = av_mallocz((s->mb_width + 2) * (s->mb_height + 2) *
175  sizeof(*s->macroblocks));
176  s->top_nnz = av_mallocz(s->mb_width * sizeof(*s->top_nnz));
177  s->top_border = av_mallocz((s->mb_width + 1) * sizeof(*s->top_border));
 178  s->thread_data = av_mallocz(MAX_THREADS * sizeof(VP8ThreadData));
179 
180  if (!s->macroblocks_base || !s->top_nnz || !s->top_border ||
181  !s->thread_data || (!s->intra4x4_pred_mode_top && !s->mb_layout)) {
182  free_buffers(s);
183  return AVERROR(ENOMEM);
184  }
185 
186  for (i = 0; i < MAX_THREADS; i++) {
 187  s->thread_data[i].filter_strength =
188  av_mallocz(s->mb_width * sizeof(*s->thread_data[0].filter_strength));
189  if (!s->thread_data[i].filter_strength) {
190  free_buffers(s);
191  return AVERROR(ENOMEM);
192  }
193 #if HAVE_THREADS
194  pthread_mutex_init(&s->thread_data[i].lock, NULL);
195  pthread_cond_init(&s->thread_data[i].cond, NULL);
196 #endif
197  }
198 
199  s->macroblocks = s->macroblocks_base + 1;
200 
201  return 0;
202 }
203 
 204 static int vp7_update_dimensions(VP8Context *s, int width, int height)
 205 {
206  return update_dimensions(s, width, height, IS_VP7);
207 }
208 
 209 static int vp8_update_dimensions(VP8Context *s, int width, int height)
 210 {
211  return update_dimensions(s, width, height, IS_VP8);
212 }
213 
214 
 215 static void parse_segment_info(VP8Context *s)
 216 {
217  VP56RangeCoder *c = &s->c;
218  int i;
219 
 220  s->segmentation.update_map = vp8_rac_get(c);
221 
222  if (vp8_rac_get(c)) { // update segment feature data
 223  s->segmentation.absolute_vals = vp8_rac_get(c);
224 
225  for (i = 0; i < 4; i++)
 226  s->segmentation.base_quant[i] = vp8_rac_get_sint(c, 7);
227 
228  for (i = 0; i < 4; i++)
 229  s->segmentation.filter_level[i] = vp8_rac_get_sint(c, 6);
230  }
231  if (s->segmentation.update_map)
232  for (i = 0; i < 3; i++)
233  s->prob->segmentid[i] = vp8_rac_get(c) ? vp8_rac_get_uint(c, 8) : 255;
234 }
235 
 236 static void update_lf_deltas(VP8Context *s)
 237 {
238  VP56RangeCoder *c = &s->c;
239  int i;
240 
241  for (i = 0; i < 4; i++) {
242  if (vp8_rac_get(c)) {
243  s->lf_delta.ref[i] = vp8_rac_get_uint(c, 6);
244 
245  if (vp8_rac_get(c))
246  s->lf_delta.ref[i] = -s->lf_delta.ref[i];
247  }
248  }
249 
250  for (i = MODE_I4x4; i <= VP8_MVMODE_SPLIT; i++) {
251  if (vp8_rac_get(c)) {
252  s->lf_delta.mode[i] = vp8_rac_get_uint(c, 6);
253 
254  if (vp8_rac_get(c))
255  s->lf_delta.mode[i] = -s->lf_delta.mode[i];
256  }
257  }
258 }
259 
260 static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
261 {
262  const uint8_t *sizes = buf;
263  int i;
264 
265  s->num_coeff_partitions = 1 << vp8_rac_get_uint(&s->c, 2);
266 
267  buf += 3 * (s->num_coeff_partitions - 1);
268  buf_size -= 3 * (s->num_coeff_partitions - 1);
269  if (buf_size < 0)
270  return -1;
271 
272  for (i = 0; i < s->num_coeff_partitions - 1; i++) {
273  int size = AV_RL24(sizes + 3 * i);
274  if (buf_size - size < 0)
275  return -1;
276 
277  ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, size);
278  buf += size;
279  buf_size -= size;
280  }
281  ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, buf_size);
282 
283  return 0;
284 }
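/* Illustrative note: with num_coeff_partitions == 4 the three 24-bit little-endian
 * size fields occupy 9 bytes, followed by partitions 0-2 of the given sizes;
 * the final partition simply takes whatever data remains in the buffer. */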
285 
286 static void vp7_get_quants(VP8Context *s)
287 {
288  VP56RangeCoder *c = &s->c;
289 
290  int yac_qi = vp8_rac_get_uint(c, 7);
291  int ydc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
292  int y2dc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
293  int y2ac_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
294  int uvdc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
295  int uvac_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
296 
297  s->qmat[0].luma_qmul[0] = vp7_ydc_qlookup[ydc_qi];
298  s->qmat[0].luma_qmul[1] = vp7_yac_qlookup[yac_qi];
299  s->qmat[0].luma_dc_qmul[0] = vp7_y2dc_qlookup[y2dc_qi];
300  s->qmat[0].luma_dc_qmul[1] = vp7_y2ac_qlookup[y2ac_qi];
301  s->qmat[0].chroma_qmul[0] = FFMIN(vp7_ydc_qlookup[uvdc_qi], 132);
302  s->qmat[0].chroma_qmul[1] = vp7_yac_qlookup[uvac_qi];
303 }
304 
305 static void vp8_get_quants(VP8Context *s)
306 {
307  VP56RangeCoder *c = &s->c;
308  int i, base_qi;
309 
310  int yac_qi = vp8_rac_get_uint(c, 7);
311  int ydc_delta = vp8_rac_get_sint(c, 4);
312  int y2dc_delta = vp8_rac_get_sint(c, 4);
313  int y2ac_delta = vp8_rac_get_sint(c, 4);
314  int uvdc_delta = vp8_rac_get_sint(c, 4);
315  int uvac_delta = vp8_rac_get_sint(c, 4);
316 
317  for (i = 0; i < 4; i++) {
318  if (s->segmentation.enabled) {
319  base_qi = s->segmentation.base_quant[i];
320  if (!s->segmentation.absolute_vals)
321  base_qi += yac_qi;
322  } else
323  base_qi = yac_qi;
324 
325  s->qmat[i].luma_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + ydc_delta, 7)];
326  s->qmat[i].luma_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi, 7)];
327  s->qmat[i].luma_dc_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + y2dc_delta, 7)] * 2;
328  /* 101581>>16 is equivalent to 155/100 */
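 /* e.g. a base Y2 AC value of 8 becomes 8 * 101581 >> 16 = 12, i.e. roughly 8 * 1.55 */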
329  s->qmat[i].luma_dc_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi + y2ac_delta, 7)] * 101581 >> 16;
330  s->qmat[i].chroma_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + uvdc_delta, 7)];
331  s->qmat[i].chroma_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi + uvac_delta, 7)];
332 
333  s->qmat[i].luma_dc_qmul[1] = FFMAX(s->qmat[i].luma_dc_qmul[1], 8);
334  s->qmat[i].chroma_qmul[0] = FFMIN(s->qmat[i].chroma_qmul[0], 132);
335  }
336 }
337 
338 /**
339  * Determine which buffers golden and altref should be updated with after this frame.
340  * The spec isn't clear here, so I'm going by my understanding of what libvpx does
341  *
342  * Intra frames update all 3 references
343  * Inter frames update VP56_FRAME_PREVIOUS if the update_last flag is set
344  * If the update (golden|altref) flag is set, it's updated with the current frame
345  * if update_last is set, and VP56_FRAME_PREVIOUS otherwise.
346  * If the flag is not set, the number read means:
347  * 0: no update
348  * 1: VP56_FRAME_PREVIOUS
349  * 2: update golden with altref, or update altref with golden
350  */
 351 static VP56Frame ref_to_update(VP8Context *s, int update, VP56Frame ref)
 352 {
353  VP56RangeCoder *c = &s->c;
354 
355  if (update)
356  return VP56_FRAME_CURRENT;
357 
358  switch (vp8_rac_get_uint(c, 2)) {
359  case 1:
360  return VP56_FRAME_PREVIOUS;
361  case 2:
 362  return (ref == VP56_FRAME_GOLDEN) ? VP56_FRAME_GOLDEN2 : VP56_FRAME_GOLDEN;
363  }
364  return VP56_FRAME_NONE;
365 }
366 
 367 static void vp78_reset_probability_tables(VP8Context *s)
 368 {
369  int i, j;
370  for (i = 0; i < 4; i++)
371  for (j = 0; j < 16; j++)
372  memcpy(s->prob->token[i][j], vp8_token_default_probs[i][vp8_coeff_band[j]],
373  sizeof(s->prob->token[i][j]));
374 }
375 
 376 static void vp78_update_probability_tables(VP8Context *s)
 377 {
378  VP56RangeCoder *c = &s->c;
379  int i, j, k, l, m;
380 
381  for (i = 0; i < 4; i++)
382  for (j = 0; j < 8; j++)
383  for (k = 0; k < 3; k++)
384  for (l = 0; l < NUM_DCT_TOKENS-1; l++)
 385  if (vp56_rac_get_prob_branchy(c, vp8_token_update_probs[i][j][k][l])) {
386  int prob = vp8_rac_get_uint(c, 8);
387  for (m = 0; vp8_coeff_band_indexes[j][m] >= 0; m++)
388  s->prob->token[i][vp8_coeff_band_indexes[j][m]][k][l] = prob;
389  }
390 }
391 
392 #define VP7_MVC_SIZE 17
393 #define VP8_MVC_SIZE 19
394 
 395 static void vp78_update_pred16x16_pred8x8_mvc_probabilities(VP8Context *s,
 396  int mvc_size)
397 {
398  VP56RangeCoder *c = &s->c;
399  int i, j;
400 
401  if (vp8_rac_get(c))
402  for (i = 0; i < 4; i++)
403  s->prob->pred16x16[i] = vp8_rac_get_uint(c, 8);
404  if (vp8_rac_get(c))
405  for (i = 0; i < 3; i++)
406  s->prob->pred8x8c[i] = vp8_rac_get_uint(c, 8);
407 
408  // 17.2 MV probability update
409  for (i = 0; i < 2; i++)
410  for (j = 0; j < mvc_size; j++)
 411  if (vp56_rac_get_prob_branchy(c, vp8_mv_update_prob[i][j]))
412  s->prob->mvc[i][j] = vp8_rac_get_nn(c);
413 }
414 
415 static void update_refs(VP8Context *s)
416 {
417  VP56RangeCoder *c = &s->c;
418 
419  int update_golden = vp8_rac_get(c);
420  int update_altref = vp8_rac_get(c);
421 
422  s->update_golden = ref_to_update(s, update_golden, VP56_FRAME_GOLDEN);
423  s->update_altref = ref_to_update(s, update_altref, VP56_FRAME_GOLDEN2);
424 }
425 
426 static void copy_chroma(AVFrame *dst, AVFrame *src, int width, int height)
427 {
428  int i, j;
429 
430  for (j = 1; j < 3; j++) {
431  for (i = 0; i < height / 2; i++)
432  memcpy(dst->data[j] + i * dst->linesize[j],
433  src->data[j] + i * src->linesize[j], width / 2);
434  }
435 }
436 
437 static void fade(uint8_t *dst, int dst_linesize,
438  const uint8_t *src, int src_linesize,
439  int width, int height,
440  int alpha, int beta)
441 {
442  int i, j;
443  for (j = 0; j < height; j++) {
444  for (i = 0; i < width; i++) {
445  uint8_t y = src[j * src_linesize + i];
446  dst[j * dst_linesize + i] = av_clip_uint8(y + ((y * beta) >> 8) + alpha);
447  }
448  }
449 }
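/* Worked example: with alpha = 16 and beta = 32 a luma sample y = 100 becomes
 * av_clip_uint8(100 + ((100 * 32) >> 8) + 16) = 128, i.e. a gain of 32/256 plus
 * a constant offset of 16. */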
450 
 451 static int vp7_fade_frame(VP8Context *s, VP56RangeCoder *c)
 452 {
453  int alpha = (int8_t) vp8_rac_get_uint(c, 8);
454  int beta = (int8_t) vp8_rac_get_uint(c, 8);
455  int ret;
456 
457  if (!s->keyframe && (alpha || beta)) {
458  int width = s->mb_width * 16;
459  int height = s->mb_height * 16;
460  AVFrame *src, *dst;
461 
462  if (!s->framep[VP56_FRAME_PREVIOUS] ||
463  !s->framep[VP56_FRAME_GOLDEN]) {
464  av_log(s->avctx, AV_LOG_WARNING, "Discarding interframe without a prior keyframe!\n");
465  return AVERROR_INVALIDDATA;
466  }
467 
468  dst =
469  src = s->framep[VP56_FRAME_PREVIOUS]->tf.f;
470 
471  /* preserve the golden frame, write a new previous frame */
 472  if (s->framep[VP56_FRAME_GOLDEN] == s->framep[VP56_FRAME_PREVIOUS]) {
 473  s->framep[VP56_FRAME_PREVIOUS] = vp8_find_free_buffer(s);
474  if ((ret = vp8_alloc_frame(s, s->framep[VP56_FRAME_PREVIOUS], 1)) < 0)
475  return ret;
476 
477  dst = s->framep[VP56_FRAME_PREVIOUS]->tf.f;
478 
479  copy_chroma(dst, src, width, height);
480  }
481 
482  fade(dst->data[0], dst->linesize[0],
483  src->data[0], src->linesize[0],
484  width, height, alpha, beta);
485  }
486 
487  return 0;
488 }
489 
490 static int vp7_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
491 {
492  VP56RangeCoder *c = &s->c;
493  int part1_size, hscale, vscale, i, j, ret;
494  int width = s->avctx->width;
495  int height = s->avctx->height;
496 
497  if (buf_size < 4) {
498  return AVERROR_INVALIDDATA;
499  }
500 
501  s->profile = (buf[0] >> 1) & 7;
502  if (s->profile > 1) {
503  avpriv_request_sample(s->avctx, "Unknown profile %d", s->profile);
504  return AVERROR_INVALIDDATA;
505  }
506 
507  s->keyframe = !(buf[0] & 1);
508  s->invisible = 0;
509  part1_size = AV_RL24(buf) >> 4;
510 
511  if (buf_size < 4 - s->profile + part1_size) {
512  av_log(s->avctx, AV_LOG_ERROR, "Buffer size %d is too small, needed : %d\n", buf_size, 4 - s->profile + part1_size);
513  return AVERROR_INVALIDDATA;
514  }
515 
516  buf += 4 - s->profile;
517  buf_size -= 4 - s->profile;
518 
519  memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_epel_pixels_tab, sizeof(s->put_pixels_tab));
520 
521  ff_vp56_init_range_decoder(c, buf, part1_size);
522  buf += part1_size;
523  buf_size -= part1_size;
524 
525  /* A. Dimension information (keyframes only) */
526  if (s->keyframe) {
527  width = vp8_rac_get_uint(c, 12);
528  height = vp8_rac_get_uint(c, 12);
529  hscale = vp8_rac_get_uint(c, 2);
530  vscale = vp8_rac_get_uint(c, 2);
531  if (hscale || vscale)
532  avpriv_request_sample(s->avctx, "Upscaling");
533 
 534  s->update_golden = s->update_altref = VP56_FRAME_CURRENT;
 535  vp78_reset_probability_tables(s);
537  sizeof(s->prob->pred16x16));
539  sizeof(s->prob->pred8x8c));
540  for (i = 0; i < 2; i++)
541  memcpy(s->prob->mvc[i], vp7_mv_default_prob[i],
542  sizeof(vp7_mv_default_prob[i]));
543  memset(&s->segmentation, 0, sizeof(s->segmentation));
544  memset(&s->lf_delta, 0, sizeof(s->lf_delta));
545  memcpy(s->prob[0].scan, ff_zigzag_scan, sizeof(s->prob[0].scan));
546  }
547 
548  if (s->keyframe || s->profile > 0)
549  memset(s->inter_dc_pred, 0 , sizeof(s->inter_dc_pred));
550 
551  /* B. Decoding information for all four macroblock-level features */
552  for (i = 0; i < 4; i++) {
553  s->feature_enabled[i] = vp8_rac_get(c);
554  if (s->feature_enabled[i]) {
 555  s->feature_present_prob[i] = vp8_rac_get_uint(c, 8);
556 
557  for (j = 0; j < 3; j++)
558  s->feature_index_prob[i][j] =
559  vp8_rac_get(c) ? vp8_rac_get_uint(c, 8) : 255;
560 
561  if (vp7_feature_value_size[s->profile][i])
562  for (j = 0; j < 4; j++)
563  s->feature_value[i][j] =
 564  vp8_rac_get(c) ? vp8_rac_get_uint(c, vp7_feature_value_size[s->profile][i]) : 0;
565  }
566  }
567 
568  s->segmentation.enabled = 0;
569  s->segmentation.update_map = 0;
570  s->lf_delta.enabled = 0;
571 
572  s->num_coeff_partitions = 1;
573  ff_vp56_init_range_decoder(&s->coeff_partition[0], buf, buf_size);
574 
575  if (!s->macroblocks_base || /* first frame */
576  width != s->avctx->width || height != s->avctx->height ||
577  (width + 15) / 16 != s->mb_width || (height + 15) / 16 != s->mb_height) {
578  if ((ret = vp7_update_dimensions(s, width, height)) < 0)
579  return ret;
580  }
581 
582  /* C. Dequantization indices */
583  vp7_get_quants(s);
584 
585  /* D. Golden frame update flag (a Flag) for interframes only */
586  if (!s->keyframe) {
 587  s->update_golden = vp8_rac_get(c) ? VP56_FRAME_CURRENT : VP56_FRAME_NONE;
589  }
590 
591  s->update_last = 1;
592  s->update_probabilities = 1;
593  s->fade_present = 1;
594 
595  if (s->profile > 0) {
 596  s->update_probabilities = vp8_rac_get(c);
597  if (!s->update_probabilities)
598  s->prob[1] = s->prob[0];
599 
600  if (!s->keyframe)
601  s->fade_present = vp8_rac_get(c);
602  }
603 
604  /* E. Fading information for previous frame */
605  if (s->fade_present && vp8_rac_get(c)) {
606  if ((ret = vp7_fade_frame(s ,c)) < 0)
607  return ret;
608  }
609 
610  /* F. Loop filter type */
611  if (!s->profile)
612  s->filter.simple = vp8_rac_get(c);
613 
614  /* G. DCT coefficient ordering specification */
615  if (vp8_rac_get(c))
616  for (i = 1; i < 16; i++)
617  s->prob[0].scan[i] = ff_zigzag_scan[vp8_rac_get_uint(c, 4)];
618 
619  /* H. Loop filter levels */
620  if (s->profile > 0)
621  s->filter.simple = vp8_rac_get(c);
622  s->filter.level = vp8_rac_get_uint(c, 6);
623  s->filter.sharpness = vp8_rac_get_uint(c, 3);
624 
625  /* I. DCT coefficient probability update; 13.3 Token Probability Updates */
 626  vp78_update_probability_tables(s);
627 
628  s->mbskip_enabled = 0;
629 
630  /* J. The remaining frame header data occurs ONLY FOR INTERFRAMES */
631  if (!s->keyframe) {
632  s->prob->intra = vp8_rac_get_uint(c, 8);
633  s->prob->last = vp8_rac_get_uint(c, 8);
 634  vp78_update_pred16x16_pred8x8_mvc_probabilities(s, VP7_MVC_SIZE);
635  }
636 
637  return 0;
638 }
639 
640 static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
641 {
642  VP56RangeCoder *c = &s->c;
643  int header_size, hscale, vscale, ret;
644  int width = s->avctx->width;
645  int height = s->avctx->height;
646 
647  if (buf_size < 3) {
 648  av_log(s->avctx, AV_LOG_ERROR, "Insufficient data (%d) for header\n", buf_size);
649  return AVERROR_INVALIDDATA;
650  }
651 
652  s->keyframe = !(buf[0] & 1);
653  s->profile = (buf[0]>>1) & 7;
654  s->invisible = !(buf[0] & 0x10);
655  header_size = AV_RL24(buf) >> 5;
656  buf += 3;
657  buf_size -= 3;
658 
659  if (s->profile > 3)
660  av_log(s->avctx, AV_LOG_WARNING, "Unknown profile %d\n", s->profile);
661 
662  if (!s->profile)
 663  memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_epel_pixels_tab,
664  sizeof(s->put_pixels_tab));
665  else // profile 1-3 use bilinear, 4+ aren't defined so whatever
 666  memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_bilinear_pixels_tab,
667  sizeof(s->put_pixels_tab));
668 
669  if (header_size > buf_size - 7 * s->keyframe) {
670  av_log(s->avctx, AV_LOG_ERROR, "Header size larger than data provided\n");
671  return AVERROR_INVALIDDATA;
672  }
673 
674  if (s->keyframe) {
675  if (AV_RL24(buf) != 0x2a019d) {
 676  av_log(s->avctx, AV_LOG_ERROR,
677  "Invalid start code 0x%x\n", AV_RL24(buf));
678  return AVERROR_INVALIDDATA;
679  }
680  width = AV_RL16(buf + 3) & 0x3fff;
681  height = AV_RL16(buf + 5) & 0x3fff;
682  hscale = buf[4] >> 6;
683  vscale = buf[6] >> 6;
684  buf += 7;
685  buf_size -= 7;
686 
687  if (hscale || vscale)
688  avpriv_request_sample(s->avctx, "Upscaling");
689 
 690  s->update_golden = s->update_altref = VP56_FRAME_CURRENT;
 691  vp78_reset_probability_tables(s);
 692  memcpy(s->prob->pred16x16, vp8_pred16x16_prob_inter,
 693  sizeof(s->prob->pred16x16));
 694  memcpy(s->prob->pred8x8c, vp8_pred8x8c_prob_inter,
695  sizeof(s->prob->pred8x8c));
696  memcpy(s->prob->mvc, vp8_mv_default_prob,
697  sizeof(s->prob->mvc));
698  memset(&s->segmentation, 0, sizeof(s->segmentation));
699  memset(&s->lf_delta, 0, sizeof(s->lf_delta));
700  }
701 
702  ff_vp56_init_range_decoder(c, buf, header_size);
703  buf += header_size;
704  buf_size -= header_size;
705 
706  if (s->keyframe) {
707  s->colorspace = vp8_rac_get(c);
708  if (s->colorspace)
709  av_log(s->avctx, AV_LOG_WARNING, "Unspecified colorspace\n");
710  s->fullrange = vp8_rac_get(c);
711  }
712 
713  if ((s->segmentation.enabled = vp8_rac_get(c)))
 714  parse_segment_info(s);
715  else
716  s->segmentation.update_map = 0; // FIXME: move this to some init function?
717 
718  s->filter.simple = vp8_rac_get(c);
719  s->filter.level = vp8_rac_get_uint(c, 6);
720  s->filter.sharpness = vp8_rac_get_uint(c, 3);
721 
722  if ((s->lf_delta.enabled = vp8_rac_get(c)))
723  if (vp8_rac_get(c))
724  update_lf_deltas(s);
725 
726  if (setup_partitions(s, buf, buf_size)) {
727  av_log(s->avctx, AV_LOG_ERROR, "Invalid partitions\n");
728  return AVERROR_INVALIDDATA;
729  }
730 
731  if (!s->macroblocks_base || /* first frame */
732  width != s->avctx->width || height != s->avctx->height ||
733  (width+15)/16 != s->mb_width || (height+15)/16 != s->mb_height)
734  if ((ret = vp8_update_dimensions(s, width, height)) < 0)
735  return ret;
736 
737  vp8_get_quants(s);
738 
739  if (!s->keyframe) {
740  update_refs(s);
 741  s->sign_bias[VP56_FRAME_GOLDEN] = vp8_rac_get(c);
742  s->sign_bias[VP56_FRAME_GOLDEN2 /* altref */] = vp8_rac_get(c);
743  }
744 
745  // if we aren't saving this frame's probabilities for future frames,
746  // make a copy of the current probabilities
747  if (!(s->update_probabilities = vp8_rac_get(c)))
748  s->prob[1] = s->prob[0];
749 
750  s->update_last = s->keyframe || vp8_rac_get(c);
751 
 752  vp78_update_probability_tables(s);
753 
754  if ((s->mbskip_enabled = vp8_rac_get(c)))
755  s->prob->mbskip = vp8_rac_get_uint(c, 8);
756 
757  if (!s->keyframe) {
758  s->prob->intra = vp8_rac_get_uint(c, 8);
759  s->prob->last = vp8_rac_get_uint(c, 8);
760  s->prob->golden = vp8_rac_get_uint(c, 8);
 761  vp78_update_pred16x16_pred8x8_mvc_probabilities(s, VP8_MVC_SIZE);
762  }
763 
764  return 0;
765 }
766 
767 static av_always_inline
768 void clamp_mv(VP8Context *s, VP56mv *dst, const VP56mv *src)
769 {
770  dst->x = av_clip(src->x, av_clip(s->mv_min.x, INT16_MIN, INT16_MAX),
771  av_clip(s->mv_max.x, INT16_MIN, INT16_MAX));
772  dst->y = av_clip(src->y, av_clip(s->mv_min.y, INT16_MIN, INT16_MAX),
773  av_clip(s->mv_max.y, INT16_MIN, INT16_MAX));
774 }
775 
776 /**
777  * Motion vector coding, 17.1.
778  */
 779 static int read_mv_component(VP56RangeCoder *c, const uint8_t *p, int vp7)
 780 {
781  int bit, x = 0;
782 
783  if (vp56_rac_get_prob_branchy(c, p[0])) {
784  int i;
785 
786  for (i = 0; i < 3; i++)
787  x += vp56_rac_get_prob(c, p[9 + i]) << i;
788  for (i = (vp7 ? 7 : 9); i > 3; i--)
789  x += vp56_rac_get_prob(c, p[9 + i]) << i;
790  if (!(x & (vp7 ? 0xF0 : 0xFFF0)) || vp56_rac_get_prob(c, p[12]))
791  x += 8;
792  } else {
793  // small_mvtree
794  const uint8_t *ps = p + 2;
795  bit = vp56_rac_get_prob(c, *ps);
796  ps += 1 + 3 * bit;
797  x += 4 * bit;
798  bit = vp56_rac_get_prob(c, *ps);
799  ps += 1 + bit;
800  x += 2 * bit;
801  x += vp56_rac_get_prob(c, *ps);
802  }
803 
804  return (x && vp56_rac_get_prob(c, p[1])) ? -x : x;
805 }
806 
 807 static int vp7_read_mv_component(VP56RangeCoder *c, const uint8_t *p)
 808 {
809  return read_mv_component(c, p, 1);
810 }
811 
 812 static int vp8_read_mv_component(VP56RangeCoder *c, const uint8_t *p)
 813 {
814  return read_mv_component(c, p, 0);
815 }
816 
817 static av_always_inline
818 const uint8_t *get_submv_prob(uint32_t left, uint32_t top, int is_vp7)
819 {
820  if (is_vp7)
821  return vp7_submv_prob;
822 
823  if (left == top)
824  return vp8_submv_prob[4 - !!left];
825  if (!top)
826  return vp8_submv_prob[2];
827  return vp8_submv_prob[1 - !!left];
828 }
829 
830 /**
831  * Split motion vector prediction, 16.4.
832  * @returns the number of motion vectors parsed (2, 4 or 16)
833  */
834 static av_always_inline
 835 int decode_splitmvs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb,
836  int layout, int is_vp7)
837 {
838  int part_idx;
839  int n, num;
840  VP8Macroblock *top_mb;
841  VP8Macroblock *left_mb = &mb[-1];
842  const uint8_t *mbsplits_left = vp8_mbsplits[left_mb->partitioning];
843  const uint8_t *mbsplits_top, *mbsplits_cur, *firstidx;
844  VP56mv *top_mv;
845  VP56mv *left_mv = left_mb->bmv;
846  VP56mv *cur_mv = mb->bmv;
847 
848  if (!layout) // layout is inlined, s->mb_layout is not
849  top_mb = &mb[2];
850  else
851  top_mb = &mb[-s->mb_width - 1];
852  mbsplits_top = vp8_mbsplits[top_mb->partitioning];
853  top_mv = top_mb->bmv;
854 
 855  if (vp56_rac_get_prob_branchy(c, vp8_mbsplit_prob[0])) {
 856  if (vp56_rac_get_prob_branchy(c, vp8_mbsplit_prob[1]))
 857  part_idx = VP8_SPLITMVMODE_16x8 + vp56_rac_get_prob(c, vp8_mbsplit_prob[2]);
858  else
859  part_idx = VP8_SPLITMVMODE_8x8;
860  } else {
861  part_idx = VP8_SPLITMVMODE_4x4;
862  }
863 
864  num = vp8_mbsplit_count[part_idx];
865  mbsplits_cur = vp8_mbsplits[part_idx],
866  firstidx = vp8_mbfirstidx[part_idx];
867  mb->partitioning = part_idx;
868 
869  for (n = 0; n < num; n++) {
870  int k = firstidx[n];
871  uint32_t left, above;
872  const uint8_t *submv_prob;
873 
874  if (!(k & 3))
875  left = AV_RN32A(&left_mv[mbsplits_left[k + 3]]);
876  else
877  left = AV_RN32A(&cur_mv[mbsplits_cur[k - 1]]);
878  if (k <= 3)
879  above = AV_RN32A(&top_mv[mbsplits_top[k + 12]]);
880  else
881  above = AV_RN32A(&cur_mv[mbsplits_cur[k - 4]]);
882 
883  submv_prob = get_submv_prob(left, above, is_vp7);
884 
885  if (vp56_rac_get_prob_branchy(c, submv_prob[0])) {
886  if (vp56_rac_get_prob_branchy(c, submv_prob[1])) {
887  if (vp56_rac_get_prob_branchy(c, submv_prob[2])) {
888  mb->bmv[n].y = mb->mv.y +
889  read_mv_component(c, s->prob->mvc[0], is_vp7);
890  mb->bmv[n].x = mb->mv.x +
891  read_mv_component(c, s->prob->mvc[1], is_vp7);
892  } else {
893  AV_ZERO32(&mb->bmv[n]);
894  }
895  } else {
896  AV_WN32A(&mb->bmv[n], above);
897  }
898  } else {
899  AV_WN32A(&mb->bmv[n], left);
900  }
901  }
902 
903  return num;
904 }
905 
906 /**
907  * The vp7 reference decoder uses a padding macroblock column (added to right
908  * edge of the frame) to guard against illegal macroblock offsets. The
909  * algorithm has bugs that permit offsets to straddle the padding column.
910  * This function replicates those bugs.
911  *
912  * @param[out] edge_x macroblock x address
913  * @param[out] edge_y macroblock y address
914  *
915  * @return macroblock offset legal (boolean)
916  */
917 static int vp7_calculate_mb_offset(int mb_x, int mb_y, int mb_width,
918  int xoffset, int yoffset, int boundary,
919  int *edge_x, int *edge_y)
920 {
921  int vwidth = mb_width + 1;
922  int new = (mb_y + yoffset) * vwidth + mb_x + xoffset;
923  if (new < boundary || new % vwidth == vwidth - 1)
924  return 0;
925  *edge_y = new / vwidth;
926  *edge_x = new % vwidth;
927  return 1;
928 }
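/* Worked example (illustrative values): with mb_width = 4 the virtual row width is
 * vwidth = 5, column 4 being the padding column. A candidate at mb_x = 3, mb_y = 1
 * with xoffset = 1, yoffset = 0 flattens to new = 1 * 5 + 3 + 1 = 9; since
 * 9 % 5 == 4 hits the padding column, the offset is rejected. */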
929 
930 static const VP56mv *get_bmv_ptr(const VP8Macroblock *mb, int subblock)
931 {
932  return &mb->bmv[mb->mode == VP8_MVMODE_SPLIT ? vp8_mbsplits[mb->partitioning][subblock] : 0];
933 }
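/* For VP8_MVMODE_SPLIT the per-subblock vector is selected through the vp8_mbsplits
 * partition map; every other mode keeps its single vector in bmv[0]. */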
934 
935 static av_always_inline
 936 void vp7_decode_mvs(VP8Context *s, VP8Macroblock *mb,
937  int mb_x, int mb_y, int layout)
938 {
939  VP8Macroblock *mb_edge[12];
940  enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR };
941  enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
942  int idx = CNT_ZERO;
943  VP56mv near_mv[3];
944  uint8_t cnt[3] = { 0 };
945  VP56RangeCoder *c = &s->c;
946  int i;
947 
948  AV_ZERO32(&near_mv[0]);
949  AV_ZERO32(&near_mv[1]);
950  AV_ZERO32(&near_mv[2]);
951 
952  for (i = 0; i < VP7_MV_PRED_COUNT; i++) {
953  const VP7MVPred * pred = &vp7_mv_pred[i];
954  int edge_x, edge_y;
955 
956  if (vp7_calculate_mb_offset(mb_x, mb_y, s->mb_width, pred->xoffset,
957  pred->yoffset, !s->profile, &edge_x, &edge_y)) {
958  VP8Macroblock *edge = mb_edge[i] = (s->mb_layout == 1)
959  ? s->macroblocks_base + 1 + edge_x +
960  (s->mb_width + 1) * (edge_y + 1)
961  : s->macroblocks + edge_x +
962  (s->mb_height - edge_y - 1) * 2;
963  uint32_t mv = AV_RN32A(get_bmv_ptr(edge, vp7_mv_pred[i].subblock));
964  if (mv) {
965  if (AV_RN32A(&near_mv[CNT_NEAREST])) {
966  if (mv == AV_RN32A(&near_mv[CNT_NEAREST])) {
967  idx = CNT_NEAREST;
968  } else if (AV_RN32A(&near_mv[CNT_NEAR])) {
969  if (mv != AV_RN32A(&near_mv[CNT_NEAR]))
970  continue;
971  idx = CNT_NEAR;
972  } else {
973  AV_WN32A(&near_mv[CNT_NEAR], mv);
974  idx = CNT_NEAR;
975  }
976  } else {
977  AV_WN32A(&near_mv[CNT_NEAREST], mv);
978  idx = CNT_NEAREST;
979  }
980  } else {
981  idx = CNT_ZERO;
982  }
983  } else {
984  idx = CNT_ZERO;
985  }
986  cnt[idx] += vp7_mv_pred[i].score;
987  }
988 
 989  mb->partitioning = VP8_SPLITMVMODE_NONE;
990 
991  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_ZERO]][0])) {
992  mb->mode = VP8_MVMODE_MV;
993 
994  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAREST]][1])) {
995 
996  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAR]][2])) {
997 
998  if (cnt[CNT_NEAREST] > cnt[CNT_NEAR])
999  AV_WN32A(&mb->mv, cnt[CNT_ZERO] > cnt[CNT_NEAREST] ? 0 : AV_RN32A(&near_mv[CNT_NEAREST]));
1000  else
1001  AV_WN32A(&mb->mv, cnt[CNT_ZERO] > cnt[CNT_NEAR] ? 0 : AV_RN32A(&near_mv[CNT_NEAR]));
1002 
1003  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAR]][3])) {
1004  mb->mode = VP8_MVMODE_SPLIT;
1005  mb->mv = mb->bmv[decode_splitmvs(s, c, mb, layout, IS_VP7) - 1];
1006  } else {
1007  mb->mv.y += vp7_read_mv_component(c, s->prob->mvc[0]);
1008  mb->mv.x += vp7_read_mv_component(c, s->prob->mvc[1]);
1009  mb->bmv[0] = mb->mv;
1010  }
1011  } else {
1012  mb->mv = near_mv[CNT_NEAR];
1013  mb->bmv[0] = mb->mv;
1014  }
1015  } else {
1016  mb->mv = near_mv[CNT_NEAREST];
1017  mb->bmv[0] = mb->mv;
1018  }
1019  } else {
1020  mb->mode = VP8_MVMODE_ZERO;
1021  AV_ZERO32(&mb->mv);
1022  mb->bmv[0] = mb->mv;
1023  }
1024 }
1025 
1026 static av_always_inline
 1027 void vp8_decode_mvs(VP8Context *s, VP8Macroblock *mb,
1028  int mb_x, int mb_y, int layout)
1029 {
1030  VP8Macroblock *mb_edge[3] = { 0 /* top */,
1031  mb - 1 /* left */,
1032  0 /* top-left */ };
1033  enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };
1034  enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
1035  int idx = CNT_ZERO;
1036  int cur_sign_bias = s->sign_bias[mb->ref_frame];
1037  int8_t *sign_bias = s->sign_bias;
1038  VP56mv near_mv[4];
1039  uint8_t cnt[4] = { 0 };
1040  VP56RangeCoder *c = &s->c;
1041 
1042  if (!layout) { // layout is inlined (s->mb_layout is not)
1043  mb_edge[0] = mb + 2;
1044  mb_edge[2] = mb + 1;
1045  } else {
1046  mb_edge[0] = mb - s->mb_width - 1;
1047  mb_edge[2] = mb - s->mb_width - 2;
1048  }
1049 
1050  AV_ZERO32(&near_mv[0]);
1051  AV_ZERO32(&near_mv[1]);
1052  AV_ZERO32(&near_mv[2]);
1053 
1054  /* Process MB on top, left and top-left */
1055 #define MV_EDGE_CHECK(n) \
1056  { \
1057  VP8Macroblock *edge = mb_edge[n]; \
1058  int edge_ref = edge->ref_frame; \
1059  if (edge_ref != VP56_FRAME_CURRENT) { \
1060  uint32_t mv = AV_RN32A(&edge->mv); \
1061  if (mv) { \
1062  if (cur_sign_bias != sign_bias[edge_ref]) { \
1063  /* SWAR negate of the values in mv. */ \
1064  mv = ~mv; \
1065  mv = ((mv & 0x7fff7fff) + \
1066  0x00010001) ^ (mv & 0x80008000); \
1067  } \
1068  if (!n || mv != AV_RN32A(&near_mv[idx])) \
1069  AV_WN32A(&near_mv[++idx], mv); \
1070  cnt[idx] += 1 + (n != 2); \
1071  } else \
1072  cnt[CNT_ZERO] += 1 + (n != 2); \
1073  } \
1074  }
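/* The SWAR negation above flips the sign of both packed 16-bit MV components in one
 * 32-bit operation: per component, ~v + 1 == -v, and masking off the top bits before
 * the add keeps the +1 carry from spilling into the neighbouring component
 * (e.g. a packed (x = 5, y = -3) becomes (x = -5, y = 3)). */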
1075 
1076  MV_EDGE_CHECK(0)
1077  MV_EDGE_CHECK(1)
1078  MV_EDGE_CHECK(2)
1079 
 1080  mb->partitioning = VP8_SPLITMVMODE_NONE;
1081  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_ZERO]][0])) {
1082  mb->mode = VP8_MVMODE_MV;
1083 
1084  /* If we have three distinct MVs, merge first and last if they're the same */
1085  if (cnt[CNT_SPLITMV] &&
1086  AV_RN32A(&near_mv[1 + VP8_EDGE_TOP]) == AV_RN32A(&near_mv[1 + VP8_EDGE_TOPLEFT]))
1087  cnt[CNT_NEAREST] += 1;
1088 
1089  /* Swap near and nearest if necessary */
1090  if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
1091  FFSWAP(uint8_t, cnt[CNT_NEAREST], cnt[CNT_NEAR]);
1092  FFSWAP( VP56mv, near_mv[CNT_NEAREST], near_mv[CNT_NEAR]);
1093  }
1094 
1095  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAREST]][1])) {
1096  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAR]][2])) {
1097  /* Choose the best mv out of 0,0 and the nearest mv */
1098  clamp_mv(s, &mb->mv, &near_mv[CNT_ZERO + (cnt[CNT_NEAREST] >= cnt[CNT_ZERO])]);
1099  cnt[CNT_SPLITMV] = ((mb_edge[VP8_EDGE_LEFT]->mode == VP8_MVMODE_SPLIT) +
1100  (mb_edge[VP8_EDGE_TOP]->mode == VP8_MVMODE_SPLIT)) * 2 +
1101  (mb_edge[VP8_EDGE_TOPLEFT]->mode == VP8_MVMODE_SPLIT);
1102 
1103  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_SPLITMV]][3])) {
1104  mb->mode = VP8_MVMODE_SPLIT;
1105  mb->mv = mb->bmv[decode_splitmvs(s, c, mb, layout, IS_VP8) - 1];
1106  } else {
1107  mb->mv.y += vp8_read_mv_component(c, s->prob->mvc[0]);
1108  mb->mv.x += vp8_read_mv_component(c, s->prob->mvc[1]);
1109  mb->bmv[0] = mb->mv;
1110  }
1111  } else {
1112  clamp_mv(s, &mb->mv, &near_mv[CNT_NEAR]);
1113  mb->bmv[0] = mb->mv;
1114  }
1115  } else {
1116  clamp_mv(s, &mb->mv, &near_mv[CNT_NEAREST]);
1117  mb->bmv[0] = mb->mv;
1118  }
1119  } else {
1120  mb->mode = VP8_MVMODE_ZERO;
1121  AV_ZERO32(&mb->mv);
1122  mb->bmv[0] = mb->mv;
1123  }
1124 }
1125 
1126 static av_always_inline
 1127 void decode_intra4x4_modes(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb,
1128  int mb_x, int keyframe, int layout)
1129 {
1130  uint8_t *intra4x4 = mb->intra4x4_pred_mode_mb;
1131 
1132  if (layout) {
1133  VP8Macroblock *mb_top = mb - s->mb_width - 1;
1134  memcpy(mb->intra4x4_pred_mode_top, mb_top->intra4x4_pred_mode_top, 4);
1135  }
1136  if (keyframe) {
1137  int x, y;
1138  uint8_t *top;
1139  uint8_t *const left = s->intra4x4_pred_mode_left;
1140  if (layout)
1141  top = mb->intra4x4_pred_mode_top;
1142  else
1143  top = s->intra4x4_pred_mode_top + 4 * mb_x;
1144  for (y = 0; y < 4; y++) {
1145  for (x = 0; x < 4; x++) {
1146  const uint8_t *ctx;
1147  ctx = vp8_pred4x4_prob_intra[top[x]][left[y]];
1148  *intra4x4 = vp8_rac_get_tree(c, vp8_pred4x4_tree, ctx);
1149  left[y] = top[x] = *intra4x4;
1150  intra4x4++;
1151  }
1152  }
1153  } else {
1154  int i;
1155  for (i = 0; i < 16; i++)
1156  intra4x4[i] = vp8_rac_get_tree(c, vp8_pred4x4_tree,
 1157  s->prob->pred4x4);
1158  }
1159 }
1160 
1161 static av_always_inline
1162 void decode_mb_mode(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y,
1163  uint8_t *segment, uint8_t *ref, int layout, int is_vp7)
1164 {
1165  VP56RangeCoder *c = &s->c;
1166  static const char *vp7_feature_name[] = { "q-index",
1167  "lf-delta",
1168  "partial-golden-update",
1169  "blit-pitch" };
1170  if (is_vp7) {
1171  int i;
1172  *segment = 0;
1173  for (i = 0; i < 4; i++) {
 1174  if (s->feature_enabled[i]) {
 1175  if (vp56_rac_get_prob_branchy(c, s->feature_present_prob[i])) {
 1176  int index = vp8_rac_get_tree(c, vp7_feature_index_tree,
 1177  s->feature_index_prob[i]);
 1178  av_log(s->avctx, AV_LOG_WARNING,
 1179  "Feature %s present in macroblock (value 0x%x)\n",
1180  vp7_feature_name[i], s->feature_value[i][index]);
1181  }
1182  }
1183  }
1184  } else if (s->segmentation.update_map) {
1185  int bit = vp56_rac_get_prob(c, s->prob->segmentid[0]);
1186  *segment = vp56_rac_get_prob(c, s->prob->segmentid[1+bit]) + 2*bit;
1187  } else if (s->segmentation.enabled)
1188  *segment = ref ? *ref : *segment;
1189  mb->segment = *segment;
1190 
1191  mb->skip = s->mbskip_enabled ? vp56_rac_get_prob(c, s->prob->mbskip) : 0;
1192 
1193  if (s->keyframe) {
 1194  mb->mode = vp8_rac_get_tree(c, vp8_pred16x16_tree_intra,
 1195  vp8_pred16x16_prob_intra);
1196 
1197  if (mb->mode == MODE_I4x4) {
1198  decode_intra4x4_modes(s, c, mb, mb_x, 1, layout);
1199  } else {
1200  const uint32_t modes = (is_vp7 ? vp7_pred4x4_mode
1201  : vp8_pred4x4_mode)[mb->mode] * 0x01010101u;
1202  if (s->mb_layout)
1203  AV_WN32A(mb->intra4x4_pred_mode_top, modes);
1204  else
1205  AV_WN32A(s->intra4x4_pred_mode_top + 4 * mb_x, modes);
1206  AV_WN32A(s->intra4x4_pred_mode_left, modes);
1207  }
1208 
 1209  mb->chroma_pred_mode = vp8_rac_get_tree(c, vp8_pred8x8c_tree,
 1210  vp8_pred8x8c_prob_intra);
 1211  mb->ref_frame = VP56_FRAME_CURRENT;
 1212  } else if (vp56_rac_get_prob_branchy(c, s->prob->intra)) {
1213  // inter MB, 16.2
1214  if (vp56_rac_get_prob_branchy(c, s->prob->last))
1215  mb->ref_frame =
1216  (!is_vp7 && vp56_rac_get_prob(c, s->prob->golden)) ? VP56_FRAME_GOLDEN2 /* altref */
 1217  : VP56_FRAME_GOLDEN;
 1218  else
 1219  mb->ref_frame = VP56_FRAME_PREVIOUS;
1220  s->ref_count[mb->ref_frame - 1]++;
1221 
1222  // motion vectors, 16.3
1223  if (is_vp7)
1224  vp7_decode_mvs(s, mb, mb_x, mb_y, layout);
1225  else
1226  vp8_decode_mvs(s, mb, mb_x, mb_y, layout);
1227  } else {
1228  // intra MB, 16.1
 1229  mb->mode = vp8_rac_get_tree(c, vp8_pred16x16_tree_inter, s->prob->pred16x16);
1230 
1231  if (mb->mode == MODE_I4x4)
1232  decode_intra4x4_modes(s, c, mb, mb_x, 0, layout);
1233 
 1234  mb->chroma_pred_mode = vp8_rac_get_tree(c, vp8_pred8x8c_tree,
 1235  s->prob->pred8x8c);
 1236  mb->ref_frame = VP56_FRAME_CURRENT;
 1237  mb->partitioning = VP8_SPLITMVMODE_NONE;
1238  AV_ZERO32(&mb->bmv[0]);
1239  }
1240 }
1241 
1242 /**
1243  * @param r arithmetic bitstream reader context
1244  * @param block destination for block coefficients
1245  * @param probs probabilities to use when reading trees from the bitstream
1246  * @param i initial coeff index, 0 unless a separate DC block is coded
1247  * @param qmul array holding the dc/ac dequant factor at position 0/1
1248  *
1249  * @return 0 if no coeffs were decoded
1250  * otherwise, the index of the last coeff decoded plus one
1251  */
1252 static av_always_inline
 1253 int decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16],
1254  uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
1255  int i, uint8_t *token_prob, int16_t qmul[2],
1256  const uint8_t scan[16], int vp7)
1257 {
1258  VP56RangeCoder c = *r;
1259  goto skip_eob;
1260  do {
1261  int coeff;
1262 restart:
1263  if (!vp56_rac_get_prob_branchy(&c, token_prob[0])) // DCT_EOB
1264  break;
1265 
1266 skip_eob:
1267  if (!vp56_rac_get_prob_branchy(&c, token_prob[1])) { // DCT_0
1268  if (++i == 16)
1269  break; // invalid input; blocks should end with EOB
1270  token_prob = probs[i][0];
1271  if (vp7)
1272  goto restart;
1273  goto skip_eob;
1274  }
1275 
1276  if (!vp56_rac_get_prob_branchy(&c, token_prob[2])) { // DCT_1
1277  coeff = 1;
1278  token_prob = probs[i + 1][1];
1279  } else {
1280  if (!vp56_rac_get_prob_branchy(&c, token_prob[3])) { // DCT 2,3,4
1281  coeff = vp56_rac_get_prob_branchy(&c, token_prob[4]);
1282  if (coeff)
1283  coeff += vp56_rac_get_prob(&c, token_prob[5]);
1284  coeff += 2;
1285  } else {
1286  // DCT_CAT*
1287  if (!vp56_rac_get_prob_branchy(&c, token_prob[6])) {
1288  if (!vp56_rac_get_prob_branchy(&c, token_prob[7])) { // DCT_CAT1
1289  coeff = 5 + vp56_rac_get_prob(&c, vp8_dct_cat1_prob[0]);
1290  } else { // DCT_CAT2
1291  coeff = 7;
1292  coeff += vp56_rac_get_prob(&c, vp8_dct_cat2_prob[0]) << 1;
1293  coeff += vp56_rac_get_prob(&c, vp8_dct_cat2_prob[1]);
1294  }
1295  } else { // DCT_CAT3 and up
1296  int a = vp56_rac_get_prob(&c, token_prob[8]);
1297  int b = vp56_rac_get_prob(&c, token_prob[9 + a]);
1298  int cat = (a << 1) + b;
1299  coeff = 3 + (8 << cat);
1300  coeff += vp8_rac_get_coeff(&c, ff_vp8_dct_cat_prob[cat]);
1301  }
1302  }
1303  token_prob = probs[i + 1][2];
1304  }
1305  block[scan[i]] = (vp8_rac_get(&c) ? -coeff : coeff) * qmul[!!i];
1306  } while (++i < 16);
1307 
1308  *r = c;
1309  return i;
1310 }
1311 
1312 static av_always_inline
1313 int inter_predict_dc(int16_t block[16], int16_t pred[2])
1314 {
1315  int16_t dc = block[0];
1316  int ret = 0;
1317 
1318  if (pred[1] > 3) {
1319  dc += pred[0];
1320  ret = 1;
1321  }
1322 
1323  if (!pred[0] | !dc | ((int32_t)pred[0] ^ (int32_t)dc) >> 31) {
1324  block[0] = pred[0] = dc;
1325  pred[1] = 0;
1326  } else {
1327  if (pred[0] == dc)
1328  pred[1]++;
1329  block[0] = pred[0] = dc;
1330  }
1331 
1332  return ret;
1333 }
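/* VP7 only: pred[0] holds the running DC value and pred[1] counts how many times in
 * a row the same DC has been seen; once it has repeated more than three times
 * (pred[1] > 3) the stored DC is added to subsequent blocks as a predictor, which is
 * what the non-zero return value reports to the caller. */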
1334 
 1335 static int vp7_decode_block_coeffs_internal(VP56RangeCoder *r,
 1336  int16_t block[16],
1337  uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
1338  int i, uint8_t *token_prob,
1339  int16_t qmul[2],
1340  const uint8_t scan[16])
1341 {
1342  return decode_block_coeffs_internal(r, block, probs, i,
1343  token_prob, qmul, scan, IS_VP7);
1344 }
1345 
1346 #ifndef vp8_decode_block_coeffs_internal
 1347 static int vp8_decode_block_coeffs_internal(VP56RangeCoder *r,
1348  int16_t block[16],
1349  uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
1350  int i, uint8_t *token_prob,
1351  int16_t qmul[2])
1352 {
1353  return decode_block_coeffs_internal(r, block, probs, i,
1354  token_prob, qmul, ff_zigzag_scan, IS_VP8);
1355 }
1356 #endif
1357 
1358 /**
1359  * @param c arithmetic bitstream reader context
1360  * @param block destination for block coefficients
1361  * @param probs probabilities to use when reading trees from the bitstream
1362  * @param i initial coeff index, 0 unless a separate DC block is coded
1363  * @param zero_nhood the initial prediction context for number of surrounding
1364  * all-zero blocks (only left/top, so 0-2)
1365  * @param qmul array holding the dc/ac dequant factor at position 0/1
1366  * @param scan scan pattern (VP7 only)
1367  *
1368  * @return 0 if no coeffs were decoded
1369  * otherwise, the index of the last coeff decoded plus one
1370  */
1371 static av_always_inline
 1372 int decode_block_coeffs(VP56RangeCoder *c, int16_t block[16],
1373  uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
1374  int i, int zero_nhood, int16_t qmul[2],
1375  const uint8_t scan[16], int vp7)
1376 {
1377  uint8_t *token_prob = probs[i][zero_nhood];
1378  if (!vp56_rac_get_prob_branchy(c, token_prob[0])) // DCT_EOB
1379  return 0;
1380  return vp7 ? vp7_decode_block_coeffs_internal(c, block, probs, i,
1381  token_prob, qmul, scan)
1382  : vp8_decode_block_coeffs_internal(c, block, probs, i,
1383  token_prob, qmul);
1384 }
1385 
1386 static av_always_inline
 1387 void decode_mb_coeffs(VP8Context *s, VP8ThreadData *td, VP56RangeCoder *c,
1388  VP8Macroblock *mb, uint8_t t_nnz[9], uint8_t l_nnz[9],
1389  int is_vp7)
1390 {
1391  int i, x, y, luma_start = 0, luma_ctx = 3;
1392  int nnz_pred, nnz, nnz_total = 0;
1393  int segment = mb->segment;
1394  int block_dc = 0;
1395 
1396  if (mb->mode != MODE_I4x4 && (is_vp7 || mb->mode != VP8_MVMODE_SPLIT)) {
1397  nnz_pred = t_nnz[8] + l_nnz[8];
1398 
1399  // decode DC values and do hadamard
1400  nnz = decode_block_coeffs(c, td->block_dc, s->prob->token[1], 0,
1401  nnz_pred, s->qmat[segment].luma_dc_qmul,
1402  ff_zigzag_scan, is_vp7);
1403  l_nnz[8] = t_nnz[8] = !!nnz;
1404 
1405  if (is_vp7 && mb->mode > MODE_I4x4) {
1406  nnz |= inter_predict_dc(td->block_dc,
1407  s->inter_dc_pred[mb->ref_frame - 1]);
1408  }
1409 
1410  if (nnz) {
1411  nnz_total += nnz;
1412  block_dc = 1;
1413  if (nnz == 1)
1414  s->vp8dsp.vp8_luma_dc_wht_dc(td->block, td->block_dc);
1415  else
1416  s->vp8dsp.vp8_luma_dc_wht(td->block, td->block_dc);
1417  }
1418  luma_start = 1;
1419  luma_ctx = 0;
1420  }
1421 
1422  // luma blocks
1423  for (y = 0; y < 4; y++)
1424  for (x = 0; x < 4; x++) {
1425  nnz_pred = l_nnz[y] + t_nnz[x];
1426  nnz = decode_block_coeffs(c, td->block[y][x],
1427  s->prob->token[luma_ctx],
1428  luma_start, nnz_pred,
1429  s->qmat[segment].luma_qmul,
1430  s->prob[0].scan, is_vp7);
1431  /* nnz+block_dc may be one more than the actual last index,
1432  * but we don't care */
1433  td->non_zero_count_cache[y][x] = nnz + block_dc;
1434  t_nnz[x] = l_nnz[y] = !!nnz;
1435  nnz_total += nnz;
1436  }
1437 
1438  // chroma blocks
1439  // TODO: what to do about dimensions? 2nd dim for luma is x,
1440  // but for chroma it's (y<<1)|x
1441  for (i = 4; i < 6; i++)
1442  for (y = 0; y < 2; y++)
1443  for (x = 0; x < 2; x++) {
1444  nnz_pred = l_nnz[i + 2 * y] + t_nnz[i + 2 * x];
1445  nnz = decode_block_coeffs(c, td->block[i][(y << 1) + x],
1446  s->prob->token[2], 0, nnz_pred,
1447  s->qmat[segment].chroma_qmul,
1448  s->prob[0].scan, is_vp7);
1449  td->non_zero_count_cache[i][(y << 1) + x] = nnz;
1450  t_nnz[i + 2 * x] = l_nnz[i + 2 * y] = !!nnz;
1451  nnz_total += nnz;
1452  }
1453 
1454  // if there were no coded coeffs despite the macroblock not being marked skip,
1455  // we MUST not do the inner loop filter and should not do IDCT
1456  // Since skip isn't used for bitstream prediction, just manually set it.
1457  if (!nnz_total)
1458  mb->skip = 1;
1459 }
1460 
1461 static av_always_inline
1462 void backup_mb_border(uint8_t *top_border, uint8_t *src_y,
1463  uint8_t *src_cb, uint8_t *src_cr,
1464  int linesize, int uvlinesize, int simple)
1465 {
1466  AV_COPY128(top_border, src_y + 15 * linesize);
1467  if (!simple) {
1468  AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
1469  AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
1470  }
1471 }
1472 
1473 static av_always_inline
1474 void xchg_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb,
1475  uint8_t *src_cr, int linesize, int uvlinesize, int mb_x,
1476  int mb_y, int mb_width, int simple, int xchg)
1477 {
1478  uint8_t *top_border_m1 = top_border - 32; // for TL prediction
1479  src_y -= linesize;
1480  src_cb -= uvlinesize;
1481  src_cr -= uvlinesize;
1482 
1483 #define XCHG(a, b, xchg) \
1484  do { \
1485  if (xchg) \
1486  AV_SWAP64(b, a); \
1487  else \
1488  AV_COPY64(b, a); \
1489  } while (0)
1490 
1491  XCHG(top_border_m1 + 8, src_y - 8, xchg);
1492  XCHG(top_border, src_y, xchg);
1493  XCHG(top_border + 8, src_y + 8, 1);
1494  if (mb_x < mb_width - 1)
1495  XCHG(top_border + 32, src_y + 16, 1);
1496 
1497  // only copy chroma for normal loop filter
1498  // or to initialize the top row to 127
1499  if (!simple || !mb_y) {
1500  XCHG(top_border_m1 + 16, src_cb - 8, xchg);
1501  XCHG(top_border_m1 + 24, src_cr - 8, xchg);
1502  XCHG(top_border + 16, src_cb, 1);
1503  XCHG(top_border + 24, src_cr, 1);
1504  }
1505 }
1506 
1507 static av_always_inline
1508 int check_dc_pred8x8_mode(int mode, int mb_x, int mb_y)
1509 {
1510  if (!mb_x)
1511  return mb_y ? TOP_DC_PRED8x8 : DC_128_PRED8x8;
1512  else
1513  return mb_y ? mode : LEFT_DC_PRED8x8;
1514 }
1515 
1516 static av_always_inline
1517 int check_tm_pred8x8_mode(int mode, int mb_x, int mb_y, int vp7)
1518 {
1519  if (!mb_x)
1520  return mb_y ? VERT_PRED8x8 : (vp7 ? DC_128_PRED8x8 : DC_129_PRED8x8);
1521  else
1522  return mb_y ? mode : HOR_PRED8x8;
1523 }
1524 
1525 static av_always_inline
1526 int check_intra_pred8x8_mode_emuedge(int mode, int mb_x, int mb_y, int vp7)
1527 {
1528  switch (mode) {
1529  case DC_PRED8x8:
1530  return check_dc_pred8x8_mode(mode, mb_x, mb_y);
1531  case VERT_PRED8x8:
1532  return !mb_y ? (vp7 ? DC_128_PRED8x8 : DC_127_PRED8x8) : mode;
1533  case HOR_PRED8x8:
1534  return !mb_x ? (vp7 ? DC_128_PRED8x8 : DC_129_PRED8x8) : mode;
1535  case PLANE_PRED8x8: /* TM */
1536  return check_tm_pred8x8_mode(mode, mb_x, mb_y, vp7);
1537  }
1538  return mode;
1539 }
1540 
1541 static av_always_inline
1542 int check_tm_pred4x4_mode(int mode, int mb_x, int mb_y, int vp7)
1543 {
1544  if (!mb_x) {
1545  return mb_y ? VERT_VP8_PRED : (vp7 ? DC_128_PRED : DC_129_PRED);
1546  } else {
1547  return mb_y ? mode : HOR_VP8_PRED;
1548  }
1549 }
1550 
1551 static av_always_inline
1552 int check_intra_pred4x4_mode_emuedge(int mode, int mb_x, int mb_y,
1553  int *copy_buf, int vp7)
1554 {
1555  switch (mode) {
1556  case VERT_PRED:
1557  if (!mb_x && mb_y) {
1558  *copy_buf = 1;
1559  return mode;
1560  }
1561  /* fall-through */
1562  case DIAG_DOWN_LEFT_PRED:
1563  case VERT_LEFT_PRED:
1564  return !mb_y ? (vp7 ? DC_128_PRED : DC_127_PRED) : mode;
1565  case HOR_PRED:
1566  if (!mb_y) {
1567  *copy_buf = 1;
1568  return mode;
1569  }
1570  /* fall-through */
1571  case HOR_UP_PRED:
1572  return !mb_x ? (vp7 ? DC_128_PRED : DC_129_PRED) : mode;
1573  case TM_VP8_PRED:
1574  return check_tm_pred4x4_mode(mode, mb_x, mb_y, vp7);
1575  case DC_PRED: /* 4x4 DC doesn't use the same "H.264-style" exceptions
1576  * as 16x16/8x8 DC */
1577  case DIAG_DOWN_RIGHT_PRED:
1578  case VERT_RIGHT_PRED:
1579  case HOR_DOWN_PRED:
1580  if (!mb_y || !mb_x)
1581  *copy_buf = 1;
1582  return mode;
1583  }
1584  return mode;
1585 }
1586 
1587 static av_always_inline
 1588 void intra_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3],
1589  VP8Macroblock *mb, int mb_x, int mb_y, int is_vp7)
1590 {
1591  int x, y, mode, nnz;
1592  uint32_t tr;
1593 
 1594  /* for the first row, we need to run xchg_mb_border to init the top edge
 1595  * to 127; otherwise, skip it if we aren't going to deblock */
1596  if (mb_y && (s->deblock_filter || !mb_y) && td->thread_nr == 0)
1597  xchg_mb_border(s->top_border[mb_x + 1], dst[0], dst[1], dst[2],
1598  s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
1599  s->filter.simple, 1);
1600 
1601  if (mb->mode < MODE_I4x4) {
1602  mode = check_intra_pred8x8_mode_emuedge(mb->mode, mb_x, mb_y, is_vp7);
1603  s->hpc.pred16x16[mode](dst[0], s->linesize);
1604  } else {
1605  uint8_t *ptr = dst[0];
1606  uint8_t *intra4x4 = mb->intra4x4_pred_mode_mb;
1607  const uint8_t lo = is_vp7 ? 128 : 127;
1608  const uint8_t hi = is_vp7 ? 128 : 129;
1609  uint8_t tr_top[4] = { lo, lo, lo, lo };
1610 
 1611  // all blocks on the right edge of the macroblock use the bottom edge of
 1612  // the top macroblock for their topright edge
1613  uint8_t *tr_right = ptr - s->linesize + 16;
1614 
1615  // if we're on the right edge of the frame, said edge is extended
1616  // from the top macroblock
1617  if (mb_y && mb_x == s->mb_width - 1) {
1618  tr = tr_right[-1] * 0x01010101u;
1619  tr_right = (uint8_t *) &tr;
1620  }
1621 
1622  if (mb->skip)
 1623  AV_ZERO128(td->non_zero_count_cache);
1624 
1625  for (y = 0; y < 4; y++) {
1626  uint8_t *topright = ptr + 4 - s->linesize;
1627  for (x = 0; x < 4; x++) {
1628  int copy = 0, linesize = s->linesize;
1629  uint8_t *dst = ptr + 4 * x;
1630  LOCAL_ALIGNED(4, uint8_t, copy_dst, [5 * 8]);
1631 
1632  if ((y == 0 || x == 3) && mb_y == 0) {
1633  topright = tr_top;
1634  } else if (x == 3)
1635  topright = tr_right;
1636 
1637  mode = check_intra_pred4x4_mode_emuedge(intra4x4[x], mb_x + x,
1638  mb_y + y, &copy, is_vp7);
1639  if (copy) {
1640  dst = copy_dst + 12;
1641  linesize = 8;
1642  if (!(mb_y + y)) {
1643  copy_dst[3] = lo;
1644  AV_WN32A(copy_dst + 4, lo * 0x01010101U);
1645  } else {
1646  AV_COPY32(copy_dst + 4, ptr + 4 * x - s->linesize);
1647  if (!(mb_x + x)) {
1648  copy_dst[3] = hi;
1649  } else {
1650  copy_dst[3] = ptr[4 * x - s->linesize - 1];
1651  }
1652  }
1653  if (!(mb_x + x)) {
1654  copy_dst[11] =
1655  copy_dst[19] =
1656  copy_dst[27] =
1657  copy_dst[35] = hi;
1658  } else {
1659  copy_dst[11] = ptr[4 * x - 1];
1660  copy_dst[19] = ptr[4 * x + s->linesize - 1];
1661  copy_dst[27] = ptr[4 * x + s->linesize * 2 - 1];
1662  copy_dst[35] = ptr[4 * x + s->linesize * 3 - 1];
1663  }
1664  }
1665  s->hpc.pred4x4[mode](dst, topright, linesize);
1666  if (copy) {
1667  AV_COPY32(ptr + 4 * x, copy_dst + 12);
1668  AV_COPY32(ptr + 4 * x + s->linesize, copy_dst + 20);
1669  AV_COPY32(ptr + 4 * x + s->linesize * 2, copy_dst + 28);
1670  AV_COPY32(ptr + 4 * x + s->linesize * 3, copy_dst + 36);
1671  }
1672 
1673  nnz = td->non_zero_count_cache[y][x];
1674  if (nnz) {
1675  if (nnz == 1)
1676  s->vp8dsp.vp8_idct_dc_add(ptr + 4 * x,
1677  td->block[y][x], s->linesize);
1678  else
1679  s->vp8dsp.vp8_idct_add(ptr + 4 * x,
1680  td->block[y][x], s->linesize);
1681  }
1682  topright += 4;
1683  }
1684 
1685  ptr += 4 * s->linesize;
1686  intra4x4 += 4;
1687  }
1688  }
1689 
 1690  mode = check_intra_pred8x8_mode_emuedge(mb->chroma_pred_mode,
 1691  mb_x, mb_y, is_vp7);
1692  s->hpc.pred8x8[mode](dst[1], s->uvlinesize);
1693  s->hpc.pred8x8[mode](dst[2], s->uvlinesize);
1694 
1695  if (mb_y && (s->deblock_filter || !mb_y) && td->thread_nr == 0)
1696  xchg_mb_border(s->top_border[mb_x + 1], dst[0], dst[1], dst[2],
1697  s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
1698  s->filter.simple, 0);
1699 }
1700 
1701 static const uint8_t subpel_idx[3][8] = {
1702  { 0, 1, 2, 1, 2, 1, 2, 1 }, // nr. of left extra pixels,
1703  // also function pointer index
1704  { 0, 3, 5, 3, 5, 3, 5, 3 }, // nr. of extra pixels required
1705  { 0, 2, 3, 2, 3, 2, 3, 2 }, // nr. of right extra pixels
1706 };
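/* Reading the table: for a sub-pixel fraction of 2 (in eighths of a pixel) the MC
 * function index is subpel_idx[0][2] = 2, and edge emulation must provide 2 extra
 * pixels on the left and 3 on the right (subpel_idx[2][2]), i.e. subpel_idx[1][2] = 5
 * extra pixels in total. */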
1707 
1708 /**
1709  * luma MC function
1710  *
1711  * @param s VP8 decoding context
1712  * @param dst target buffer for block data at block position
1713  * @param ref reference picture buffer at origin (0, 0)
1714  * @param mv motion vector (relative to block position) to get pixel data from
1715  * @param x_off horizontal position of block from origin (0, 0)
1716  * @param y_off vertical position of block from origin (0, 0)
1717  * @param block_w width of block (16, 8 or 4)
1718  * @param block_h height of block (always same as block_w)
1719  * @param width width of src/dst plane data
1720  * @param height height of src/dst plane data
1721  * @param linesize size of a single line of plane data, including padding
1722  * @param mc_func motion compensation function pointers (bilinear or sixtap MC)
1723  */
1724 static av_always_inline
 1725 void vp8_mc_luma(VP8Context *s, VP8ThreadData *td, uint8_t *dst,
1726  ThreadFrame *ref, const VP56mv *mv,
1727  int x_off, int y_off, int block_w, int block_h,
1728  int width, int height, ptrdiff_t linesize,
1729  vp8_mc_func mc_func[3][3])
1730 {
1731  uint8_t *src = ref->f->data[0];
1732 
1733  if (AV_RN32A(mv)) {
1734  int src_linesize = linesize;
1735 
1736  int mx = (mv->x * 2) & 7, mx_idx = subpel_idx[0][mx];
1737  int my = (mv->y * 2) & 7, my_idx = subpel_idx[0][my];
1738 
1739  x_off += mv->x >> 2;
1740  y_off += mv->y >> 2;
1741 
1742  // edge emulation
1743  ff_thread_await_progress(ref, (3 + y_off + block_h + subpel_idx[2][my]) >> 4, 0);
1744  src += y_off * linesize + x_off;
1745  if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
1746  y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
 1747  s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
1748  src - my_idx * linesize - mx_idx,
1749  EDGE_EMU_LINESIZE, linesize,
1750  block_w + subpel_idx[1][mx],
1751  block_h + subpel_idx[1][my],
1752  x_off - mx_idx, y_off - my_idx,
1753  width, height);
1754  src = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
1755  src_linesize = EDGE_EMU_LINESIZE;
1756  }
1757  mc_func[my_idx][mx_idx](dst, linesize, src, src_linesize, block_h, mx, my);
1758  } else {
1759  ff_thread_await_progress(ref, (3 + y_off + block_h) >> 4, 0);
1760  mc_func[0][0](dst, linesize, src + y_off * linesize + x_off,
1761  linesize, block_h, 0, 0);
1762  }
1763 }
1764 
1765 /**
1766  * chroma MC function
1767  *
1768  * @param s VP8 decoding context
1769  * @param dst1 target buffer for block data at block position (U plane)
1770  * @param dst2 target buffer for block data at block position (V plane)
1771  * @param ref reference picture buffer at origin (0, 0)
1772  * @param mv motion vector (relative to block position) to get pixel data from
1773  * @param x_off horizontal position of block from origin (0, 0)
1774  * @param y_off vertical position of block from origin (0, 0)
1775  * @param block_w width of block (16, 8 or 4)
1776  * @param block_h height of block (always same as block_w)
1777  * @param width width of src/dst plane data
1778  * @param height height of src/dst plane data
1779  * @param linesize size of a single line of plane data, including padding
1780  * @param mc_func motion compensation function pointers (bilinear or sixtap MC)
1781  */
1782 static av_always_inline
 1783 void vp8_mc_chroma(VP8Context *s, VP8ThreadData *td, uint8_t *dst1,
1784  uint8_t *dst2, ThreadFrame *ref, const VP56mv *mv,
1785  int x_off, int y_off, int block_w, int block_h,
1786  int width, int height, ptrdiff_t linesize,
1787  vp8_mc_func mc_func[3][3])
1788 {
1789  uint8_t *src1 = ref->f->data[1], *src2 = ref->f->data[2];
1790 
1791  if (AV_RN32A(mv)) {
1792  int mx = mv->x & 7, mx_idx = subpel_idx[0][mx];
1793  int my = mv->y & 7, my_idx = subpel_idx[0][my];
1794 
1795  x_off += mv->x >> 3;
1796  y_off += mv->y >> 3;
1797 
1798  // edge emulation
1799  src1 += y_off * linesize + x_off;
1800  src2 += y_off * linesize + x_off;
1801  ff_thread_await_progress(ref, (3 + y_off + block_h + subpel_idx[2][my]) >> 3, 0);
1802  if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
1803  y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
 1804  s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
1805  src1 - my_idx * linesize - mx_idx,
1806  EDGE_EMU_LINESIZE, linesize,
1807  block_w + subpel_idx[1][mx],
1808  block_h + subpel_idx[1][my],
1809  x_off - mx_idx, y_off - my_idx, width, height);
1810  src1 = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
1811  mc_func[my_idx][mx_idx](dst1, linesize, src1, EDGE_EMU_LINESIZE, block_h, mx, my);
1812 
 1813  s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
 1814  src2 - my_idx * linesize - mx_idx,
1815  EDGE_EMU_LINESIZE, linesize,
1816  block_w + subpel_idx[1][mx],
1817  block_h + subpel_idx[1][my],
1818  x_off - mx_idx, y_off - my_idx, width, height);
1819  src2 = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
1820  mc_func[my_idx][mx_idx](dst2, linesize, src2, EDGE_EMU_LINESIZE, block_h, mx, my);
1821  } else {
1822  mc_func[my_idx][mx_idx](dst1, linesize, src1, linesize, block_h, mx, my);
1823  mc_func[my_idx][mx_idx](dst2, linesize, src2, linesize, block_h, mx, my);
1824  }
1825  } else {
1826  ff_thread_await_progress(ref, (3 + y_off + block_h) >> 3, 0);
1827  mc_func[0][0](dst1, linesize, src1 + y_off * linesize + x_off, linesize, block_h, 0, 0);
1828  mc_func[0][0](dst2, linesize, src2 + y_off * linesize + x_off, linesize, block_h, 0, 0);
1829  }
1830 }
1831 
1832 static av_always_inline
 1833 void vp8_mc_part(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3],
1834  ThreadFrame *ref_frame, int x_off, int y_off,
1835  int bx_off, int by_off, int block_w, int block_h,
1836  int width, int height, VP56mv *mv)
1837 {
1838  VP56mv uvmv = *mv;
1839 
1840  /* Y */
1841  vp8_mc_luma(s, td, dst[0] + by_off * s->linesize + bx_off,
1842  ref_frame, mv, x_off + bx_off, y_off + by_off,
1843  block_w, block_h, width, height, s->linesize,
1844  s->put_pixels_tab[block_w == 8]);
1845 
1846  /* U/V */
1847  if (s->profile == 3) {
1848  /* this block only applies VP8; it is safe to check
1849  * only the profile, as VP7 profile <= 1 */
1850  uvmv.x &= ~7;
1851  uvmv.y &= ~7;
1852  }
1853  x_off >>= 1;
1854  y_off >>= 1;
1855  bx_off >>= 1;
1856  by_off >>= 1;
1857  width >>= 1;
1858  height >>= 1;
1859  block_w >>= 1;
1860  block_h >>= 1;
1861  vp8_mc_chroma(s, td, dst[1] + by_off * s->uvlinesize + bx_off,
1862  dst[2] + by_off * s->uvlinesize + bx_off, ref_frame,
1863  &uvmv, x_off + bx_off, y_off + by_off,
1864  block_w, block_h, width, height, s->uvlinesize,
1865  s->put_pixels_tab[1 + (block_w == 4)]);
1866 }
1867 
1868 /* Fetch pixels for estimated mv 4 macroblocks ahead.
1869  * Optimized for 64-byte cache lines. Inspired by ffh264 prefetch_motion. */
1870 static av_always_inline
1871 void prefetch_motion(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y,
1872  int mb_xy, int ref)
1873 {
1874  /* Don't prefetch refs that haven't been used very often this frame. */
1875  if (s->ref_count[ref - 1] > (mb_xy >> 5)) {
1876  int x_off = mb_x << 4, y_off = mb_y << 4;
1877  int mx = (mb->mv.x >> 2) + x_off + 8;
1878  int my = (mb->mv.y >> 2) + y_off;
1879  uint8_t **src = s->framep[ref]->tf.f->data;
1880  int off = mx + (my + (mb_x & 3) * 4) * s->linesize + 64;
1881  /* For threading, a ff_thread_await_progress here might be useful, but
1882  * it actually slows down the decoder. Since a bad prefetch doesn't
1883  * generate bad decoder output, we don't run it here. */
1884  s->vdsp.prefetch(src[0] + off, s->linesize, 4);
1885  off = (mx >> 1) + ((my >> 1) + (mb_x & 7)) * s->uvlinesize + 64;
1886  s->vdsp.prefetch(src[1] + off, src[2] - src[1], 2);
1887  }
1888 }
1889 
1890 /**
1891  * Apply motion vectors to prediction buffer, chapter 18.
1892  */
1893 static av_always_inline
1894 void inter_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3],
1895  VP8Macroblock *mb, int mb_x, int mb_y)
1896 {
1897  int x_off = mb_x << 4, y_off = mb_y << 4;
1898  int width = 16 * s->mb_width, height = 16 * s->mb_height;
1899  ThreadFrame *ref = &s->framep[mb->ref_frame]->tf;
1900  VP56mv *bmv = mb->bmv;
1901 
1902  switch (mb->partitioning) {
1903  case VP8_SPLITMVMODE_NONE:
1904  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1905  0, 0, 16, 16, width, height, &mb->mv);
1906  break;
1907  case VP8_SPLITMVMODE_4x4: {
1908  int x, y;
1909  VP56mv uvmv;
1910 
1911  /* Y */
1912  for (y = 0; y < 4; y++) {
1913  for (x = 0; x < 4; x++) {
1914  vp8_mc_luma(s, td, dst[0] + 4 * y * s->linesize + x * 4,
1915  ref, &bmv[4 * y + x],
1916  4 * x + x_off, 4 * y + y_off, 4, 4,
1917  width, height, s->linesize,
1918  s->put_pixels_tab[2]);
1919  }
1920  }
1921 
1922  /* U/V */
1923  x_off >>= 1;
1924  y_off >>= 1;
1925  width >>= 1;
1926  height >>= 1;
1927  for (y = 0; y < 2; y++) {
1928  for (x = 0; x < 2; x++) {
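 /* the chroma MV of each 4x4 chroma block is the average of the four
  * corresponding luma sub-MVs, rounded to the nearest integer with a
  * sign-dependent bias */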
1929  uvmv.x = mb->bmv[2 * y * 4 + 2 * x ].x +
1930  mb->bmv[2 * y * 4 + 2 * x + 1].x +
1931  mb->bmv[(2 * y + 1) * 4 + 2 * x ].x +
1932  mb->bmv[(2 * y + 1) * 4 + 2 * x + 1].x;
1933  uvmv.y = mb->bmv[2 * y * 4 + 2 * x ].y +
1934  mb->bmv[2 * y * 4 + 2 * x + 1].y +
1935  mb->bmv[(2 * y + 1) * 4 + 2 * x ].y +
1936  mb->bmv[(2 * y + 1) * 4 + 2 * x + 1].y;
1937  uvmv.x = (uvmv.x + 2 + FF_SIGNBIT(uvmv.x)) >> 2;
1938  uvmv.y = (uvmv.y + 2 + FF_SIGNBIT(uvmv.y)) >> 2;
1939  if (s->profile == 3) {
1940  uvmv.x &= ~7;
1941  uvmv.y &= ~7;
1942  }
1943  vp8_mc_chroma(s, td, dst[1] + 4 * y * s->uvlinesize + x * 4,
1944  dst[2] + 4 * y * s->uvlinesize + x * 4, ref,
1945  &uvmv, 4 * x + x_off, 4 * y + y_off, 4, 4,
1946  width, height, s->uvlinesize,
1947  s->put_pixels_tab[2]);
1948  }
1949  }
1950  break;
1951  }
1952  case VP8_SPLITMVMODE_16x8:
1953  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1954  0, 0, 16, 8, width, height, &bmv[0]);
1955  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1956  0, 8, 16, 8, width, height, &bmv[1]);
1957  break;
1958  case VP8_SPLITMVMODE_8x16:
1959  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1960  0, 0, 8, 16, width, height, &bmv[0]);
1961  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1962  8, 0, 8, 16, width, height, &bmv[1]);
1963  break;
1964  case VP8_SPLITMVMODE_8x8:
1965  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1966  0, 0, 8, 8, width, height, &bmv[0]);
1967  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1968  8, 0, 8, 8, width, height, &bmv[1]);
1969  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1970  0, 8, 8, 8, width, height, &bmv[2]);
1971  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1972  8, 8, 8, 8, width, height, &bmv[3]);
1973  break;
1974  }
1975 }
1976 
1977 static av_always_inline
1978 void idct_mb(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb)
1979 {
1980  int x, y, ch;
1981 
1982  if (mb->mode != MODE_I4x4) {
1983  uint8_t *y_dst = dst[0];
1984  for (y = 0; y < 4; y++) {
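 /* nnz4 packs the coefficient counts of the four 4x4 blocks in this row of
  * the macroblock, one byte each: 1 means only the DC coefficient is set,
  * >1 needs a full IDCT */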
1985  uint32_t nnz4 = AV_RL32(td->non_zero_count_cache[y]);
1986  if (nnz4) {
1987  if (nnz4 & ~0x01010101) {
1988  for (x = 0; x < 4; x++) {
1989  if ((uint8_t) nnz4 == 1)
1990  s->vp8dsp.vp8_idct_dc_add(y_dst + 4 * x,
1991  td->block[y][x],
1992  s->linesize);
1993  else if ((uint8_t) nnz4 > 1)
1994  s->vp8dsp.vp8_idct_add(y_dst + 4 * x,
1995  td->block[y][x],
1996  s->linesize);
1997  nnz4 >>= 8;
1998  if (!nnz4)
1999  break;
2000  }
2001  } else {
2002  s->vp8dsp.vp8_idct_dc_add4y(y_dst, td->block[y], s->linesize);
2003  }
2004  }
2005  y_dst += 4 * s->linesize;
2006  }
2007  }
2008 
2009  for (ch = 0; ch < 2; ch++) {
2010  uint32_t nnz4 = AV_RL32(td->non_zero_count_cache[4 + ch]);
2011  if (nnz4) {
2012  uint8_t *ch_dst = dst[1 + ch];
2013  if (nnz4 & ~0x01010101) {
2014  for (y = 0; y < 2; y++) {
2015  for (x = 0; x < 2; x++) {
2016  if ((uint8_t) nnz4 == 1)
2017  s->vp8dsp.vp8_idct_dc_add(ch_dst + 4 * x,
2018  td->block[4 + ch][(y << 1) + x],
2019  s->uvlinesize);
2020  else if ((uint8_t) nnz4 > 1)
2021  s->vp8dsp.vp8_idct_add(ch_dst + 4 * x,
2022  td->block[4 + ch][(y << 1) + x],
2023  s->uvlinesize);
2024  nnz4 >>= 8;
2025  if (!nnz4)
2026  goto chroma_idct_end;
2027  }
2028  ch_dst += 4 * s->uvlinesize;
2029  }
2030  } else {
2031  s->vp8dsp.vp8_idct_dc_add4uv(ch_dst, td->block[4 + ch], s->uvlinesize);
2032  }
2033  }
2034 chroma_idct_end:
2035  ;
2036  }
2037 }
2038 
2039 static av_always_inline
2040 void filter_level_for_mb(VP8Context *s, VP8Macroblock *mb,
2041  VP8FilterStrength *f, int is_vp7)
2042 {
2043  int interior_limit, filter_level;
2044 
2045  if (s->segmentation.enabled) {
2046  filter_level = s->segmentation.filter_level[mb->segment];
2047  if (!s->segmentation.absolute_vals)
2048  filter_level += s->filter.level;
2049  } else
2050  filter_level = s->filter.level;
2051 
2052  if (s->lf_delta.enabled) {
2053  filter_level += s->lf_delta.ref[mb->ref_frame];
2054  filter_level += s->lf_delta.mode[mb->mode];
2055  }
2056 
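 /* clamp to the valid loop-filter level range 0..63 */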
2057  filter_level = av_clip_uintp2(filter_level, 6);
2058 
2059  interior_limit = filter_level;
2060  if (s->filter.sharpness) {
2061  interior_limit >>= (s->filter.sharpness + 3) >> 2;
2062  interior_limit = FFMIN(interior_limit, 9 - s->filter.sharpness);
2063  }
2064  interior_limit = FFMAX(interior_limit, 1);
2065 
2066  f->filter_level = filter_level;
2067  f->inner_limit = interior_limit;
2068  f->inner_filter = is_vp7 || !mb->skip || mb->mode == MODE_I4x4 ||
2069  mb->mode == VP8_MVMODE_SPLIT;
2070 }
2071 
2072 static av_always_inline
2073 void filter_mb(VP8Context *s, uint8_t *dst[3], VP8FilterStrength *f,
2074  int mb_x, int mb_y, int is_vp7)
2075 {
2076  int mbedge_lim, bedge_lim_y, bedge_lim_uv, hev_thresh;
2077  int filter_level = f->filter_level;
2078  int inner_limit = f->inner_limit;
2079  int inner_filter = f->inner_filter;
2080  int linesize = s->linesize;
2081  int uvlinesize = s->uvlinesize;
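 /* high-edge-variance thresholds, indexed by keyframe flag and filter level */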
2082  static const uint8_t hev_thresh_lut[2][64] = {
2083  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
2084  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2085  3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
2086  3, 3, 3, 3 },
2087  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
2088  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2089  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2090  2, 2, 2, 2 }
2091  };
2092 
2093  if (!filter_level)
2094  return;
2095 
2096  if (is_vp7) {
2097  bedge_lim_y = filter_level;
2098  bedge_lim_uv = filter_level * 2;
2099  mbedge_lim = filter_level + 2;
2100  } else {
2101  bedge_lim_y =
2102  bedge_lim_uv = filter_level * 2 + inner_limit;
2103  mbedge_lim = bedge_lim_y + 4;
2104  }
2105 
2106  hev_thresh = hev_thresh_lut[s->keyframe][filter_level];
2107 
2108  if (mb_x) {
2109  s->vp8dsp.vp8_h_loop_filter16y(dst[0], linesize,
2110  mbedge_lim, inner_limit, hev_thresh);
2111  s->vp8dsp.vp8_h_loop_filter8uv(dst[1], dst[2], uvlinesize,
2112  mbedge_lim, inner_limit, hev_thresh);
2113  }
2114 
2115 #define H_LOOP_FILTER_16Y_INNER(cond) \
2116  if (cond && inner_filter) { \
2117  s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 4, linesize, \
2118  bedge_lim_y, inner_limit, \
2119  hev_thresh); \
2120  s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 8, linesize, \
2121  bedge_lim_y, inner_limit, \
2122  hev_thresh); \
2123  s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 12, linesize, \
2124  bedge_lim_y, inner_limit, \
2125  hev_thresh); \
2126  s->vp8dsp.vp8_h_loop_filter8uv_inner(dst[1] + 4, dst[2] + 4, \
2127  uvlinesize, bedge_lim_uv, \
2128  inner_limit, hev_thresh); \
2129  }
2130 
2131  H_LOOP_FILTER_16Y_INNER(!is_vp7)
2132 
2133  if (mb_y) {
2134  s->vp8dsp.vp8_v_loop_filter16y(dst[0], linesize,
2135  mbedge_lim, inner_limit, hev_thresh);
2136  s->vp8dsp.vp8_v_loop_filter8uv(dst[1], dst[2], uvlinesize,
2137  mbedge_lim, inner_limit, hev_thresh);
2138  }
2139 
2140  if (inner_filter) {
2141  s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0] + 4 * linesize,
2142  linesize, bedge_lim_y,
2143  inner_limit, hev_thresh);
2144  s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0] + 8 * linesize,
2145  linesize, bedge_lim_y,
2146  inner_limit, hev_thresh);
2147  s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0] + 12 * linesize,
2148  linesize, bedge_lim_y,
2149  inner_limit, hev_thresh);
2150  s->vp8dsp.vp8_v_loop_filter8uv_inner(dst[1] + 4 * uvlinesize,
2151  dst[2] + 4 * uvlinesize,
2152  uvlinesize, bedge_lim_uv,
2153  inner_limit, hev_thresh);
2154  }
2155 
2156  H_LOOP_FILTER_16Y_INNER(is_vp7)
2157 }
2158 
2159 static av_always_inline
2160 void filter_mb_simple(VP8Context *s, uint8_t *dst, VP8FilterStrength *f,
2161  int mb_x, int mb_y)
2162 {
2163  int mbedge_lim, bedge_lim;
2164  int filter_level = f->filter_level;
2165  int inner_limit = f->inner_limit;
2166  int inner_filter = f->inner_filter;
2167  int linesize = s->linesize;
2168 
2169  if (!filter_level)
2170  return;
2171 
2172  bedge_lim = 2 * filter_level + inner_limit;
2173  mbedge_lim = bedge_lim + 4;
2174 
2175  if (mb_x)
2176  s->vp8dsp.vp8_h_loop_filter_simple(dst, linesize, mbedge_lim);
2177  if (inner_filter) {
2178  s->vp8dsp.vp8_h_loop_filter_simple(dst + 4, linesize, bedge_lim);
2179  s->vp8dsp.vp8_h_loop_filter_simple(dst + 8, linesize, bedge_lim);
2180  s->vp8dsp.vp8_h_loop_filter_simple(dst + 12, linesize, bedge_lim);
2181  }
2182 
2183  if (mb_y)
2184  s->vp8dsp.vp8_v_loop_filter_simple(dst, linesize, mbedge_lim);
2185  if (inner_filter) {
2186  s->vp8dsp.vp8_v_loop_filter_simple(dst + 4 * linesize, linesize, bedge_lim);
2187  s->vp8dsp.vp8_v_loop_filter_simple(dst + 8 * linesize, linesize, bedge_lim);
2188  s->vp8dsp.vp8_v_loop_filter_simple(dst + 12 * linesize, linesize, bedge_lim);
2189  }
2190 }
2191 
2192 #define MARGIN (16 << 2)
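 /* MVs are clamped so a block can point at most 16 pixels (64 in 1/4-pel
  * units) outside the visible frame */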
2193 static av_always_inline
2194 void vp78_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *curframe,
2195  VP8Frame *prev_frame, int is_vp7)
2196 {
2197  VP8Context *s = avctx->priv_data;
2198  int mb_x, mb_y;
2199 
2200  s->mv_min.y = -MARGIN;
2201  s->mv_max.y = ((s->mb_height - 1) << 6) + MARGIN;
2202  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
2203  VP8Macroblock *mb = s->macroblocks_base +
2204  ((s->mb_width + 1) * (mb_y + 1) + 1);
2205  int mb_xy = mb_y * s->mb_width;
2206 
2207  AV_WN32A(s->intra4x4_pred_mode_left, DC_PRED * 0x01010101);
2208 
2209  s->mv_min.x = -MARGIN;
2210  s->mv_max.x = ((s->mb_width - 1) << 6) + MARGIN;
2211  for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
2212  if (mb_y == 0)
2213  AV_WN32A((mb - s->mb_width - 1)->intra4x4_pred_mode_top,
2214  DC_PRED * 0x01010101);
2215  decode_mb_mode(s, mb, mb_x, mb_y, curframe->seg_map->data + mb_xy,
2216  prev_frame && prev_frame->seg_map ?
2217  prev_frame->seg_map->data + mb_xy : NULL, 1, is_vp7);
2218  s->mv_min.x -= 64;
2219  s->mv_max.x -= 64;
2220  }
2221  s->mv_min.y -= 64;
2222  s->mv_max.y -= 64;
2223  }
2224 }
2225 
2226 static void vp7_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame,
2227  VP8Frame *prev_frame)
2228 {
2229  vp78_decode_mv_mb_modes(avctx, cur_frame, prev_frame, IS_VP7);
2230 }
2231 
2232 static void vp8_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame,
2233  VP8Frame *prev_frame)
2234 {
2235  vp78_decode_mv_mb_modes(avctx, cur_frame, prev_frame, IS_VP8);
2236 }
2237 
2238 #if HAVE_THREADS
2239 #define check_thread_pos(td, otd, mb_x_check, mb_y_check) \
2240  do { \
2241  int tmp = (mb_y_check << 16) | (mb_x_check & 0xFFFF); \
2242  if (otd->thread_mb_pos < tmp) { \
2243  pthread_mutex_lock(&otd->lock); \
2244  td->wait_mb_pos = tmp; \
2245  do { \
2246  if (otd->thread_mb_pos >= tmp) \
2247  break; \
2248  pthread_cond_wait(&otd->cond, &otd->lock); \
2249  } while (1); \
2250  td->wait_mb_pos = INT_MAX; \
2251  pthread_mutex_unlock(&otd->lock); \
2252  } \
2253  } while (0)
2254 
2255 #define update_pos(td, mb_y, mb_x) \
2256  do { \
2257  int pos = (mb_y << 16) | (mb_x & 0xFFFF); \
2258  int sliced_threading = (avctx->active_thread_type == FF_THREAD_SLICE) && \
2259  (num_jobs > 1); \
2260  int is_null = !next_td || !prev_td; \
2261  int pos_check = (is_null) ? 1 \
2262  : (next_td != td && \
2263  pos >= next_td->wait_mb_pos) || \
2264  (prev_td != td && \
2265  pos >= prev_td->wait_mb_pos); \
2266  td->thread_mb_pos = pos; \
2267  if (sliced_threading && pos_check) { \
2268  pthread_mutex_lock(&td->lock); \
2269  pthread_cond_broadcast(&td->cond); \
2270  pthread_mutex_unlock(&td->lock); \
2271  } \
2272  } while (0)
2273 #else
2274 #define check_thread_pos(td, otd, mb_x_check, mb_y_check) while(0)
2275 #define update_pos(td, mb_y, mb_x) while(0)
2276 #endif
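 /* Sliced-threading synchronisation: check_thread_pos() blocks until the
  * other row's thread (otd) has reached at least the given macroblock,
  * update_pos() publishes this thread's position, packed as
  * (mb_y << 16) | mb_x, and wakes any waiting thread. */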
2277 
2278 static av_always_inline void decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata,
2279  int jobnr, int threadnr, int is_vp7)
2280 {
2281  VP8Context *s = avctx->priv_data;
2282  VP8ThreadData *prev_td, *next_td, *td = &s->thread_data[threadnr];
2283  int mb_y = td->thread_mb_pos >> 16;
2284  int mb_x, mb_xy = mb_y * s->mb_width;
2285  int num_jobs = s->num_jobs;
2286  VP8Frame *curframe = s->curframe, *prev_frame = s->prev_frame;
2287  VP56RangeCoder *c = &s->coeff_partition[mb_y & (s->num_coeff_partitions - 1)];
2288  VP8Macroblock *mb;
2289  uint8_t *dst[3] = {
2290  curframe->tf.f->data[0] + 16 * mb_y * s->linesize,
2291  curframe->tf.f->data[1] + 8 * mb_y * s->uvlinesize,
2292  curframe->tf.f->data[2] + 8 * mb_y * s->uvlinesize
2293  };
2294  if (mb_y == 0)
2295  prev_td = td;
2296  else
2297  prev_td = &s->thread_data[(jobnr + num_jobs - 1) % num_jobs];
2298  if (mb_y == s->mb_height - 1)
2299  next_td = td;
2300  else
2301  next_td = &s->thread_data[(jobnr + 1) % num_jobs];
2302  if (s->mb_layout == 1)
2303  mb = s->macroblocks_base + ((s->mb_width + 1) * (mb_y + 1) + 1);
2304  else {
2305  // Make sure the previous frame has read its segmentation map,
2306  // if we re-use the same map.
2307  if (prev_frame && s->segmentation.enabled &&
2308  !s->segmentation.update_map)
2309  ff_thread_await_progress(&prev_frame->tf, mb_y, 0);
2310  mb = s->macroblocks + (s->mb_height - mb_y - 1) * 2;
2311  memset(mb - 1, 0, sizeof(*mb)); // zero left macroblock
2312  AV_WN32A(s->intra4x4_pred_mode_left, DC_PRED * 0x01010101);
2313  }
2314 
2315  if (!is_vp7 || mb_y == 0)
2316  memset(td->left_nnz, 0, sizeof(td->left_nnz));
2317 
2318  s->mv_min.x = -MARGIN;
2319  s->mv_max.x = ((s->mb_width - 1) << 6) + MARGIN;
2320 
2321  for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
2322  // Wait for previous thread to read mb_x+2, and reach mb_y-1.
2323  if (prev_td != td) {
2324  if (threadnr != 0) {
2325  check_thread_pos(td, prev_td,
2326  mb_x + (is_vp7 ? 2 : 1),
2327  mb_y - (is_vp7 ? 2 : 1));
2328  } else {
2329  check_thread_pos(td, prev_td,
2330  mb_x + (is_vp7 ? 2 : 1) + s->mb_width + 3,
2331  mb_y - (is_vp7 ? 2 : 1));
2332  }
2333  }
2334 
2335  s->vdsp.prefetch(dst[0] + (mb_x & 3) * 4 * s->linesize + 64,
2336  s->linesize, 4);
2337  s->vdsp.prefetch(dst[1] + (mb_x & 7) * s->uvlinesize + 64,
2338  dst[2] - dst[1], 2);
2339 
2340  if (!s->mb_layout)
2341  decode_mb_mode(s, mb, mb_x, mb_y, curframe->seg_map->data + mb_xy,
2342  prev_frame && prev_frame->seg_map ?
2343  prev_frame->seg_map->data + mb_xy : NULL, 0, is_vp7);
2344 
2345  prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_PREVIOUS);
2346 
2347  if (!mb->skip)
2348  decode_mb_coeffs(s, td, c, mb, s->top_nnz[mb_x], td->left_nnz, is_vp7);
2349 
2350  if (mb->mode <= MODE_I4x4)
2351  intra_predict(s, td, dst, mb, mb_x, mb_y, is_vp7);
2352  else
2353  inter_predict(s, td, dst, mb, mb_x, mb_y);
2354 
2355  prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN);
2356 
2357  if (!mb->skip) {
2358  idct_mb(s, td, dst, mb);
2359  } else {
2360  AV_ZERO64(td->left_nnz);
2361  AV_WN64(s->top_nnz[mb_x], 0); // array of 9, so unaligned
2362 
2363  /* Reset DC block predictors if they would exist
2364  * if the mb had coefficients */
2365  if (mb->mode != MODE_I4x4 && mb->mode != VP8_MVMODE_SPLIT) {
2366  td->left_nnz[8] = 0;
2367  s->top_nnz[mb_x][8] = 0;
2368  }
2369  }
2370 
2371  if (s->deblock_filter)
2372  filter_level_for_mb(s, mb, &td->filter_strength[mb_x], is_vp7);
2373 
2374  if (s->deblock_filter && num_jobs != 1 && threadnr == num_jobs - 1) {
2375  if (s->filter.simple)
2376  backup_mb_border(s->top_border[mb_x + 1], dst[0],
2377  NULL, NULL, s->linesize, 0, 1);
2378  else
2379  backup_mb_border(s->top_border[mb_x + 1], dst[0],
2380  dst[1], dst[2], s->linesize, s->uvlinesize, 0);
2381  }
2382 
2383  prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN2);
2384 
2385  dst[0] += 16;
2386  dst[1] += 8;
2387  dst[2] += 8;
2388  s->mv_min.x -= 64;
2389  s->mv_max.x -= 64;
2390 
2391  if (mb_x == s->mb_width + 1) {
2392  update_pos(td, mb_y, s->mb_width + 3);
2393  } else {
2394  update_pos(td, mb_y, mb_x);
2395  }
2396  }
2397 }
2398 
2399 static void vp7_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata,
2400  int jobnr, int threadnr)
2401 {
2402  decode_mb_row_no_filter(avctx, tdata, jobnr, threadnr, 1);
2403 }
2404 
2405 static void vp8_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata,
2406  int jobnr, int threadnr)
2407 {
2408  decode_mb_row_no_filter(avctx, tdata, jobnr, threadnr, 0);
2409 }
2410 
2411 static av_always_inline void filter_mb_row(AVCodecContext *avctx, void *tdata,
2412  int jobnr, int threadnr, int is_vp7)
2413 {
2414  VP8Context *s = avctx->priv_data;
2415  VP8ThreadData *td = &s->thread_data[threadnr];
2416  int mb_x, mb_y = td->thread_mb_pos >> 16, num_jobs = s->num_jobs;
2417  AVFrame *curframe = s->curframe->tf.f;
2418  VP8Macroblock *mb;
2419  VP8ThreadData *prev_td, *next_td;
2420  uint8_t *dst[3] = {
2421  curframe->data[0] + 16 * mb_y * s->linesize,
2422  curframe->data[1] + 8 * mb_y * s->uvlinesize,
2423  curframe->data[2] + 8 * mb_y * s->uvlinesize
2424  };
2425 
2426  if (s->mb_layout == 1)
2427  mb = s->macroblocks_base + ((s->mb_width + 1) * (mb_y + 1) + 1);
2428  else
2429  mb = s->macroblocks + (s->mb_height - mb_y - 1) * 2;
2430 
2431  if (mb_y == 0)
2432  prev_td = td;
2433  else
2434  prev_td = &s->thread_data[(jobnr + num_jobs - 1) % num_jobs];
2435  if (mb_y == s->mb_height - 1)
2436  next_td = td;
2437  else
2438  next_td = &s->thread_data[(jobnr + 1) % num_jobs];
2439 
2440  for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb++) {
2441  VP8FilterStrength *f = &td->filter_strength[mb_x];
2442  if (prev_td != td)
2443  check_thread_pos(td, prev_td,
2444  (mb_x + 1) + (s->mb_width + 3), mb_y - 1);
2445  if (next_td != td)
2446  if (next_td != &s->thread_data[0])
2447  check_thread_pos(td, next_td, mb_x + 1, mb_y + 1);
2448 
2449  if (num_jobs == 1) {
2450  if (s->filter.simple)
2451  backup_mb_border(s->top_border[mb_x + 1], dst[0],
2452  NULL, NULL, s->linesize, 0, 1);
2453  else
2454  backup_mb_border(s->top_border[mb_x + 1], dst[0],
2455  dst[1], dst[2], s->linesize, s->uvlinesize, 0);
2456  }
2457 
2458  if (s->filter.simple)
2459  filter_mb_simple(s, dst[0], f, mb_x, mb_y);
2460  else
2461  filter_mb(s, dst, f, mb_x, mb_y, is_vp7);
2462  dst[0] += 16;
2463  dst[1] += 8;
2464  dst[2] += 8;
2465 
2466  update_pos(td, mb_y, (s->mb_width + 3) + mb_x);
2467  }
2468 }
2469 
2470 static void vp7_filter_mb_row(AVCodecContext *avctx, void *tdata,
2471  int jobnr, int threadnr)
2472 {
2473  filter_mb_row(avctx, tdata, jobnr, threadnr, 1);
2474 }
2475 
2476 static void vp8_filter_mb_row(AVCodecContext *avctx, void *tdata,
2477  int jobnr, int threadnr)
2478 {
2479  filter_mb_row(avctx, tdata, jobnr, threadnr, 0);
2480 }
2481 
2482 static av_always_inline
2483 int vp78_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr,
2484  int threadnr, int is_vp7)
2485 {
2486  VP8Context *s = avctx->priv_data;
2487  VP8ThreadData *td = &s->thread_data[jobnr];
2488  VP8ThreadData *next_td = NULL, *prev_td = NULL;
2489  VP8Frame *curframe = s->curframe;
2490  int mb_y, num_jobs = s->num_jobs;
2491 
2492  td->thread_nr = threadnr;
2493  for (mb_y = jobnr; mb_y < s->mb_height; mb_y += num_jobs) {
2494  if (mb_y >= s->mb_height)
2495  break;
2496  td->thread_mb_pos = mb_y << 16;
2497  s->decode_mb_row_no_filter(avctx, tdata, jobnr, threadnr);
2498  if (s->deblock_filter)
2499  s->filter_mb_row(avctx, tdata, jobnr, threadnr);
2500  update_pos(td, mb_y, INT_MAX & 0xFFFF);
2501 
2502  s->mv_min.y -= 64;
2503  s->mv_max.y -= 64;
2504 
2505  if (avctx->active_thread_type == FF_THREAD_FRAME)
2506  ff_thread_report_progress(&curframe->tf, mb_y, 0);
2507  }
2508 
2509  return 0;
2510 }
2511 
2512 static int vp7_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata,
2513  int jobnr, int threadnr)
2514 {
2515  return vp78_decode_mb_row_sliced(avctx, tdata, jobnr, threadnr, IS_VP7);
2516 }
2517 
2518 static int vp8_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata,
2519  int jobnr, int threadnr)
2520 {
2521  return vp78_decode_mb_row_sliced(avctx, tdata, jobnr, threadnr, IS_VP8);
2522 }
2523 
2524 
2525 static av_always_inline
2526 int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
2527  AVPacket *avpkt, int is_vp7)
2528 {
2529  VP8Context *s = avctx->priv_data;
2530  int ret, i, referenced, num_jobs;
2531  enum AVDiscard skip_thresh;
2532  VP8Frame *av_uninit(curframe), *prev_frame;
2533 
2534  if (is_vp7)
2535  ret = vp7_decode_frame_header(s, avpkt->data, avpkt->size);
2536  else
2537  ret = vp8_decode_frame_header(s, avpkt->data, avpkt->size);
2538 
2539  if (ret < 0)
2540  goto err;
2541 
2542  prev_frame = s->framep[VP56_FRAME_CURRENT];
2543 
2544  referenced = s->update_last || s->update_golden == VP56_FRAME_CURRENT ||
2545  s->update_altref == VP56_FRAME_CURRENT;
2546 
2547  skip_thresh = !referenced ? AVDISCARD_NONREF
2548  : !s->keyframe ? AVDISCARD_NONKEY
2549  : AVDISCARD_ALL;
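 /* frames that nothing references may be dropped at AVDISCARD_NONREF,
  * other inter frames at AVDISCARD_NONKEY, keyframes only at AVDISCARD_ALL */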
2550 
2551  if (avctx->skip_frame >= skip_thresh) {
2552  s->invisible = 1;
2553  memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4);
2554  goto skip_decode;
2555  }
2556  s->deblock_filter = s->filter.level && avctx->skip_loop_filter < skip_thresh;
2557 
2558  // release no longer referenced frames
2559  for (i = 0; i < 5; i++)
2560  if (s->frames[i].tf.f->data[0] &&
2561  &s->frames[i] != prev_frame &&
2562  &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
2563  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
2564  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2])
2565  vp8_release_frame(s, &s->frames[i]);
2566 
2567  curframe = s->framep[VP56_FRAME_CURRENT] = vp8_find_free_buffer(s);
2568 
2569  if (!s->colorspace)
2570  avctx->colorspace = AVCOL_SPC_BT470BG;
2571  if (s->fullrange)
2572  avctx->color_range = AVCOL_RANGE_JPEG;
2573  else
2574  avctx->color_range = AVCOL_RANGE_MPEG;
2575 
2576  /* Given that arithmetic probabilities are updated every frame, it's quite
2577  * likely that the values we have on a random interframe are complete
2578  * junk if we didn't start decode on a keyframe. So just don't display
2579  * anything rather than junk. */
2580  if (!s->keyframe && (!s->framep[VP56_FRAME_PREVIOUS] ||
2581  !s->framep[VP56_FRAME_GOLDEN] ||
2582  !s->framep[VP56_FRAME_GOLDEN2])) {
2583  av_log(avctx, AV_LOG_WARNING,
2584  "Discarding interframe without a prior keyframe!\n");
2585  ret = AVERROR_INVALIDDATA;
2586  goto err;
2587  }
2588 
2589  curframe->tf.f->key_frame = s->keyframe;
2590  curframe->tf.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I
2591  : AV_PICTURE_TYPE_P;
2592  if ((ret = vp8_alloc_frame(s, curframe, referenced)) < 0)
2593  goto err;
2594 
2595  // check if golden and altref are swapped
2596  if (s->update_altref != VP56_FRAME_NONE)
2597  s->next_framep[VP56_FRAME_GOLDEN2] = s->framep[s->update_altref];
2598  else
2599  s->next_framep[VP56_FRAME_GOLDEN2] = s->framep[VP56_FRAME_GOLDEN2];
2600 
2601  if (s->update_golden != VP56_FRAME_NONE)
2602  s->next_framep[VP56_FRAME_GOLDEN] = s->framep[s->update_golden];
2603  else
2604  s->next_framep[VP56_FRAME_GOLDEN] = s->framep[VP56_FRAME_GOLDEN];
2605 
2606  if (s->update_last)
2607  s->next_framep[VP56_FRAME_PREVIOUS] = curframe;
2608  else
2609  s->next_framep[VP56_FRAME_PREVIOUS] = s->framep[VP56_FRAME_PREVIOUS];
2610 
2611  s->next_framep[VP56_FRAME_CURRENT] = curframe;
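 /* next_framep[] is the reference set that will take effect after this
  * frame; it is copied into framep[] once the frame has been decoded */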
2612 
2613  if (avctx->codec->update_thread_context)
2614  ff_thread_finish_setup(avctx);
2615 
2616  s->linesize = curframe->tf.f->linesize[0];
2617  s->uvlinesize = curframe->tf.f->linesize[1];
2618 
2619  memset(s->top_nnz, 0, s->mb_width * sizeof(*s->top_nnz));
2620  /* Zero macroblock structures for top/top-left prediction
2621  * from outside the frame. */
2622  if (!s->mb_layout)
2623  memset(s->macroblocks + s->mb_height * 2 - 1, 0,
2624  (s->mb_width + 1) * sizeof(*s->macroblocks));
2625  if (!s->mb_layout && s->keyframe)
2626  memset(s->intra4x4_pred_mode_top, DC_PRED, s->mb_width * 4);
2627 
2628  memset(s->ref_count, 0, sizeof(s->ref_count));
2629 
2630  if (s->mb_layout == 1) {
2631  // Make sure the previous frame has read its segmentation map,
2632  // if we re-use the same map.
2633  if (prev_frame && s->segmentation.enabled &&
2634  !s->segmentation.update_map)
2635  ff_thread_await_progress(&prev_frame->tf, 1, 0);
2636  if (is_vp7)
2637  vp7_decode_mv_mb_modes(avctx, curframe, prev_frame);
2638  else
2639  vp8_decode_mv_mb_modes(avctx, curframe, prev_frame);
2640  }
2641 
2642  if (avctx->active_thread_type == FF_THREAD_FRAME)
2643  num_jobs = 1;
2644  else
2645  num_jobs = FFMIN(s->num_coeff_partitions, avctx->thread_count);
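 /* with frame threading each thread decodes a whole frame, so use a single
  * job; with slice threading, run up to one job per coefficient partition,
  * capped by the configured thread count */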
2646  s->num_jobs = num_jobs;
2647  s->curframe = curframe;
2648  s->prev_frame = prev_frame;
2649  s->mv_min.y = -MARGIN;
2650  s->mv_max.y = ((s->mb_height - 1) << 6) + MARGIN;
2651  for (i = 0; i < MAX_THREADS; i++) {
2652  s->thread_data[i].thread_mb_pos = 0;
2653  s->thread_data[i].wait_mb_pos = INT_MAX;
2654  }
2655  if (is_vp7)
2656  avctx->execute2(avctx, vp7_decode_mb_row_sliced, s->thread_data, NULL,
2657  num_jobs);
2658  else
2659  avctx->execute2(avctx, vp8_decode_mb_row_sliced, s->thread_data, NULL,
2660  num_jobs);
2661 
2662  ff_thread_report_progress(&curframe->tf, INT_MAX, 0);
2663  memcpy(&s->framep[0], &s->next_framep[0], sizeof(s->framep[0]) * 4);
2664 
2665 skip_decode:
2666  // if future frames don't use the updated probabilities,
2667  // reset them to the values we saved
2668  if (!s->update_probabilities)
2669  s->prob[0] = s->prob[1];
2670 
2671  if (!s->invisible) {
2672  if ((ret = av_frame_ref(data, curframe->tf.f)) < 0)
2673  return ret;
2674  *got_frame = 1;
2675  }
2676 
2677  return avpkt->size;
2678 err:
2679  memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4);
2680  return ret;
2681 }
2682 
2683 int ff_vp8_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
2684  AVPacket *avpkt)
2685 {
2686  return vp78_decode_frame(avctx, data, got_frame, avpkt, IS_VP8);
2687 }
2688 
2689 #if CONFIG_VP7_DECODER
2690 static int vp7_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
2691  AVPacket *avpkt)
2692 {
2693  return vp78_decode_frame(avctx, data, got_frame, avpkt, IS_VP7);
2694 }
2695 #endif /* CONFIG_VP7_DECODER */
2696 
2697 av_cold int ff_vp8_decode_free(AVCodecContext *avctx)
2698 {
2699  VP8Context *s = avctx->priv_data;
2700  int i;
2701 
2702  if (!s)
2703  return 0;
2704 
2705  vp8_decode_flush_impl(avctx, 1);
2706  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
2707  av_frame_free(&s->frames[i].tf.f);
2708 
2709  return 0;
2710 }
2711 
2712 static av_cold int vp8_init_frames(VP8Context *s)
2713 {
2714  int i;
2715  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++) {
2716  s->frames[i].tf.f = av_frame_alloc();
2717  if (!s->frames[i].tf.f)
2718  return AVERROR(ENOMEM);
2719  }
2720  return 0;
2721 }
2722 
2723 static av_always_inline
2724 int vp78_decode_init(AVCodecContext *avctx, int is_vp7)
2725 {
2726  VP8Context *s = avctx->priv_data;
2727  int ret;
2728 
2729  s->avctx = avctx;
2730  s->vp7 = avctx->codec->id == AV_CODEC_ID_VP7;
2731  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
2732  avctx->internal->allocate_progress = 1;
2733 
2734  ff_videodsp_init(&s->vdsp, 8);
2735 
2736  ff_vp78dsp_init(&s->vp8dsp);
2737  if (CONFIG_VP7_DECODER && is_vp7) {
2738  ff_h264_pred_init(&s->hpc, AV_CODEC_ID_VP7, 8, 1);
2739  ff_vp7dsp_init(&s->vp8dsp);
2740  s->decode_mb_row_no_filter = vp7_decode_mb_row_no_filter;
2741  s->filter_mb_row = vp7_filter_mb_row;
2742  } else if (CONFIG_VP8_DECODER && !is_vp7) {
2743  ff_h264_pred_init(&s->hpc, AV_CODEC_ID_VP8, 8, 1);
2744  ff_vp8dsp_init(&s->vp8dsp);
2745  s->decode_mb_row_no_filter = vp8_decode_mb_row_no_filter;
2746  s->filter_mb_row = vp8_filter_mb_row;
2747  }
2748 
2749  /* does not change for VP8 */
2750  memcpy(s->prob[0].scan, ff_zigzag_scan, sizeof(s->prob[0].scan));
2751 
2752  if ((ret = vp8_init_frames(s)) < 0) {
2753  ff_vp8_decode_free(avctx);
2754  return ret;
2755  }
2756 
2757  return 0;
2758 }
2759 
2760 #if CONFIG_VP7_DECODER
2761 static int vp7_decode_init(AVCodecContext *avctx)
2762 {
2763  return vp78_decode_init(avctx, IS_VP7);
2764 }
2765 #endif /* CONFIG_VP7_DECODER */
2766 
2767 av_cold int ff_vp8_decode_init(AVCodecContext *avctx)
2768 {
2769  return vp78_decode_init(avctx, IS_VP8);
2770 }
2771 
2772 #if CONFIG_VP8_DECODER
2773 #if HAVE_THREADS
2774 static av_cold int vp8_decode_init_thread_copy(AVCodecContext *avctx)
2775 {
2776  VP8Context *s = avctx->priv_data;
2777  int ret;
2778 
2779  s->avctx = avctx;
2780 
2781  if ((ret = vp8_init_frames(s)) < 0) {
2782  ff_vp8_decode_free(avctx);
2783  return ret;
2784  }
2785 
2786  return 0;
2787 }
2788 
2789 #define REBASE(pic) ((pic) ? (pic) - &s_src->frames[0] + &s->frames[0] : NULL)
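 /* translate a frame pointer from the source context's frames[] array to
  * the corresponding entry in this context's frames[] */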
2790 
2791 static int vp8_decode_update_thread_context(AVCodecContext *dst,
2792  const AVCodecContext *src)
2793 {
2794  VP8Context *s = dst->priv_data, *s_src = src->priv_data;
2795  int i;
2796 
2797  if (s->macroblocks_base &&
2798  (s_src->mb_width != s->mb_width || s_src->mb_height != s->mb_height)) {
2799  free_buffers(s);
2800  s->mb_width = s_src->mb_width;
2801  s->mb_height = s_src->mb_height;
2802  }
2803 
2804  s->prob[0] = s_src->prob[!s_src->update_probabilities];
2805  s->segmentation = s_src->segmentation;
2806  s->lf_delta = s_src->lf_delta;
2807  memcpy(s->sign_bias, s_src->sign_bias, sizeof(s->sign_bias));
2808 
2809  for (i = 0; i < FF_ARRAY_ELEMS(s_src->frames); i++) {
2810  if (s_src->frames[i].tf.f->data[0]) {
2811  int ret = vp8_ref_frame(s, &s->frames[i], &s_src->frames[i]);
2812  if (ret < 0)
2813  return ret;
2814  }
2815  }
2816 
2817  s->framep[0] = REBASE(s_src->next_framep[0]);
2818  s->framep[1] = REBASE(s_src->next_framep[1]);
2819  s->framep[2] = REBASE(s_src->next_framep[2]);
2820  s->framep[3] = REBASE(s_src->next_framep[3]);
2821 
2822  return 0;
2823 }
2824 #endif /* HAVE_THREADS */
2825 #endif /* CONFIG_VP8_DECODER */
2826 
2827 #if CONFIG_VP7_DECODER
2828 AVCodec ff_vp7_decoder = {
2829  .name = "vp7",
2830  .long_name = NULL_IF_CONFIG_SMALL("On2 VP7"),
2831  .type = AVMEDIA_TYPE_VIDEO,
2832  .id = AV_CODEC_ID_VP7,
2833  .priv_data_size = sizeof(VP8Context),
2834  .init = vp7_decode_init,
2835  .close = ff_vp8_decode_free,
2836  .decode = vp7_decode_frame,
2837  .capabilities = AV_CODEC_CAP_DR1,
2838  .flush = vp8_decode_flush,
2839 };
2840 #endif /* CONFIG_VP7_DECODER */
2841 
2842 #if CONFIG_VP8_DECODER
2843 AVCodec ff_vp8_decoder = {
2844  .name = "vp8",
2845  .long_name = NULL_IF_CONFIG_SMALL("On2 VP8"),
2846  .type = AVMEDIA_TYPE_VIDEO,
2847  .id = AV_CODEC_ID_VP8,
2848  .priv_data_size = sizeof(VP8Context),
2849  .init = ff_vp8_decode_init,
2850  .close = ff_vp8_decode_free,
2851  .decode = ff_vp8_decode_frame,
2852  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
2853  AV_CODEC_CAP_SLICE_THREADS,
2854  .flush = vp8_decode_flush,
2855  .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp8_decode_init_thread_copy),
2856  .update_thread_context = ONLY_IF_THREADS_ENABLED(vp8_decode_update_thread_context),
2857 };
2858 #endif /* CONFIG_VP8_DECODER */
uint8_t golden
Definition: vp8.h:235
uint8_t inner_limit
Definition: vp8.h:75
#define VERT_PRED8x8
Definition: h264pred.h:70
VP8Macroblock * macroblocks
Definition: vp8.h:178
static const uint8_t vp8_dc_qlookup[VP8_MAX_QUANT+1]
Definition: vp8data.h:711
static av_always_inline void intra_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb, int mb_x, int mb_y, int is_vp7)
Definition: vp8.c:1588
static const uint8_t vp8_submv_prob[5][3]
Definition: vp8data.h:153
static const uint16_t vp7_ydc_qlookup[]
Definition: vp8data.h:778
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
#define NULL
Definition: coverity.c:32
const struct AVCodec * codec
Definition: avcodec.h:1658
discard all frames except keyframes
Definition: avcodec.h:783
Definition: vp9.h:83
void(* prefetch)(uint8_t *buf, ptrdiff_t stride, int h)
Prefetch memory into cache (if supported by hardware).
Definition: videodsp.h:76
const char * s
Definition: avisynth_c.h:631
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static const uint8_t vp7_mv_default_prob[2][17]
Definition: vp8data.h:744
#define DC_128_PRED8x8
Definition: h264pred.h:76
static av_always_inline int pthread_mutex_destroy(pthread_mutex_t *mutex)
Definition: os2threads.h:106
(only used in prediction) no split MVs
Definition: vp8.h:70
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:124
void ff_vp7dsp_init(VP8DSPContext *c)
static void update_lf_deltas(VP8Context *s)
Definition: vp8.c:236
This structure describes decoded (raw) audio or video data.
Definition: frame.h:184
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
static void flush(AVCodecContext *avctx)
static const uint8_t vp7_pred4x4_mode[]
Definition: vp8data.h:33
int8_t sign_bias[4]
one state [0, 1] per ref frame type
Definition: vp8.h:156
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:1851
static av_always_inline int inter_predict_dc(int16_t block[16], int16_t pred[2])
Definition: vp8.c:1313
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
static int init_thread_copy(AVCodecContext *avctx)
Definition: tta.c:390
#define VP7_MV_PRED_COUNT
Definition: vp8data.h:68
AVFrame * f
Definition: thread.h:36
static av_always_inline int vp8_rac_get_tree(VP56RangeCoder *c, const int8_t(*tree)[2], const uint8_t *probs)
Definition: vp56.h:376
uint8_t feature_value[4][4]
Definition: vp8.h:301
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:210
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 ...
Definition: pixfmt.h:440
static av_always_inline void decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr, int is_vp7)
Definition: vp8.c:2278
uint8_t * intra4x4_pred_mode_top
Definition: vp8.h:180
uint8_t mbskip_enabled
Definition: vp8.h:151
static VP56Frame ref_to_update(VP8Context *s, int update, VP56Frame ref)
Determine which buffers golden and altref should be updated with after this frame.
Definition: vp8.c:351
void(* vp8_v_loop_filter16y)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:48
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2385
static int vp7_decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, uint8_t *token_prob, int16_t qmul[2], const uint8_t scan[16])
Definition: vp8.c:1335
uint8_t token[4][16][3][NUM_DCT_TOKENS-1]
Definition: vp8.h:238
uint8_t scan[16]
Definition: vp8.h:240
int linesize
Definition: vp8.h:146
int size
Definition: avcodec.h:1581
const char * b
Definition: vf_curves.c:109
static void vp8_decode_flush(AVCodecContext *avctx)
Definition: vp8.c:120
#define MARGIN
Definition: vp8.c:2192
vp8_mc_func put_vp8_bilinear_pixels_tab[3][3][3]
Definition: vp8dsp.h:81
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1877
VP56mv bmv[16]
Definition: vp8.h:91
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:411
uint8_t inner_filter
Definition: vp8.h:76
static const int8_t vp8_pred8x8c_tree[3][2]
Definition: vp8data.h:180
uint8_t segmentid[3]
Definition: vp8.h:231
static const uint16_t vp7_y2dc_qlookup[]
Definition: vp8data.h:803
discard all
Definition: avcodec.h:784
void ff_thread_await_progress(ThreadFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures.
static void copy_chroma(AVFrame *dst, AVFrame *src, int width, int height)
Definition: vp8.c:426
#define HOR_PRED8x8
Definition: h264pred.h:69
AVCodec.
Definition: avcodec.h:3542
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_RL16
Definition: bytestream.h:87
uint8_t sharpness
Definition: vp8.h:175
#define AV_WN32A(p, v)
Definition: intreadwrite.h:538
2 16x8 blocks (vertical)
Definition: vp8.h:66
#define AV_COPY32(d, s)
Definition: intreadwrite.h:586
static av_always_inline int pthread_cond_destroy(pthread_cond_t *cond)
Definition: os2threads.h:138
int update_probabilities
If this flag is not set, all the probability updates are discarded after this frame is decoded...
Definition: vp8.h:253
VP8Frame * framep[4]
Definition: vp8.h:139
static int vp8_decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, uint8_t *token_prob, int16_t qmul[2])
Definition: vp8.c:1347
struct VP8Context::@123 qmat[4]
Macroblocks can have one of 4 different quants in a frame when segmentation is enabled.
static void vp7_filter_mb_row(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2470
#define VP7_MVC_SIZE
Definition: vp8.c:392
static int vp7_read_mv_component(VP56RangeCoder *c, const uint8_t *p)
Definition: vp8.c:807
vp8_mc_func put_vp8_epel_pixels_tab[3][3][3]
first dimension: width>>3, height is assumed equal to width second dimension: 0 if no vertical interp...
Definition: vp8dsp.h:80
static av_always_inline const uint8_t * get_submv_prob(uint32_t left, uint32_t top, int is_vp7)
Definition: vp8.c:818
static const uint8_t vp8_pred8x8c_prob_inter[3]
Definition: vp8data.h:189
static av_always_inline int decode_block_coeffs(VP56RangeCoder *c, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, int zero_nhood, int16_t qmul[2], const uint8_t scan[16], int vp7)
Definition: vp8.c:1372
uint8_t(* top_nnz)[9]
Definition: vp8.h:220
int num_jobs
Definition: vp8.h:270
static const uint8_t vp8_mbsplits[5][16]
Definition: vp8data.h:127
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:3264
#define AV_RN32A(p)
Definition: intreadwrite.h:526
static int16_t block[64]
Definition: dct.c:113
uint8_t pred16x16[4]
Definition: vp8.h:236
static const int8_t vp8_pred16x16_tree_intra[4][2]
Definition: vp8data.h:47
uint8_t update_map
Definition: vp8.h:167
#define PLANE_PRED8x8
Definition: h264pred.h:71
uint16_t mb_height
Definition: vp8.h:145
int16_t y
Definition: vp56.h:67
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:63
void void avpriv_request_sample(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
int update_golden
VP56_FRAME_NONE if not updated, or which frame to copy if so.
Definition: vp8.h:246
static av_always_inline void filter_mb_row(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr, int is_vp7)
Definition: vp8.c:2411
uint8_t intra4x4_pred_mode_top[4]
Definition: vp8.h:89
static av_always_inline void xchg_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int mb_x, int mb_y, int mb_width, int simple, int xchg)
Definition: vp8.c:1474
uint8_t
static int vp7_update_dimensions(VP8Context *s, int width, int height)
Definition: vp8.c:204
#define av_cold
Definition: attributes.h:82
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:140
#define mb
#define DC_PRED8x8
Definition: h264pred.h:68
int fade_present
Fade bit present in bitstream (VP7)
Definition: vp8.h:286
static av_always_inline void vp7_decode_mvs(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int layout)
Definition: vp8.c:936
mode
Definition: f_perms.c:27
static VP8Frame * vp8_find_free_buffer(VP8Context *s)
Definition: vp8.c:125
uint8_t ref_frame
Definition: vp8.h:84
static av_always_inline int check_intra_pred4x4_mode_emuedge(int mode, int mb_x, int mb_y, int *copy_buf, int vp7)
Definition: vp8.c:1552
struct VP8Context::@125 prob[2]
These are all of the updatable probabilities for binary decisions.
Multithreading support functions.
Definition: vp9.h:82
#define TOP_DC_PRED8x8
Definition: h264pred.h:75
int ff_vp8_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: vp8.c:2683
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:374
uint8_t mvc[2][19]
Definition: vp8.h:239
VP56mv mv
Definition: vp8.h:90
int8_t base_quant[4]
Definition: vp8.h:168
static const uint8_t vp8_mv_update_prob[2][19]
Definition: vp8data.h:733
static AVFrame * frame
void(* pred8x8[4+3+4])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:97
int update_last
update VP56_FRAME_PREVIOUS with the current one
Definition: vp8.h:245
#define height
uint8_t * data
Definition: avcodec.h:1580
int8_t yoffset
Definition: vp8data.h:62
int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
Definition: utils.c:3841
static void copy(LZOContext *c, int cnt)
Copies bytes from input to output buffer with checking.
Definition: lzo.c:85
static void parse_segment_info(VP8Context *s)
Definition: vp8.c:215
ptrdiff_t size
Definition: opengl_enc.c:101
VP8Frame * prev_frame
Definition: vp8.h:142
int num_coeff_partitions
All coefficients are contained in separate arith coding contexts.
Definition: vp8.h:259
static const uint8_t vp8_token_default_probs[4][8][3][NUM_DCT_TOKENS-1]
Definition: vp8data.h:369
static void fade(uint8_t *dst, int dst_linesize, const uint8_t *src, int src_linesize, int width, int height, int alpha, int beta)
Definition: vp8.c:437
vp8_mc_func put_pixels_tab[3][3][3]
Definition: vp8.h:264
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when they are ready for the next thread to st...
void(* pred4x4[9+3+3])(uint8_t *src, const uint8_t *topright, ptrdiff_t stride)
Definition: h264pred.h:93
#define AV_COPY64(d, s)
Definition: intreadwrite.h:590
uint8_t feature_index_prob[4][3]
Definition: vp8.h:300
uint8_t intra4x4_pred_mode_mb[16]
Definition: vp8.h:88
#define av_log(a,...)
static av_always_inline int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt, int is_vp7)
Definition: vp8.c:2526
unsigned m
Definition: audioconvert.c:187
uint8_t intra4x4_pred_mode_left[4]
Definition: vp8.h:181
#define VERT_VP8_PRED
for VP8, VERT_PRED is the average of
Definition: h264pred.h:60
av_cold void ff_vp78dsp_init(VP8DSPContext *dsp)
Definition: vp8dsp.c:664
uint8_t colorspace
0 is the only value allowed (meaning bt601)
Definition: vp8.h:267
static const VP56mv * get_bmv_ptr(const VP8Macroblock *mb, int subblock)
Definition: vp8.c:930
enum AVCodecID id
Definition: avcodec.h:3556
static const uint8_t vp8_mbsplit_count[4]
Definition: vp8data.h:142
static double alpha(void *priv, double x, double y)
Definition: vf_geq.c:99
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
static const int8_t vp8_coeff_band_indexes[8][10]
Definition: vp8data.h:331
#define td
Definition: regdef.h:70
H264PredContext hpc
Definition: vp8.h:263
Definition: vp8.h:125
static const uint8_t vp8_pred4x4_mode[]
Definition: vp8data.h:40
static av_always_inline void prefetch_motion(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int mb_xy, int ref)
Definition: vp8.c:1871
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() frame-for multithreaded codecs.
uint8_t absolute_vals
Definition: vp8.h:166
uint16_t mb_width
Definition: vp8.h:144
void(* vp8_luma_dc_wht_dc)(int16_t block[4][4][16], int16_t dc[16])
Definition: vp8dsp.h:39
static const uint8_t vp8_dct_cat2_prob[]
Definition: vp8data.h:345
static const uint8_t vp8_mv_default_prob[2][19]
Definition: vp8data.h:755
static void vp8_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2405
#define FF_SIGNBIT(x)
Definition: internal.h:75
uint8_t last
Definition: vp8.h:234
static const int sizes[][2]
Definition: img2dec.c:50
#define AVERROR(e)
Definition: error.h:43
void(* vp8_h_loop_filter8uv)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:54
static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
Definition: vp8.c:640
uint8_t mode
Definition: vp8.h:83
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:153
static av_always_inline int check_tm_pred8x8_mode(int mode, int mb_x, int mb_y, int vp7)
Definition: vp8.c:1517
static int vp8_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2518
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:176
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:3098
const char * r
Definition: vf_curves.c:107
const uint8_t ff_zigzag_scan[16+1]
Definition: mathtables.c:109
VP8 compatible video decoder.
void(* vp8_v_loop_filter8uv)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:52
static const uint8_t vp8_mbfirstidx[4][16]
Definition: vp8data.h:135
AVCodecContext * avctx
Definition: vp8.h:138
#define EDGE_EMU_LINESIZE
Definition: vp8.h:120
uint16_t inter_dc_pred[2][2]
Interframe DC prediction (VP7) [0] VP56_FRAME_PREVIOUS [1] VP56_FRAME_GOLDEN.
Definition: vp8.h:293
VideoDSPContext vdsp
Definition: vp8.h:261
const char * name
Name of the codec implementation.
Definition: avcodec.h:3549
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_RL24
Definition: bytestream.h:87
VP8Macroblock * macroblocks_base
Definition: vp8.h:243
static av_always_inline void vp8_mc_part(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], ThreadFrame *ref_frame, int x_off, int y_off, int bx_off, int by_off, int block_w, int block_h, int width, int height, VP56mv *mv)
Definition: vp8.c:1833
static const uint8_t vp8_pred4x4_prob_inter[9]
Definition: vp8data.h:192
uint8_t edge_emu_buffer[21 *EDGE_EMU_LINESIZE]
Definition: vp8.h:121
int16_t block[6][4][16]
Definition: vp8.h:95
struct VP8Context::@124 lf_delta
static av_always_inline int decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, uint8_t *token_prob, int16_t qmul[2], const uint8_t scan[16], int vp7)
Definition: vp8.c:1253
static const int vp7_mode_contexts[31][4]
Definition: vp8data.h:84
static void vp8_filter_mb_row(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2476
static void vp7_get_quants(VP8Context *s)
Definition: vp8.c:286
#define FFMAX(a, b)
Definition: common.h:94
uint8_t keyframe
Definition: vp8.h:149
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: avcodec.h:1019
struct VP8Context::@121 segmentation
Base parameters for segmentation, i.e.
int x
Definition: vp8.h:131
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:215
VP56Frame
Definition: vp56.h:39
int16_t luma_qmul[2]
Definition: vp8.h:190
static const uint8_t vp8_pred16x16_prob_inter[4]
Definition: vp8data.h:164
Definition: hls.c:67
static void vp7_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2399
useful rectangle filling function
#define MAX_THREADS
static av_always_inline void update(SilenceDetectContext *s, AVFrame *insamples, int is_silence, int64_t nb_samples_notify, AVRational time_base)
#define cat(a, bpp, b)
Definition: vp9dsp_init.h:29
4x4 blocks of 4x4px each
Definition: vp8.h:69
uint8_t deblock_filter
Definition: vp8.h:150
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:3090
#define H_LOOP_FILTER_16Y_INNER(cond)
#define FFMIN(a, b)
Definition: common.h:96
uint8_t feature_present_prob[4]
Definition: vp8.h:299
static av_always_inline void vp8_mc_chroma(VP8Context *s, VP8ThreadData *td, uint8_t *dst1, uint8_t *dst2, ThreadFrame *ref, const VP56mv *mv, int x_off, int y_off, int block_w, int block_h, int width, int height, ptrdiff_t linesize, vp8_mc_func mc_func[3][3])
chroma MC function
Definition: vp8.c:1783
#define width
uint8_t fullrange
whether we can skip clamping in dsp functions
Definition: vp8.h:268
int16_t block_dc[16]
Definition: vp8.h:96
static av_unused int vp8_rac_get_sint(VP56RangeCoder *c, int bits)
Definition: vp56.h:332
int width
picture width / height.
Definition: avcodec.h:1836
uint8_t mbskip
Definition: vp8.h:232
int8_t ref[4]
filter strength adjustment for macroblocks that reference: [0] - intra / VP56_FRAME_CURRENT [1] - VP5...
Definition: vp8.h:216
void(* filter_mb_row)(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.h:279
void(* vp8_idct_dc_add4y)(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride)
Definition: vp8dsp.h:42
static av_cold int vp8_init_frames(VP8Context *s)
Definition: vp8.c:2712
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
static void free_buffers(VP8Context *s)
Definition: vp8.c:49
int32_t
AVFormatContext * ctx
Definition: movenc.c:48
struct VP8Context::@122 filter
#define check_thread_pos(td, otd, mb_x_check, mb_y_check)
Definition: vp8.c:2274
static av_always_inline int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
Definition: os2threads.h:98
static int vp8_read_mv_component(VP56RangeCoder *c, const uint8_t *p)
Definition: vp8.c:812
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:3091
void(* vp8_mc_func)(uint8_t *dst, ptrdiff_t dstStride, uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: vp8dsp.h:33
int16_t luma_dc_qmul[2]
luma dc-only block quant
Definition: vp8.h:191
int16_t chroma_qmul[2]
Definition: vp8.h:192
static const uint8_t vp8_pred4x4_prob_intra[10][10][9]
Definition: vp8data.h:196
uint8_t(* top_border)[16+8+8]
Definition: vp8.h:219
int n
Definition: avisynth_c.h:547
ThreadFrame tf
Definition: vp8.h:126
static av_always_inline void filter_level_for_mb(VP8Context *s, VP8Macroblock *mb, VP8FilterStrength *f, int is_vp7)
Definition: vp8.c:2040
#define src
Definition: vp9dsp.c:530
static const int8_t vp7_feature_index_tree[4][2]
Definition: vp8data.h:771
static const uint8_t vp7_feature_value_size[2][4]
Definition: vp8data.h:766
#define vp56_rac_get_prob
Definition: vp56.h:250
static void vp8_decode_flush_impl(AVCodecContext *avctx, int free_mem)
Definition: vp8.c:107
static av_always_inline void decode_mb_coeffs(VP8Context *s, VP8ThreadData *td, VP56RangeCoder *c, VP8Macroblock *mb, uint8_t t_nnz[9], uint8_t l_nnz[9], int is_vp7)
Definition: vp8.c:1387
#define LEFT_DC_PRED8x8
Definition: h264pred.h:74
static void vp8_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame, VP8Frame *prev_frame)
Definition: vp8.c:2232
uint8_t segment
Definition: vp8.h:87
#define FF_ARRAY_ELEMS(a)
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:3079
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:457
int8_t xoffset
Definition: vp8data.h:63
static const float pred[4]
Definition: siprdata.h:259
static int vp7_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2512
#define IS_VP8
Definition: vp8dsp.h:104
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: avcodec.h:1023
static const int8_t mv[256][2]
Definition: 4xm.c:77
static void vp7_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame, VP8Frame *prev_frame)
Definition: vp8.c:2226
static av_always_inline int check_intra_pred8x8_mode_emuedge(int mode, int mb_x, int mb_y, int vp7)
Definition: vp8.c:1526
static av_always_inline int vp56_rac_get_prob_branchy(VP56RangeCoder *c, int prob)
Definition: vp56.h:267
void(* vp8_v_loop_filter8uv_inner)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:62
void(* vp8_h_loop_filter_simple)(uint8_t *dst, ptrdiff_t stride, int flim)
Definition: vp8dsp.h:70
#define src1
Definition: h264pred.c:139
static av_always_inline void inter_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb, int mb_x, int mb_y)
Apply motion vectors to prediction buffer, chapter 18.
Definition: vp8.c:1894
VP8Frame * curframe
Definition: vp8.h:141
uint8_t simple
Definition: vp8.h:173
void(* vp8_idct_add)(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
Definition: vp8dsp.h:40
VP8Frame frames[5]
Definition: vp8.h:265
Libavcodec external API header.
static const uint8_t vp8_pred8x8c_prob_intra[3]
Definition: vp8data.h:186
uint8_t level
Definition: vp8.h:174
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:215
static void vp8_release_frame(VP8Context *s, VP8Frame *f)
Definition: vp8.c:82
AVBufferRef * seg_map
Definition: vp8.h:127
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
static const uint16_t vp7_yac_qlookup[]
Definition: vp8data.h:790
main external API structure.
Definition: avcodec.h:1649
static int vp7_fade_frame(VP8Context *s, VP56RangeCoder *c)
Definition: vp8.c:451
uint8_t * data
The data buffer.
Definition: buffer.h:89
VP8Frame * next_framep[4]
Definition: vp8.h:140
int mb_layout
This describes the macroblock memory layout.
Definition: vp8.h:276
uint8_t left_nnz[9]
For coeff decode, we need to know whether the above block had non-zero coefficients.
Definition: vp8.h:111
static const uint8_t vp8_mbsplit_prob[3]
Definition: vp8data.h:145
VP56RangeCoder c
header context, includes mb modes and motion vectors
Definition: vp8.h:222
void * buf
Definition: avisynth_c.h:553
int y
Definition: vp8.h:132
void(* pred16x16[4+3+2])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:98
VP56RangeCoder coeff_partition[8]
Definition: vp8.h:260
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:82
static const int8_t vp8_pred16x16_tree_inter[4][2]
Definition: vp8data.h:54
int vp7
Definition: vp8.h:281
static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
Definition: vp8.c:260
int coded_height
Definition: avcodec.h:1851
static int vp8_update_dimensions(VP8Context *s, int width, int height)
Definition: vp8.c:209
int index
Definition: gxfenc.c:89
VP8FilterStrength * filter_strength
Definition: vp8.h:122
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:2378
VP8intmv mv_min
Definition: vp8.h:153
static av_always_inline void clamp_mv(VP8Context *s, VP56mv *dst, const VP56mv *src)
Definition: vp8.c:768
void(* vp8_idct_dc_add4uv)(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride)
Definition: vp8dsp.h:44
static av_always_inline int check_dc_pred8x8_mode(int mode, int mb_x, int mb_y)
Definition: vp8.c:1508
static void vp78_update_probability_tables(VP8Context *s)
Definition: vp8.c:376
#define MV_EDGE_CHECK(n)
static const int8_t vp8_pred4x4_tree[9][2]
Definition: vp8data.h:168
uint8_t enabled
whether each mb can have a different strength based on mode/ref
Definition: vp8.h:165
static av_always_inline void idct_mb(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb)
Definition: vp8.c:1978
static void vp78_update_pred16x16_pred8x8_mvc_probabilities(VP8Context *s, int mvc_size)
Definition: vp8.c:395
static av_always_inline int read_mv_component(VP56RangeCoder *c, const uint8_t *p, int vp7)
Motion vector coding, 17.1.
Definition: vp8.c:779
static const uint8_t subpel_idx[3][8]
Definition: vp8.c:1701
int uvlinesize
Definition: vp8.h:147
static void update_refs(VP8Context *s)
Definition: vp8.c:415
static av_always_inline int vp8_rac_get_coeff(VP56RangeCoder *c, const uint8_t *prob)
Definition: vp56.h:389
static const uint8_t vp8_coeff_band[16]
Definition: vp8data.h:325
#define u(width,...)
int allocate_progress
Whether to allocate progress for frame threading.
Definition: internal.h:126
static const uint16_t vp8_ac_qlookup[VP8_MAX_QUANT+1]
Definition: vp8data.h:722
static const uint8_t vp8_pred16x16_prob_intra[4]
Definition: vp8data.h:161
uint8_t score
Definition: vp8data.h:65
mfxU16 profile
Definition: qsvenc.c:42
static av_always_inline void decode_intra4x4_modes(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb, int mb_x, int keyframe, int layout)
Definition: vp8.c:1127
static int vp8_rac_get_uint(VP56RangeCoder *c, int bits)
Definition: vp56.h:320
#define DC_127_PRED8x8
Definition: h264pred.h:85
void ff_vp56_init_range_decoder(VP56RangeCoder *c, const uint8_t *buf, int buf_size)
Definition: vp56rac.c:40
void(* vp8_luma_dc_wht)(int16_t block[4][4][16], int16_t dc[16])
Definition: vp8dsp.h:38
Definition: vp56.h:65
av_cold int ff_vp8_decode_init(AVCodecContext *avctx)
Definition: vp8.c:2767
int update_altref
Definition: vp8.h:247
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:198
VP8intmv mv_max
Definition: vp8.h:154
uint8_t feature_enabled[4]
Macroblock features (VP7)
Definition: vp8.h:298
int8_t mode[VP8_MVMODE_SPLIT+1]
filter strength adjustment for the following macroblock modes: [0-3] - i16x16 (always zero) [4] - i4x...
Definition: vp8.h:207
VP8_SPLITMVMODE_8x16
2 8x16 blocks (horizontal)
Definition: vp8.h:67
av_cold int ff_vp8_decode_free(AVCodecContext *avctx)
Definition: vp8.c:2697
static av_always_inline void backup_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int simple)
Definition: vp8.c:1462
#define AV_ZERO128(d)
Definition: intreadwrite.h:622
uint8_t pred8x8c[3]
Definition: vp8.h:237
AVCOL_RANGE_MPEG
the normal 219*2^(n-8) "MPEG" YUV ranges
Definition: pixfmt.h:456
AVDISCARD_NONREF
discard all non reference
Definition: avcodec.h:780
static av_always_inline void vp78_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *curframe, VP8Frame *prev_frame, int is_vp7)
Definition: vp8.c:2194
uint8_t partitioning
Definition: vp8.h:85
void(* decode_mb_row_no_filter)(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.h:278
#define AV_ZERO64(d)
Definition: intreadwrite.h:618
AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:62
static av_always_inline void decode_mb_mode(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, uint8_t *segment, uint8_t *ref, int layout, int is_vp7)
Definition: vp8.c:1162
void(* vp8_v_loop_filter_simple)(uint8_t *dst, ptrdiff_t stride, int flim)
Definition: vp8dsp.h:69
int16_t x
Definition: vp56.h:66
common internal api header.
static void vp8_get_quants(VP8Context *s)
Definition: vp8.c:305
#define LOCAL_ALIGNED(a, t, v,...)
Definition: internal.h:110
#define AV_COPY128(d, s)
Definition: intreadwrite.h:594
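A minimal sketch of the AV_COPY128/AV_ZERO128 pattern, assuming both blocks are suitably aligned 16-byte coefficient groups:
#include <stdint.h>
#include "libavutil/intreadwrite.h"

/* Copy one 16-byte block of coefficients and clear the source,
 * without explicit memcpy()/memset() calls. */
static void move_eight_coeffs(int16_t *dst, int16_t *src)
{
    AV_COPY128(dst, src); /* 16 bytes from src to dst */
    AV_ZERO128(src);      /* clear the 16 source bytes */
}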
int wait_mb_pos
Definition: vp8.h:118
static int vp8_alloc_frame(VP8Context *s, VP8Frame *f, int ref)
Definition: vp8.c:69
uint8_t chroma_pred_mode
Definition: vp8.h:86
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:92
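A minimal sketch of taking a second reference to an existing buffer (share_buffer is a hypothetical helper):
#include "libavutil/buffer.h"
#include "libavutil/error.h"

/* av_buffer_ref() returns a new reference to the same data, or NULL on
 * failure; the source reference is not modified either way. */
static int share_buffer(AVBufferRef **dst, AVBufferRef *src)
{
    *dst = av_buffer_ref(src);
    return *dst ? 0 : AVERROR(ENOMEM);
}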
static av_always_inline int pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr)
Definition: os2threads.h:127
#define DC_129_PRED8x8
Definition: h264pred.h:86
enum AVDiscard skip_loop_filter
Skip loop filtering for selected frames.
Definition: avcodec.h:3250
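From the caller's side this is plain public API; a minimal sketch that trades deblocking quality on non-reference frames for decoding speed:
#include "libavcodec/avcodec.h"

/* Ask the decoder to skip the in-loop deblocking filter for frames that
 * are not used as references. */
static void configure_fast_decode(AVCodecContext *avctx)
{
    avctx->skip_loop_filter = AVDISCARD_NONREF;
}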
static av_always_inline int vp8_rac_get(VP56RangeCoder *c)
Definition: vp56.h:304
int invisible
Definition: vp8.h:244
static av_always_inline int decode_splitmvs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb, int layout, int is_vp7)
Split motion vector prediction, 16.4.
Definition: vp8.c:835
int ref_count[3]
Definition: vp8.h:157
void * priv_data
Definition: avcodec.h:1691
int(* update_thread_context)(AVCodecContext *dst, const AVCodecContext *src)
Copy necessary context variables from a previous thread context to the current one.
Definition: avcodec.h:3597
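The general shape of such a frame-threading callback, sketched for a hypothetical MyDecCtx private context rather than the actual VP8 implementation:
#include "libavcodec/avcodec.h"

typedef struct MyDecCtx {
    int entropy_state;    /* placeholder: probability state carried across frames */
    int segmentation_map; /* placeholder: segmentation state */
} MyDecCtx;

/* Copy the state the next frame depends on from the previous thread's
 * context into the current one; return 0 on success. */
static int my_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
    MyDecCtx       *d = dst->priv_data;
    const MyDecCtx *s = src->priv_data;

    d->entropy_state    = s->entropy_state;
    d->segmentation_map = s->segmentation_map;
    return 0;
}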
static av_always_inline int check_tm_pred4x4_mode(int mode, int mb_x, int mb_y, int vp7)
Definition: vp8.c:1542
void(* vp8_h_loop_filter16y)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:50
#define MODE_I4x4
Definition: vp8.h:57
static int vp7_calculate_mb_offset(int mb_x, int mb_y, int mb_width, int xoffset, int yoffset, int boundary, int *edge_x, int *edge_y)
The vp7 reference decoder uses a padding macroblock column (added to right edge of the frame) to guar...
Definition: vp8.c:917
#define XCHG(a, b, xchg)
int(* execute2)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count)
The codec may call this to execute several independent things.
Definition: avcodec.h:3139
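A minimal sketch of the execute2() pattern: every job receives the same arg pointer plus its job and thread numbers, so jobnr selects the slice of work (decode_rows_job and run_sliced_decode are hypothetical names):
#include "libavcodec/avcodec.h"

/* One job: decode whatever rows are assigned to `jobnr` using `arg`. */
static int decode_rows_job(AVCodecContext *avctx, void *arg,
                           int jobnr, int threadnr)
{
    /* ... per-job work goes here ... */
    return 0;
}

/* Fan the work out across the codec's thread pool. */
static void run_sliced_decode(AVCodecContext *avctx, void *arg, int num_jobs)
{
    avctx->execute2(avctx, decode_rows_job, arg, NULL, num_jobs);
}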
#define update_pos(td, mb_y, mb_x)
Definition: vp8.c:2275
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1699
#define HOR_VP8_PRED
unaveraged version of HOR_PRED, see
Definition: h264pred.h:63
VP8DSPContext vp8dsp
Definition: vp8.h:262
static av_always_inline int update_dimensions(VP8Context *s, int width, int height, int is_vp7)
Definition: vp8.c:150
int thread_nr
Definition: vp8.h:112
#define AV_ZERO32(d)
Definition: intreadwrite.h:614
static av_always_inline int vp78_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr, int is_vp7)
Definition: vp8.c:2483
void(* vp8_idct_dc_add)(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
Definition: vp8dsp.h:41
AVDiscard
Definition: avcodec.h:775
static av_unused int vp8_rac_get_nn(VP56RangeCoder *c)
Definition: vp56.h:354
void(* vp8_v_loop_filter16y_inner)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:58
#define av_uninit(x)
Definition: attributes.h:149
static av_always_inline void vp8_mc_luma(VP8Context *s, VP8ThreadData *td, uint8_t *dst, ThreadFrame *ref, const VP56mv *mv, int x_off, int y_off, int block_w, int block_h, int width, int height, ptrdiff_t linesize, vp8_mc_func mc_func[3][3])
luma MC function
Definition: vp8.c:1725
static const uint8_t vp8_token_update_probs[4][8][3][NUM_DCT_TOKENS-1]
Definition: vp8data.h:540
static av_always_inline void filter_mb(VP8Context *s, uint8_t *dst[3], VP8FilterStrength *f, int mb_x, int mb_y, int is_vp7)
Definition: vp8.c:2073
#define av_freep(p)
#define IS_VP7
Definition: vp8dsp.h:103
#define av_always_inline
Definition: attributes.h:39
int8_t filter_level[4]
base loop filter level
Definition: vp8.h:169
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
static const int vp8_mode_contexts[6][4]
Definition: vp8data.h:118
static const uint8_t vp8_dct_cat1_prob[]
Definition: vp8data.h:342
#define FFSWAP(type, a, b)
Definition: common.h:99
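A minimal sketch of FFSWAP(): rotate two frame pointers instead of copying frame data (swap_frames is a hypothetical helper):
#include "libavutil/common.h"
#include "libavutil/frame.h"

/* Exchange two AVFrame pointers in place. */
static void swap_frames(AVFrame **a, AVFrame **b)
{
    FFSWAP(AVFrame *, *a, *b);
}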
uint8_t intra
Definition: vp8.h:233
static av_always_inline void vp8_decode_mvs(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int layout)
Definition: vp8.c:1027
uint8_t non_zero_count_cache[6][4]
This is the index plus one of the last non-zero coeff for each of the blocks in the current macrobloc...
Definition: vp8.h:104
uint8_t skip
Definition: vp8.h:80
void ff_vp8dsp_init(VP8DSPContext *c)
AV_RL32
Definition: bytestream.h:87
static void vp78_reset_probability_tables(VP8Context *s)
Definition: vp8.c:367
This structure stores compressed data.
Definition: avcodec.h:1557
#define VP8_MVC_SIZE
Definition: vp8.c:393
static int vp7_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
Definition: vp8.c:490
uint8_t profile
Definition: vp8.h:152
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:1341
const uint8_t *const ff_vp8_dct_cat_prob[]
Definition: vp8data.h:362
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:252
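A minimal sketch pairing av_mallocz() with av_freep() so the pointer is reset to NULL once the memory is released (alloc_and_release is a hypothetical helper):
#include <stdint.h>
#include "libavutil/error.h"
#include "libavutil/mem.h"

/* Allocate a zero-filled, suitably aligned block, use it, then free it
 * and null the pointer in one step. */
static int alloc_and_release(size_t size)
{
    uint8_t *buf = av_mallocz(size);
    if (!buf)
        return AVERROR(ENOMEM);
    /* ... use buf ... */
    av_freep(&buf); /* buf is NULL afterwards */
    return 0;
}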
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:956
VP8ThreadData * thread_data
Definition: vp8.h:137
AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:267
int thread_mb_pos
Definition: vp8.h:117
VP8_SPLITMVMODE_8x8
2x2 blocks of 8x8px each
Definition: vp8.h:68
static av_always_inline void filter_mb_simple(VP8Context *s, uint8_t *dst, VP8FilterStrength *f, int mb_x, int mb_y)
Definition: vp8.c:2160
static const VP7MVPred vp7_mv_pred[VP7_MV_PRED_COUNT]
Definition: vp8data.h:69
static const uint16_t vp7_y2ac_qlookup[]
Definition: vp8data.h:816
static const uint8_t vp7_submv_prob[3]
Definition: vp8data.h:149
static av_always_inline int vp78_decode_init(AVCodecContext *avctx, int is_vp7)
Definition: vp8.c:2724
#define AV_WN64(p, v)
Definition: intreadwrite.h:380
uint8_t filter_level
Definition: vp8.h:74