FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
vp8.c
Go to the documentation of this file.
1 /*
2  * VP7/VP8 compatible video decoder
3  *
4  * Copyright (C) 2010 David Conrad
5  * Copyright (C) 2010 Ronald S. Bultje
6  * Copyright (C) 2010 Fiona Glaser
7  * Copyright (C) 2012 Daniel Kang
8  * Copyright (C) 2014 Peter Ross
9  *
10  * This file is part of FFmpeg.
11  *
12  * FFmpeg is free software; you can redistribute it and/or
13  * modify it under the terms of the GNU Lesser General Public
14  * License as published by the Free Software Foundation; either
15  * version 2.1 of the License, or (at your option) any later version.
16  *
17  * FFmpeg is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20  * Lesser General Public License for more details.
21  *
22  * You should have received a copy of the GNU Lesser General Public
23  * License along with FFmpeg; if not, write to the Free Software
24  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25  */
26 
27 #include "libavutil/imgutils.h"
28 
29 #include "avcodec.h"
30 #include "internal.h"
31 #include "mathops.h"
32 #include "rectangle.h"
33 #include "thread.h"
34 #include "vp8.h"
35 #include "vp8data.h"
36 
37 #if ARCH_ARM
38 # include "arm/vp8.h"
39 #endif
40 
41 #if CONFIG_VP7_DECODER && CONFIG_VP8_DECODER
42 #define VPX(vp7, f) (vp7 ? vp7_ ## f : vp8_ ## f)
43 #elif CONFIG_VP7_DECODER
44 #define VPX(vp7, f) vp7_ ## f
45 #else // CONFIG_VP8_DECODER
46 #define VPX(vp7, f) vp8_ ## f
47 #endif
48 
49 static void free_buffers(VP8Context *s)
50 {
/* Release all per-context scratch allocations and reset derived pointers.
 * NOTE(review): this is a Doxygen scrape with dropped lines — original
 * lines 56, 58 and 61-62 (presumably the per-thread mutex destroy, the
 * per-thread filter_strength free, and further av_freep() calls such as
 * macroblocks_base) are missing here; confirm against the original vp8.c. */
51  int i;
/* Tear down per-thread synchronization objects before freeing the array. */
52  if (s->thread_data)
53  for (i = 0; i < MAX_THREADS; i++) {
54 #if HAVE_THREADS
55  pthread_cond_destroy(&s->thread_data[i].cond);
57 #endif
59  }
60  av_freep(&s->thread_data);
63  av_freep(&s->top_nnz);
64  av_freep(&s->top_border);
65 
/* s->macroblocks aliases into macroblocks_base; clear it so stale use is caught. */
66  s->macroblocks = NULL;
67 }
68 
69 static int vp8_alloc_frame(VP8Context *s, VP8Frame *f, int ref)
70 {
71  int ret;
72  if ((ret = ff_thread_get_buffer(s->avctx, &f->tf,
73  ref ? AV_GET_BUFFER_FLAG_REF : 0)) < 0)
74  return ret;
75  if (!(f->seg_map = av_buffer_allocz(s->mb_width * s->mb_height))) {
77  return AVERROR(ENOMEM);
78  }
79  return 0;
80 }
81 
83 {
86 }
87 
88 #if CONFIG_VP8_DECODER
89 static int vp8_ref_frame(VP8Context *s, VP8Frame *dst, VP8Frame *src)
90 {
91  int ret;
92 
93  vp8_release_frame(s, dst);
94 
95  if ((ret = ff_thread_ref_frame(&dst->tf, &src->tf)) < 0)
96  return ret;
97  if (src->seg_map &&
98  !(dst->seg_map = av_buffer_ref(src->seg_map))) {
99  vp8_release_frame(s, dst);
100  return AVERROR(ENOMEM);
101  }
102 
103  return 0;
104 }
105 #endif /* CONFIG_VP8_DECODER */
106 
107 static void vp8_decode_flush_impl(AVCodecContext *avctx, int free_mem)
108 {
109  VP8Context *s = avctx->priv_data;
110  int i;
111 
112  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
113  vp8_release_frame(s, &s->frames[i]);
114  memset(s->framep, 0, sizeof(s->framep));
115 
116  if (free_mem)
117  free_buffers(s);
118 }
119 
120 static void vp8_decode_flush(AVCodecContext *avctx)
121 {
122  vp8_decode_flush_impl(avctx, 0);
123 }
124 
126 {
127  VP8Frame *frame = NULL;
128  int i;
129 
130  // find a free buffer
131  for (i = 0; i < 5; i++)
132  if (&s->frames[i] != s->framep[VP56_FRAME_CURRENT] &&
133  &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
134  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
135  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2]) {
136  frame = &s->frames[i];
137  break;
138  }
139  if (i == 5) {
140  av_log(s->avctx, AV_LOG_FATAL, "Ran out of free frames!\n");
141  abort();
142  }
143  if (frame->tf.f->data[0])
144  vp8_release_frame(s, frame);
145 
146  return frame;
147 }
148 
/* (Re)compute macroblock grid dimensions and (re)allocate the per-frame
 * scratch arrays. Called on the first frame and whenever the coded size
 * changes.
 * NOTE(review): Doxygen scrape with dropped lines — original lines 157
 * (presumably a free_buffers() call before resizing), 172 (presumably the
 * intra4x4_pred_mode_top allocation), 178 (presumably the thread_data
 * allocation) and 187 (the lhs of the filter_strength assignment) are
 * missing here; confirm against the original vp8.c before relying on this. */
149 static av_always_inline
150 int update_dimensions(VP8Context *s, int width, int height, int is_vp7)
151 {
152  AVCodecContext *avctx = s->avctx;
153  int i, ret;
154 
155  if (width != s->avctx->width || ((width+15)/16 != s->mb_width || (height+15)/16 != s->mb_height) && s->macroblocks_base ||
156  height != s->avctx->height) {
158 
159  ret = ff_set_dimensions(s->avctx, width, height);
160  if (ret < 0)
161  return ret;
162  }
163 
/* 16x16 macroblock grid, rounded up */
164  s->mb_width = (s->avctx->coded_width + 15) / 16;
165  s->mb_height = (s->avctx->coded_height + 15) / 16;
166 
/* VP7 and slice-threaded VP8 store macroblock rows in a full 2-D layout;
 * frame threading / single thread uses a compact rolling layout. */
167  s->mb_layout = is_vp7 || avctx->active_thread_type == FF_THREAD_SLICE &&
168  avctx->thread_count > 1;
169  if (!s->mb_layout) { // Frame threading and one thread
170  s->macroblocks_base = av_mallocz((s->mb_width + s->mb_height * 2 + 1) *
171  sizeof(*s->macroblocks));
173  } else // Sliced threading
174  s->macroblocks_base = av_mallocz((s->mb_width + 2) * (s->mb_height + 2) *
175  sizeof(*s->macroblocks));
176  s->top_nnz = av_mallocz(s->mb_width * sizeof(*s->top_nnz));
177  s->top_border = av_mallocz((s->mb_width + 1) * sizeof(*s->top_border));
179 
/* any allocation failure tears everything down */
180  if (!s->macroblocks_base || !s->top_nnz || !s->top_border ||
181  !s->thread_data || (!s->intra4x4_pred_mode_top && !s->mb_layout)) {
182  free_buffers(s);
183  return AVERROR(ENOMEM);
184  }
185 
186  for (i = 0; i < MAX_THREADS; i++) {
188  av_mallocz(s->mb_width * sizeof(*s->thread_data[0].filter_strength));
189  if (!s->thread_data[i].filter_strength) {
190  free_buffers(s);
191  return AVERROR(ENOMEM);
192  }
193 #if HAVE_THREADS
194  pthread_mutex_init(&s->thread_data[i].lock, NULL);
195  pthread_cond_init(&s->thread_data[i].cond, NULL);
196 #endif
197  }
198 
/* +1 so mb[-1] (left neighbor of column 0) is addressable */
199  s->macroblocks = s->macroblocks_base + 1;
200 
201  return 0;
202 }
203 
205 {
206  return update_dimensions(s, width, height, IS_VP7);
207 }
208 
210 {
211  return update_dimensions(s, width, height, IS_VP8);
212 }
213 
214 
216 {
217  VP56RangeCoder *c = &s->c;
218  int i;
219 
221 
222  if (vp8_rac_get(c)) { // update segment feature data
224 
225  for (i = 0; i < 4; i++)
227 
228  for (i = 0; i < 4; i++)
230  }
231  if (s->segmentation.update_map)
232  for (i = 0; i < 3; i++)
233  s->prob->segmentid[i] = vp8_rac_get(c) ? vp8_rac_get_uint(c, 8) : 255;
234 }
235 
/* Parse the loop-filter delta updates: a 6-bit magnitude plus sign flag
 * per reference frame, then per prediction mode (MODE_I4x4 through
 * VP8_MVMODE_SPLIT).
 * NOTE(review): the function signature (original line 236, the
 * update_lf_deltas declaration) is missing from this Doxygen scrape. */
237 {
238  VP56RangeCoder *c = &s->c;
239  int i;
240 
/* per-reference-frame filter level deltas */
241  for (i = 0; i < 4; i++) {
242  if (vp8_rac_get(c)) {
243  s->lf_delta.ref[i] = vp8_rac_get_uint(c, 6);
244 
/* sign bit follows the magnitude */
245  if (vp8_rac_get(c))
246  s->lf_delta.ref[i] = -s->lf_delta.ref[i];
247  }
248  }
249 
/* per-prediction-mode filter level deltas */
250  for (i = MODE_I4x4; i <= VP8_MVMODE_SPLIT; i++) {
251  if (vp8_rac_get(c)) {
252  s->lf_delta.mode[i] = vp8_rac_get_uint(c, 6);
253 
254  if (vp8_rac_get(c))
255  s->lf_delta.mode[i] = -s->lf_delta.mode[i];
256  }
257  }
258 }
259 
260 static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
261 {
262  const uint8_t *sizes = buf;
263  int i;
264  int ret;
265 
266  s->num_coeff_partitions = 1 << vp8_rac_get_uint(&s->c, 2);
267 
268  buf += 3 * (s->num_coeff_partitions - 1);
269  buf_size -= 3 * (s->num_coeff_partitions - 1);
270  if (buf_size < 0)
271  return -1;
272 
273  for (i = 0; i < s->num_coeff_partitions - 1; i++) {
274  int size = AV_RL24(sizes + 3 * i);
275  if (buf_size - size < 0)
276  return -1;
277 
278  ret = ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, size);
279  if (ret < 0)
280  return ret;
281  buf += size;
282  buf_size -= size;
283  }
284  return ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, buf_size);
285 }
286 
287 static void vp7_get_quants(VP8Context *s)
288 {
289  VP56RangeCoder *c = &s->c;
290 
291  int yac_qi = vp8_rac_get_uint(c, 7);
292  int ydc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
293  int y2dc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
294  int y2ac_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
295  int uvdc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
296  int uvac_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
297 
298  s->qmat[0].luma_qmul[0] = vp7_ydc_qlookup[ydc_qi];
299  s->qmat[0].luma_qmul[1] = vp7_yac_qlookup[yac_qi];
300  s->qmat[0].luma_dc_qmul[0] = vp7_y2dc_qlookup[y2dc_qi];
301  s->qmat[0].luma_dc_qmul[1] = vp7_y2ac_qlookup[y2ac_qi];
302  s->qmat[0].chroma_qmul[0] = FFMIN(vp7_ydc_qlookup[uvdc_qi], 132);
303  s->qmat[0].chroma_qmul[1] = vp7_yac_qlookup[uvac_qi];
304 }
305 
306 static void vp8_get_quants(VP8Context *s)
307 {
308  VP56RangeCoder *c = &s->c;
309  int i, base_qi;
310 
311  int yac_qi = vp8_rac_get_uint(c, 7);
312  int ydc_delta = vp8_rac_get_sint(c, 4);
313  int y2dc_delta = vp8_rac_get_sint(c, 4);
314  int y2ac_delta = vp8_rac_get_sint(c, 4);
315  int uvdc_delta = vp8_rac_get_sint(c, 4);
316  int uvac_delta = vp8_rac_get_sint(c, 4);
317 
318  for (i = 0; i < 4; i++) {
319  if (s->segmentation.enabled) {
320  base_qi = s->segmentation.base_quant[i];
321  if (!s->segmentation.absolute_vals)
322  base_qi += yac_qi;
323  } else
324  base_qi = yac_qi;
325 
326  s->qmat[i].luma_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + ydc_delta, 7)];
327  s->qmat[i].luma_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi, 7)];
328  s->qmat[i].luma_dc_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + y2dc_delta, 7)] * 2;
329  /* 101581>>16 is equivalent to 155/100 */
330  s->qmat[i].luma_dc_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi + y2ac_delta, 7)] * 101581 >> 16;
331  s->qmat[i].chroma_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + uvdc_delta, 7)];
332  s->qmat[i].chroma_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi + uvac_delta, 7)];
333 
334  s->qmat[i].luma_dc_qmul[1] = FFMAX(s->qmat[i].luma_dc_qmul[1], 8);
335  s->qmat[i].chroma_qmul[0] = FFMIN(s->qmat[i].chroma_qmul[0], 132);
336  }
337 }
338 
339 /**
340  * Determine which buffers golden and altref should be updated with after this frame.
341  * The spec isn't clear here, so I'm going by my understanding of what libvpx does
342  *
343  * Intra frames update all 3 references
344  * Inter frames update VP56_FRAME_PREVIOUS if the update_last flag is set
345  * If the update (golden|altref) flag is set, it's updated with the current frame
346  * if update_last is set, and VP56_FRAME_PREVIOUS otherwise.
347  * If the flag is not set, the number read means:
348  * 0: no update
349  * 1: VP56_FRAME_PREVIOUS
350  * 2: update golden with altref, or update altref with golden
351  */
353 {
354  VP56RangeCoder *c = &s->c;
355 
356  if (update)
357  return VP56_FRAME_CURRENT;
358 
359  switch (vp8_rac_get_uint(c, 2)) {
360  case 1:
361  return VP56_FRAME_PREVIOUS;
362  case 2:
364  }
365  return VP56_FRAME_NONE;
366 }
367 
369 {
370  int i, j;
371  for (i = 0; i < 4; i++)
372  for (j = 0; j < 16; j++)
373  memcpy(s->prob->token[i][j], vp8_token_default_probs[i][vp8_coeff_band[j]],
374  sizeof(s->prob->token[i][j]));
375 }
376 
378 {
379  VP56RangeCoder *c = &s->c;
380  int i, j, k, l, m;
381 
382  for (i = 0; i < 4; i++)
383  for (j = 0; j < 8; j++)
384  for (k = 0; k < 3; k++)
385  for (l = 0; l < NUM_DCT_TOKENS-1; l++)
387  int prob = vp8_rac_get_uint(c, 8);
388  for (m = 0; vp8_coeff_band_indexes[j][m] >= 0; m++)
389  s->prob->token[i][vp8_coeff_band_indexes[j][m]][k][l] = prob;
390  }
391 }
392 
393 #define VP7_MVC_SIZE 17
394 #define VP8_MVC_SIZE 19
395 
397  int mvc_size)
398 {
399  VP56RangeCoder *c = &s->c;
400  int i, j;
401 
402  if (vp8_rac_get(c))
403  for (i = 0; i < 4; i++)
404  s->prob->pred16x16[i] = vp8_rac_get_uint(c, 8);
405  if (vp8_rac_get(c))
406  for (i = 0; i < 3; i++)
407  s->prob->pred8x8c[i] = vp8_rac_get_uint(c, 8);
408 
409  // 17.2 MV probability update
410  for (i = 0; i < 2; i++)
411  for (j = 0; j < mvc_size; j++)
413  s->prob->mvc[i][j] = vp8_rac_get_nn(c);
414 }
415 
416 static void update_refs(VP8Context *s)
417 {
418  VP56RangeCoder *c = &s->c;
419 
420  int update_golden = vp8_rac_get(c);
421  int update_altref = vp8_rac_get(c);
422 
423  s->update_golden = ref_to_update(s, update_golden, VP56_FRAME_GOLDEN);
424  s->update_altref = ref_to_update(s, update_altref, VP56_FRAME_GOLDEN2);
425 }
426 
427 static void copy_chroma(AVFrame *dst, AVFrame *src, int width, int height)
428 {
429  int i, j;
430 
431  for (j = 1; j < 3; j++) {
432  for (i = 0; i < height / 2; i++)
433  memcpy(dst->data[j] + i * dst->linesize[j],
434  src->data[j] + i * src->linesize[j], width / 2);
435  }
436 }
437 
/* Apply the VP7 fade to a luma plane:
 *   dst = clip_uint8(src + (src * beta >> 8) + alpha)
 * computed per sample; src and dst may use different line strides. */
static void fade(uint8_t *dst, ptrdiff_t dst_linesize,
                 const uint8_t *src, ptrdiff_t src_linesize,
                 int width, int height,
                 int alpha, int beta)
{
    int x, y;

    for (y = 0; y < height; y++) {
        const uint8_t *src_row = src + y * src_linesize;
        uint8_t       *dst_row = dst + y * dst_linesize;

        for (x = 0; x < width; x++) {
            int luma = src_row[x];
            dst_row[x] = av_clip_uint8(luma + ((luma * beta) >> 8) + alpha);
        }
    }
}
451 
453 {
454  int alpha = (int8_t) vp8_rac_get_uint(c, 8);
455  int beta = (int8_t) vp8_rac_get_uint(c, 8);
456  int ret;
457 
458  if (!s->keyframe && (alpha || beta)) {
459  int width = s->mb_width * 16;
460  int height = s->mb_height * 16;
461  AVFrame *src, *dst;
462 
463  if (!s->framep[VP56_FRAME_PREVIOUS] ||
464  !s->framep[VP56_FRAME_GOLDEN]) {
465  av_log(s->avctx, AV_LOG_WARNING, "Discarding interframe without a prior keyframe!\n");
466  return AVERROR_INVALIDDATA;
467  }
468 
469  dst =
470  src = s->framep[VP56_FRAME_PREVIOUS]->tf.f;
471 
472  /* preserve the golden frame, write a new previous frame */
475  if ((ret = vp8_alloc_frame(s, s->framep[VP56_FRAME_PREVIOUS], 1)) < 0)
476  return ret;
477 
478  dst = s->framep[VP56_FRAME_PREVIOUS]->tf.f;
479 
480  copy_chroma(dst, src, width, height);
481  }
482 
483  fade(dst->data[0], dst->linesize[0],
484  src->data[0], src->linesize[0],
485  width, height, alpha, beta);
486  }
487 
488  return 0;
489 }
490 
/* Parse the VP7 frame header (sections A-J per the comments below) and
 * prepare the context for macroblock decoding.
 * NOTE(review): Doxygen scrape with dropped lines — original lines
 * 537-539, 541, 558, 567, 592-593, 601, 631 and 639 are missing here
 * (they presumably include the pred16x16/pred8x8c default-prob memcpys,
 * the feature present/value reads, the golden-update read, the
 * update_probabilities read, and the token probability update call).
 * Confirm against the original vp8.c before relying on this listing. */
491 static int vp7_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
492 {
493  VP56RangeCoder *c = &s->c;
494  int part1_size, hscale, vscale, i, j, ret;
495  int width = s->avctx->width;
496  int height = s->avctx->height;
497 
498  if (buf_size < 4) {
499  return AVERROR_INVALIDDATA;
500  }
501 
/* frame tag: bit 0 = inter flag, bits 1-3 = profile, rest = part1 size */
502  s->profile = (buf[0] >> 1) & 7;
503  if (s->profile > 1) {
504  avpriv_request_sample(s->avctx, "Unknown profile %d", s->profile);
505  return AVERROR_INVALIDDATA;
506  }
507 
508  s->keyframe = !(buf[0] & 1);
509  s->invisible = 0;
510  part1_size = AV_RL24(buf) >> 4;
511 
512  if (buf_size < 4 - s->profile + part1_size) {
513  av_log(s->avctx, AV_LOG_ERROR, "Buffer size %d is too small, needed : %d\n", buf_size, 4 - s->profile + part1_size);
514  return AVERROR_INVALIDDATA;
515  }
516 
517  buf += 4 - s->profile;
518  buf_size -= 4 - s->profile;
519 
520  memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_epel_pixels_tab, sizeof(s->put_pixels_tab));
521 
522  ret = ff_vp56_init_range_decoder(c, buf, part1_size);
523  if (ret < 0)
524  return ret;
525  buf += part1_size;
526  buf_size -= part1_size;
527 
528  /* A. Dimension information (keyframes only) */
529  if (s->keyframe) {
530  width = vp8_rac_get_uint(c, 12);
531  height = vp8_rac_get_uint(c, 12);
532  hscale = vp8_rac_get_uint(c, 2);
533  vscale = vp8_rac_get_uint(c, 2);
534  if (hscale || vscale)
535  avpriv_request_sample(s->avctx, "Upscaling");
536 
540  sizeof(s->prob->pred16x16));
542  sizeof(s->prob->pred8x8c));
543  for (i = 0; i < 2; i++)
544  memcpy(s->prob->mvc[i], vp7_mv_default_prob[i],
545  sizeof(vp7_mv_default_prob[i]));
546  memset(&s->segmentation, 0, sizeof(s->segmentation));
547  memset(&s->lf_delta, 0, sizeof(s->lf_delta));
548  memcpy(s->prob[0].scan, ff_zigzag_scan, sizeof(s->prob[0].scan));
549  }
550 
551  if (s->keyframe || s->profile > 0)
552  memset(s->inter_dc_pred, 0 , sizeof(s->inter_dc_pred));
553 
554  /* B. Decoding information for all four macroblock-level features */
555  for (i = 0; i < 4; i++) {
556  s->feature_enabled[i] = vp8_rac_get(c);
557  if (s->feature_enabled[i]) {
559 
560  for (j = 0; j < 3; j++)
561  s->feature_index_prob[i][j] =
562  vp8_rac_get(c) ? vp8_rac_get_uint(c, 8) : 255;
563 
564  if (vp7_feature_value_size[s->profile][i])
565  for (j = 0; j < 4; j++)
566  s->feature_value[i][j] =
568  }
569  }
570 
/* VP7 has no segmentation or loop-filter deltas */
571  s->segmentation.enabled = 0;
572  s->segmentation.update_map = 0;
573  s->lf_delta.enabled = 0;
574 
/* VP7 always uses a single coefficient partition */
575  s->num_coeff_partitions = 1;
576  ret = ff_vp56_init_range_decoder(&s->coeff_partition[0], buf, buf_size);
577  if (ret < 0)
578  return ret;
579 
580  if (!s->macroblocks_base || /* first frame */
581  width != s->avctx->width || height != s->avctx->height ||
582  (width + 15) / 16 != s->mb_width || (height + 15) / 16 != s->mb_height) {
583  if ((ret = vp7_update_dimensions(s, width, height)) < 0)
584  return ret;
585  }
586 
587  /* C. Dequantization indices */
588  vp7_get_quants(s);
589 
590  /* D. Golden frame update flag (a Flag) for interframes only */
591  if (!s->keyframe) {
594  }
595 
596  s->update_last = 1;
597  s->update_probabilities = 1;
598  s->fade_present = 1;
599 
600  if (s->profile > 0) {
602  if (!s->update_probabilities)
603  s->prob[1] = s->prob[0];
604 
605  if (!s->keyframe)
606  s->fade_present = vp8_rac_get(c);
607  }
608 
609  /* E. Fading information for previous frame */
610  if (s->fade_present && vp8_rac_get(c)) {
611  if ((ret = vp7_fade_frame(s ,c)) < 0)
612  return ret;
613  }
614 
615  /* F. Loop filter type */
616  if (!s->profile)
617  s->filter.simple = vp8_rac_get(c);
618 
619  /* G. DCT coefficient ordering specification */
620  if (vp8_rac_get(c))
621  for (i = 1; i < 16; i++)
622  s->prob[0].scan[i] = ff_zigzag_scan[vp8_rac_get_uint(c, 4)];
623 
624  /* H. Loop filter levels */
625  if (s->profile > 0)
626  s->filter.simple = vp8_rac_get(c);
627  s->filter.level = vp8_rac_get_uint(c, 6);
628  s->filter.sharpness = vp8_rac_get_uint(c, 3);
629 
630  /* I. DCT coefficient probability update; 13.3 Token Probability Updates */
632 
633  s->mbskip_enabled = 0;
634 
635  /* J. The remaining frame header data occurs ONLY FOR INTERFRAMES */
636  if (!s->keyframe) {
637  s->prob->intra = vp8_rac_get_uint(c, 8);
638  s->prob->last = vp8_rac_get_uint(c, 8);
640  }
641 
642  return 0;
643 }
644 
/* Parse the VP8 uncompressed frame tag plus the compressed frame header
 * and prepare the context for macroblock decoding.
 * NOTE(review): Doxygen scrape with dropped lines — original lines 668,
 * 671, 681, 695-697, 699, 721, 748, 759 and 768 are missing here (they
 * presumably include the put_pixels_tab memcpys, part of the invalid
 * start-code av_log, the keyframe default-probability memcpys and
 * update flags, the parse_segment_info() call, the golden sign-bias
 * read, and the probability-table update calls). Confirm against the
 * original vp8.c before relying on this listing. */
645 static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
646 {
647  VP56RangeCoder *c = &s->c;
648  int header_size, hscale, vscale, ret;
649  int width = s->avctx->width;
650  int height = s->avctx->height;
651 
652  if (buf_size < 3) {
653  av_log(s->avctx, AV_LOG_ERROR, "Insufficent data (%d) for header\n", buf_size);
654  return AVERROR_INVALIDDATA;
655  }
656 
/* 3-byte uncompressed frame tag */
657  s->keyframe = !(buf[0] & 1);
658  s->profile = (buf[0]>>1) & 7;
659  s->invisible = !(buf[0] & 0x10);
660  header_size = AV_RL24(buf) >> 5;
661  buf += 3;
662  buf_size -= 3;
663 
664  if (s->profile > 3)
665  av_log(s->avctx, AV_LOG_WARNING, "Unknown profile %d\n", s->profile);
666 
667  if (!s->profile)
669  sizeof(s->put_pixels_tab));
670  else // profile 1-3 use bilinear, 4+ aren't defined so whatever
672  sizeof(s->put_pixels_tab));
673 
674  if (header_size > buf_size - 7 * s->keyframe) {
675  av_log(s->avctx, AV_LOG_ERROR, "Header size larger than data provided\n");
676  return AVERROR_INVALIDDATA;
677  }
678 
679  if (s->keyframe) {
680  if (AV_RL24(buf) != 0x2a019d) {
682  "Invalid start code 0x%x\n", AV_RL24(buf));
683  return AVERROR_INVALIDDATA;
684  }
/* 14-bit dimensions plus 2-bit upscale factors */
685  width = AV_RL16(buf + 3) & 0x3fff;
686  height = AV_RL16(buf + 5) & 0x3fff;
687  hscale = buf[4] >> 6;
688  vscale = buf[6] >> 6;
689  buf += 7;
690  buf_size -= 7;
691 
692  if (hscale || vscale)
693  avpriv_request_sample(s->avctx, "Upscaling");
694 
698  sizeof(s->prob->pred16x16));
700  sizeof(s->prob->pred8x8c));
701  memcpy(s->prob->mvc, vp8_mv_default_prob,
702  sizeof(s->prob->mvc));
703  memset(&s->segmentation, 0, sizeof(s->segmentation));
704  memset(&s->lf_delta, 0, sizeof(s->lf_delta));
705  }
706 
707  ret = ff_vp56_init_range_decoder(c, buf, header_size);
708  if (ret < 0)
709  return ret;
710  buf += header_size;
711  buf_size -= header_size;
712 
713  if (s->keyframe) {
714  s->colorspace = vp8_rac_get(c);
715  if (s->colorspace)
716  av_log(s->avctx, AV_LOG_WARNING, "Unspecified colorspace\n");
717  s->fullrange = vp8_rac_get(c);
718  }
719 
720  if ((s->segmentation.enabled = vp8_rac_get(c)))
722  else
723  s->segmentation.update_map = 0; // FIXME: move this to some init function?
724 
725  s->filter.simple = vp8_rac_get(c);
726  s->filter.level = vp8_rac_get_uint(c, 6);
727  s->filter.sharpness = vp8_rac_get_uint(c, 3);
728 
729  if ((s->lf_delta.enabled = vp8_rac_get(c)))
730  if (vp8_rac_get(c))
731  update_lf_deltas(s);
732 
733  if (setup_partitions(s, buf, buf_size)) {
734  av_log(s->avctx, AV_LOG_ERROR, "Invalid partitions\n");
735  return AVERROR_INVALIDDATA;
736  }
737 
738  if (!s->macroblocks_base || /* first frame */
739  width != s->avctx->width || height != s->avctx->height ||
740  (width+15)/16 != s->mb_width || (height+15)/16 != s->mb_height)
741  if ((ret = vp8_update_dimensions(s, width, height)) < 0)
742  return ret;
743 
744  vp8_get_quants(s);
745 
746  if (!s->keyframe) {
747  update_refs(s);
749  s->sign_bias[VP56_FRAME_GOLDEN2 /* altref */] = vp8_rac_get(c);
750  }
751 
752  // if we aren't saving this frame's probabilities for future frames,
753  // make a copy of the current probabilities
754  if (!(s->update_probabilities = vp8_rac_get(c)))
755  s->prob[1] = s->prob[0];
756 
757  s->update_last = s->keyframe || vp8_rac_get(c);
758 
760 
761  if ((s->mbskip_enabled = vp8_rac_get(c)))
762  s->prob->mbskip = vp8_rac_get_uint(c, 8);
763 
764  if (!s->keyframe) {
765  s->prob->intra = vp8_rac_get_uint(c, 8);
766  s->prob->last = vp8_rac_get_uint(c, 8);
767  s->prob->golden = vp8_rac_get_uint(c, 8);
769  }
770 
771  return 0;
772 }
773 
774 static av_always_inline
775 void clamp_mv(VP8mvbounds *s, VP56mv *dst, const VP56mv *src)
776 {
777  dst->x = av_clip(src->x, av_clip(s->mv_min.x, INT16_MIN, INT16_MAX),
778  av_clip(s->mv_max.x, INT16_MIN, INT16_MAX));
779  dst->y = av_clip(src->y, av_clip(s->mv_min.y, INT16_MIN, INT16_MAX),
780  av_clip(s->mv_max.y, INT16_MIN, INT16_MAX));
781 }
782 
783 /**
784  * Motion vector coding, 17.1.
785  */
787 {
788  int bit, x = 0;
789 
790  if (vp56_rac_get_prob_branchy(c, p[0])) {
791  int i;
792 
793  for (i = 0; i < 3; i++)
794  x += vp56_rac_get_prob(c, p[9 + i]) << i;
795  for (i = (vp7 ? 7 : 9); i > 3; i--)
796  x += vp56_rac_get_prob(c, p[9 + i]) << i;
797  if (!(x & (vp7 ? 0xF0 : 0xFFF0)) || vp56_rac_get_prob(c, p[12]))
798  x += 8;
799  } else {
800  // small_mvtree
801  const uint8_t *ps = p + 2;
802  bit = vp56_rac_get_prob(c, *ps);
803  ps += 1 + 3 * bit;
804  x += 4 * bit;
805  bit = vp56_rac_get_prob(c, *ps);
806  ps += 1 + bit;
807  x += 2 * bit;
808  x += vp56_rac_get_prob(c, *ps);
809  }
810 
811  return (x && vp56_rac_get_prob(c, p[1])) ? -x : x;
812 }
813 
815 {
816  return read_mv_component(c, p, 1);
817 }
818 
820 {
821  return read_mv_component(c, p, 0);
822 }
823 
824 static av_always_inline
825 const uint8_t *get_submv_prob(uint32_t left, uint32_t top, int is_vp7)
826 {
827  if (is_vp7)
828  return vp7_submv_prob;
829 
830  if (left == top)
831  return vp8_submv_prob[4 - !!left];
832  if (!top)
833  return vp8_submv_prob[2];
834  return vp8_submv_prob[1 - !!left];
835 }
836 
837 /**
838  * Split motion vector prediction, 16.4.
839  * @returns the number of motion vectors parsed (2, 4 or 16)
840  */
841 static av_always_inline
843  int layout, int is_vp7)
844 {
845  int part_idx;
846  int n, num;
847  VP8Macroblock *top_mb;
848  VP8Macroblock *left_mb = &mb[-1];
849  const uint8_t *mbsplits_left = vp8_mbsplits[left_mb->partitioning];
850  const uint8_t *mbsplits_top, *mbsplits_cur, *firstidx;
851  VP56mv *top_mv;
852  VP56mv *left_mv = left_mb->bmv;
853  VP56mv *cur_mv = mb->bmv;
854 
855  if (!layout) // layout is inlined, s->mb_layout is not
856  top_mb = &mb[2];
857  else
858  top_mb = &mb[-s->mb_width - 1];
859  mbsplits_top = vp8_mbsplits[top_mb->partitioning];
860  top_mv = top_mb->bmv;
861 
865  else
866  part_idx = VP8_SPLITMVMODE_8x8;
867  } else {
868  part_idx = VP8_SPLITMVMODE_4x4;
869  }
870 
871  num = vp8_mbsplit_count[part_idx];
872  mbsplits_cur = vp8_mbsplits[part_idx],
873  firstidx = vp8_mbfirstidx[part_idx];
874  mb->partitioning = part_idx;
875 
876  for (n = 0; n < num; n++) {
877  int k = firstidx[n];
878  uint32_t left, above;
879  const uint8_t *submv_prob;
880 
881  if (!(k & 3))
882  left = AV_RN32A(&left_mv[mbsplits_left[k + 3]]);
883  else
884  left = AV_RN32A(&cur_mv[mbsplits_cur[k - 1]]);
885  if (k <= 3)
886  above = AV_RN32A(&top_mv[mbsplits_top[k + 12]]);
887  else
888  above = AV_RN32A(&cur_mv[mbsplits_cur[k - 4]]);
889 
890  submv_prob = get_submv_prob(left, above, is_vp7);
891 
892  if (vp56_rac_get_prob_branchy(c, submv_prob[0])) {
893  if (vp56_rac_get_prob_branchy(c, submv_prob[1])) {
894  if (vp56_rac_get_prob_branchy(c, submv_prob[2])) {
895  mb->bmv[n].y = mb->mv.y +
896  read_mv_component(c, s->prob->mvc[0], is_vp7);
897  mb->bmv[n].x = mb->mv.x +
898  read_mv_component(c, s->prob->mvc[1], is_vp7);
899  } else {
900  AV_ZERO32(&mb->bmv[n]);
901  }
902  } else {
903  AV_WN32A(&mb->bmv[n], above);
904  }
905  } else {
906  AV_WN32A(&mb->bmv[n], left);
907  }
908  }
909 
910  return num;
911 }
912 
/**
 * The vp7 reference decoder uses a padding macroblock column (added to right
 * edge of the frame) to guard against illegal macroblock offsets. The
 * algorithm has bugs that permit offsets to straddle the padding column.
 * This function replicates those bugs.
 *
 * @param[out] edge_x macroblock x address
 * @param[out] edge_y macroblock y address
 *
 * @return macroblock offset legal (boolean)
 */
static int vp7_calculate_mb_offset(int mb_x, int mb_y, int mb_width,
                                   int xoffset, int yoffset, int boundary,
                                   int *edge_x, int *edge_y)
{
    int stride = mb_width + 1;  /* virtual width including the padding column */
    int pos    = (mb_y + yoffset) * stride + mb_x + xoffset;

    /* reject positions before the boundary or on the padding column */
    if (pos < boundary || pos % stride == stride - 1)
        return 0;

    *edge_y = pos / stride;
    *edge_x = pos % stride;
    return 1;
}
936 
937 static const VP56mv *get_bmv_ptr(const VP8Macroblock *mb, int subblock)
938 {
939  return &mb->bmv[mb->mode == VP8_MVMODE_SPLIT ? vp8_mbsplits[mb->partitioning][subblock] : 0];
940 }
941 
942 static av_always_inline
944  int mb_x, int mb_y, int layout)
945 {
946  VP8Macroblock *mb_edge[12];
947  enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR };
948  enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
949  int idx = CNT_ZERO;
950  VP56mv near_mv[3];
951  uint8_t cnt[3] = { 0 };
952  VP56RangeCoder *c = &s->c;
953  int i;
954 
955  AV_ZERO32(&near_mv[0]);
956  AV_ZERO32(&near_mv[1]);
957  AV_ZERO32(&near_mv[2]);
958 
959  for (i = 0; i < VP7_MV_PRED_COUNT; i++) {
960  const VP7MVPred * pred = &vp7_mv_pred[i];
961  int edge_x, edge_y;
962 
963  if (vp7_calculate_mb_offset(mb_x, mb_y, s->mb_width, pred->xoffset,
964  pred->yoffset, !s->profile, &edge_x, &edge_y)) {
965  VP8Macroblock *edge = mb_edge[i] = (s->mb_layout == 1)
966  ? s->macroblocks_base + 1 + edge_x +
967  (s->mb_width + 1) * (edge_y + 1)
968  : s->macroblocks + edge_x +
969  (s->mb_height - edge_y - 1) * 2;
970  uint32_t mv = AV_RN32A(get_bmv_ptr(edge, vp7_mv_pred[i].subblock));
971  if (mv) {
972  if (AV_RN32A(&near_mv[CNT_NEAREST])) {
973  if (mv == AV_RN32A(&near_mv[CNT_NEAREST])) {
974  idx = CNT_NEAREST;
975  } else if (AV_RN32A(&near_mv[CNT_NEAR])) {
976  if (mv != AV_RN32A(&near_mv[CNT_NEAR]))
977  continue;
978  idx = CNT_NEAR;
979  } else {
980  AV_WN32A(&near_mv[CNT_NEAR], mv);
981  idx = CNT_NEAR;
982  }
983  } else {
984  AV_WN32A(&near_mv[CNT_NEAREST], mv);
985  idx = CNT_NEAREST;
986  }
987  } else {
988  idx = CNT_ZERO;
989  }
990  } else {
991  idx = CNT_ZERO;
992  }
993  cnt[idx] += vp7_mv_pred[i].score;
994  }
995 
997 
998  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_ZERO]][0])) {
999  mb->mode = VP8_MVMODE_MV;
1000 
1001  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAREST]][1])) {
1002 
1003  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAR]][2])) {
1004 
1005  if (cnt[CNT_NEAREST] > cnt[CNT_NEAR])
1006  AV_WN32A(&mb->mv, cnt[CNT_ZERO] > cnt[CNT_NEAREST] ? 0 : AV_RN32A(&near_mv[CNT_NEAREST]));
1007  else
1008  AV_WN32A(&mb->mv, cnt[CNT_ZERO] > cnt[CNT_NEAR] ? 0 : AV_RN32A(&near_mv[CNT_NEAR]));
1009 
1010  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAR]][3])) {
1011  mb->mode = VP8_MVMODE_SPLIT;
1012  mb->mv = mb->bmv[decode_splitmvs(s, c, mb, layout, IS_VP7) - 1];
1013  } else {
1014  mb->mv.y += vp7_read_mv_component(c, s->prob->mvc[0]);
1015  mb->mv.x += vp7_read_mv_component(c, s->prob->mvc[1]);
1016  mb->bmv[0] = mb->mv;
1017  }
1018  } else {
1019  mb->mv = near_mv[CNT_NEAR];
1020  mb->bmv[0] = mb->mv;
1021  }
1022  } else {
1023  mb->mv = near_mv[CNT_NEAREST];
1024  mb->bmv[0] = mb->mv;
1025  }
1026  } else {
1027  mb->mode = VP8_MVMODE_ZERO;
1028  AV_ZERO32(&mb->mv);
1029  mb->bmv[0] = mb->mv;
1030  }
1031 }
1032 
1033 static av_always_inline
1035  int mb_x, int mb_y, int layout)
1036 {
1037  VP8Macroblock *mb_edge[3] = { 0 /* top */,
1038  mb - 1 /* left */,
1039  0 /* top-left */ };
1040  enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };
1041  enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
1042  int idx = CNT_ZERO;
1043  int cur_sign_bias = s->sign_bias[mb->ref_frame];
1044  int8_t *sign_bias = s->sign_bias;
1045  VP56mv near_mv[4];
1046  uint8_t cnt[4] = { 0 };
1047  VP56RangeCoder *c = &s->c;
1048 
1049  if (!layout) { // layout is inlined (s->mb_layout is not)
1050  mb_edge[0] = mb + 2;
1051  mb_edge[2] = mb + 1;
1052  } else {
1053  mb_edge[0] = mb - s->mb_width - 1;
1054  mb_edge[2] = mb - s->mb_width - 2;
1055  }
1056 
1057  AV_ZERO32(&near_mv[0]);
1058  AV_ZERO32(&near_mv[1]);
1059  AV_ZERO32(&near_mv[2]);
1060 
1061  /* Process MB on top, left and top-left */
1062 #define MV_EDGE_CHECK(n) \
1063  { \
1064  VP8Macroblock *edge = mb_edge[n]; \
1065  int edge_ref = edge->ref_frame; \
1066  if (edge_ref != VP56_FRAME_CURRENT) { \
1067  uint32_t mv = AV_RN32A(&edge->mv); \
1068  if (mv) { \
1069  if (cur_sign_bias != sign_bias[edge_ref]) { \
1070  /* SWAR negate of the values in mv. */ \
1071  mv = ~mv; \
1072  mv = ((mv & 0x7fff7fff) + \
1073  0x00010001) ^ (mv & 0x80008000); \
1074  } \
1075  if (!n || mv != AV_RN32A(&near_mv[idx])) \
1076  AV_WN32A(&near_mv[++idx], mv); \
1077  cnt[idx] += 1 + (n != 2); \
1078  } else \
1079  cnt[CNT_ZERO] += 1 + (n != 2); \
1080  } \
1081  }
1082 
1083  MV_EDGE_CHECK(0)
1084  MV_EDGE_CHECK(1)
1085  MV_EDGE_CHECK(2)
1086 
1088  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_ZERO]][0])) {
1089  mb->mode = VP8_MVMODE_MV;
1090 
1091  /* If we have three distinct MVs, merge first and last if they're the same */
1092  if (cnt[CNT_SPLITMV] &&
1093  AV_RN32A(&near_mv[1 + VP8_EDGE_TOP]) == AV_RN32A(&near_mv[1 + VP8_EDGE_TOPLEFT]))
1094  cnt[CNT_NEAREST] += 1;
1095 
1096  /* Swap near and nearest if necessary */
1097  if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
1098  FFSWAP(uint8_t, cnt[CNT_NEAREST], cnt[CNT_NEAR]);
1099  FFSWAP( VP56mv, near_mv[CNT_NEAREST], near_mv[CNT_NEAR]);
1100  }
1101 
1102  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAREST]][1])) {
1103  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAR]][2])) {
1104  /* Choose the best mv out of 0,0 and the nearest mv */
1105  clamp_mv(mv_bounds, &mb->mv, &near_mv[CNT_ZERO + (cnt[CNT_NEAREST] >= cnt[CNT_ZERO])]);
1106  cnt[CNT_SPLITMV] = ((mb_edge[VP8_EDGE_LEFT]->mode == VP8_MVMODE_SPLIT) +
1107  (mb_edge[VP8_EDGE_TOP]->mode == VP8_MVMODE_SPLIT)) * 2 +
1108  (mb_edge[VP8_EDGE_TOPLEFT]->mode == VP8_MVMODE_SPLIT);
1109 
1110  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_SPLITMV]][3])) {
1111  mb->mode = VP8_MVMODE_SPLIT;
1112  mb->mv = mb->bmv[decode_splitmvs(s, c, mb, layout, IS_VP8) - 1];
1113  } else {
1114  mb->mv.y += vp8_read_mv_component(c, s->prob->mvc[0]);
1115  mb->mv.x += vp8_read_mv_component(c, s->prob->mvc[1]);
1116  mb->bmv[0] = mb->mv;
1117  }
1118  } else {
1119  clamp_mv(mv_bounds, &mb->mv, &near_mv[CNT_NEAR]);
1120  mb->bmv[0] = mb->mv;
1121  }
1122  } else {
1123  clamp_mv(mv_bounds, &mb->mv, &near_mv[CNT_NEAREST]);
1124  mb->bmv[0] = mb->mv;
1125  }
1126  } else {
1127  mb->mode = VP8_MVMODE_ZERO;
1128  AV_ZERO32(&mb->mv);
1129  mb->bmv[0] = mb->mv;
1130  }
1131 }
1132 
/* Decode the sixteen 4x4 intra prediction sub-modes of an I4x4 macroblock,
 * updating the left/top mode contexts as it goes.
 * NOTE(review): the first signature line was lost in extraction; judging
 * from the body it also takes (VP8Context *s, VP56RangeCoder *c,
 * VP8Macroblock *mb, ...) before the parameters below — confirm upstream. */
static av_always_inline
                           int mb_x, int keyframe, int layout)
{
    uint8_t *intra4x4 = mb->intra4x4_pred_mode_mb;

    if (layout) {
        /* multi-thread layout: the top-row modes live inside the MB array,
         * copy them from the macroblock one row above */
        VP8Macroblock *mb_top = mb - s->mb_width - 1;
        memcpy(mb->intra4x4_pred_mode_top, mb_top->intra4x4_pred_mode_top, 4);
    }
    if (keyframe) {
        int x, y;
        uint8_t *top;
        uint8_t *const left = s->intra4x4_pred_mode_left;
        if (layout)
            top = mb->intra4x4_pred_mode_top;
        else
            top = s->intra4x4_pred_mode_top + 4 * mb_x;
        for (y = 0; y < 4; y++) {
            for (x = 0; x < 4; x++) {
                const uint8_t *ctx;
                /* keyframe sub-modes are coded with a context taken from the
                 * modes above and to the left of each 4x4 block */
                ctx = vp8_pred4x4_prob_intra[top[x]][left[y]];
                *intra4x4 = vp8_rac_get_tree(c, vp8_pred4x4_tree, ctx);
                left[y] = top[x] = *intra4x4;
                intra4x4++;
            }
        }
    } else {
        int i;
        /* inter frames use a single fixed probability table.
         * NOTE(review): the probability-table argument of this call was on
         * a line lost in extraction. */
        for (i = 0; i < 16; i++)
            intra4x4[i] = vp8_rac_get_tree(c, vp8_pred4x4_tree,
    }
}
1167 
/* Parse per-macroblock header data: segment id, skip flag, intra/inter
 * decision, reference frame and motion vectors (spec chapter 16).
 * NOTE(review): the signature head and several statement lines were lost in
 * extraction (they carried hyperlinked identifiers); the body below also
 * uses an mv_bounds parameter that must have been on the lost line. */
static av_always_inline
                    VP8Macroblock *mb, int mb_x, int mb_y,
                    uint8_t *segment, uint8_t *ref, int layout, int is_vp7)
{
    VP56RangeCoder *c = &s->c;
    /* names of the four optional VP7 per-MB features, for logging only */
    static const char * const vp7_feature_name[] = { "q-index",
                                                     "lf-delta",
                                                     "partial-golden-update",
                                                     "blit-pitch" };
    if (is_vp7) {
        int i;
        *segment = 0;
        for (i = 0; i < 4; i++) {
            if (s->feature_enabled[i]) {
                /* NOTE(review): the feature-bit read and the av_log() call
                 * head were lost in extraction */
                                                   s->feature_index_prob[i]);
                       "Feature %s present in macroblock (value 0x%x)\n",
                       vp7_feature_name[i], s->feature_value[i][index]);
                }
            }
        }
    } else if (s->segmentation.update_map) {
        /* explicit per-MB segment id, coded as a 2-level binary tree */
        int bit = vp56_rac_get_prob(c, s->prob->segmentid[0]);
        *segment = vp56_rac_get_prob(c, s->prob->segmentid[1+bit]) + 2*bit;
    } else if (s->segmentation.enabled)
        /* no map update: inherit the segment id from the co-located MB */
        *segment = ref ? *ref : *segment;
    mb->segment = *segment;

    mb->skip = s->mbskip_enabled ? vp56_rac_get_prob(c, s->prob->mbskip) : 0;

    if (s->keyframe) {
        /* NOTE(review): the keyframe luma-mode read was lost in extraction */

        if (mb->mode == MODE_I4x4) {
            decode_intra4x4_modes(s, c, mb, mb_x, 1, layout);
        } else {
            /* broadcast the implied 4x4 mode to all 16 block positions */
            const uint32_t modes = (is_vp7 ? vp7_pred4x4_mode
                                           : vp8_pred4x4_mode)[mb->mode] * 0x01010101u;
            if (s->mb_layout)
                AV_WN32A(mb->intra4x4_pred_mode_top, modes);
            else
                AV_WN32A(s->intra4x4_pred_mode_top + 4 * mb_x, modes);
            AV_WN32A(s->intra4x4_pred_mode_left, modes);
        }

        /* NOTE(review): the keyframe chroma-mode read was lost here */
    } else if (vp56_rac_get_prob_branchy(c, s->prob->intra)) {
        // inter MB, 16.2
        if (vp56_rac_get_prob_branchy(c, s->prob->last))
            mb->ref_frame =
                (!is_vp7 && vp56_rac_get_prob(c, s->prob->golden)) ? VP56_FRAME_GOLDEN2 /* altref */
        else
        /* NOTE(review): lines assigning the GOLDEN/PREVIOUS ref were lost */
        s->ref_count[mb->ref_frame - 1]++;

        // motion vectors, 16.3
        if (is_vp7)
            vp7_decode_mvs(s, mb, mb_x, mb_y, layout);
        else
            vp8_decode_mvs(s, mv_bounds, mb, mb_x, mb_y, layout);
    } else {
        // intra MB, 16.1
        /* NOTE(review): the intra luma-mode read was lost in extraction */

        if (mb->mode == MODE_I4x4)
            decode_intra4x4_modes(s, c, mb, mb_x, 0, layout);

        /* NOTE(review): the head of the chroma-mode read was lost */
                                s->prob->pred8x8c);
        AV_ZERO32(&mb->bmv[0]);
    }
}
1249 
1250 /**
1251  * @param r arithmetic bitstream reader context
1252  * @param block destination for block coefficients
1253  * @param probs probabilities to use when reading trees from the bitstream
1254  * @param i initial coeff index, 0 unless a separate DC block is coded
1255  * @param qmul array holding the dc/ac dequant factor at position 0/1
1256  *
1257  * @return 0 if no coeffs were decoded
1258  * otherwise, the index of the last coeff decoded plus one
1259  */
/* Token-level DCT coefficient decoder; see the doxygen block above for the
 * parameter contract. Shared by VP7 and VP8; `vp7` is a compile-time flag.
 * NOTE(review): the signature line carrying the function name and first
 * parameters (VP56RangeCoder *r, int16_t block[16]) was lost in
 * extraction. */
static av_always_inline
                                        uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
                                        int i, uint8_t *token_prob, int16_t qmul[2],
                                        const uint8_t scan[16], int vp7)
{
    /* work on a stack copy of the range coder; it is written back on exit */
    VP56RangeCoder c = *r;
    goto skip_eob;
    do {
        int coeff;
restart:
        if (!vp56_rac_get_prob_branchy(&c, token_prob[0])) // DCT_EOB
            break;

skip_eob:
        if (!vp56_rac_get_prob_branchy(&c, token_prob[1])) { // DCT_0
            if (++i == 16)
                break; // invalid input; blocks should end with EOB
            token_prob = probs[i][0];
            /* VP7 re-checks for EOB after every zero, VP8 does not */
            if (vp7)
                goto restart;
            goto skip_eob;
        }

        if (!vp56_rac_get_prob_branchy(&c, token_prob[2])) { // DCT_1
            coeff = 1;
            token_prob = probs[i + 1][1];
        } else {
            if (!vp56_rac_get_prob_branchy(&c, token_prob[3])) { // DCT 2,3,4
                coeff = vp56_rac_get_prob_branchy(&c, token_prob[4]);
                if (coeff)
                    coeff += vp56_rac_get_prob(&c, token_prob[5]);
                coeff += 2;
            } else {
                // DCT_CAT*
                if (!vp56_rac_get_prob_branchy(&c, token_prob[6])) {
                    if (!vp56_rac_get_prob_branchy(&c, token_prob[7])) { // DCT_CAT1
                        coeff = 5 + vp56_rac_get_prob(&c, vp8_dct_cat1_prob[0]);
                    } else { // DCT_CAT2
                        coeff = 7;
                        coeff += vp56_rac_get_prob(&c, vp8_dct_cat2_prob[0]) << 1;
                        coeff += vp56_rac_get_prob(&c, vp8_dct_cat2_prob[1]);
                    }
                } else { // DCT_CAT3 and up
                    int a = vp56_rac_get_prob(&c, token_prob[8]);
                    int b = vp56_rac_get_prob(&c, token_prob[9 + a]);
                    int cat = (a << 1) + b;
                    /* each category starts at 3 + 8 * 2^cat and adds
                     * cat-specific extra bits */
                    coeff = 3 + (8 << cat);
                    coeff += vp8_rac_get_coeff(&c, ff_vp8_dct_cat_prob[cat]);
                }
            }
            token_prob = probs[i + 1][2];
        }
        /* sign bit, then dequantize: qmul[0] for DC, qmul[1] for AC */
        block[scan[i]] = (vp8_rac_get(&c) ? -coeff : coeff) * qmul[!!i];
    } while (++i < 16);

    *r = c;
    return i;
}
1319 
1320 static av_always_inline
1321 int inter_predict_dc(int16_t block[16], int16_t pred[2])
1322 {
1323  int16_t dc = block[0];
1324  int ret = 0;
1325 
1326  if (pred[1] > 3) {
1327  dc += pred[0];
1328  ret = 1;
1329  }
1330 
1331  if (!pred[0] | !dc | ((int32_t)pred[0] ^ (int32_t)dc) >> 31) {
1332  block[0] = pred[0] = dc;
1333  pred[1] = 0;
1334  } else {
1335  if (pred[0] == dc)
1336  pred[1]++;
1337  block[0] = pred[0] = dc;
1338  }
1339 
1340  return ret;
1341 }
1342 
/* VP7 wrapper: forwards to decode_block_coeffs_internal with an explicit
 * scan table and vp7=1 so the shared decoder specializes at compile time.
 * NOTE(review): the signature head (return type, name and the
 * VP56RangeCoder *r parameter) was lost in extraction. */
                                            int16_t block[16],
                                            uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
                                            int i, uint8_t *token_prob,
                                            int16_t qmul[2],
                                            const uint8_t scan[16])
{
    return decode_block_coeffs_internal(r, block, probs, i,
                                        token_prob, qmul, scan, IS_VP7);
}
1353 
/* VP8 wrapper: uses the fixed zigzag scan and vp8 specialization. Guarded
 * by #ifndef so an arch-specific (e.g. ARM) implementation can replace it.
 * NOTE(review): the signature head (return type, name and the
 * VP56RangeCoder *r parameter) was lost in extraction. */
#ifndef vp8_decode_block_coeffs_internal
                                            int16_t block[16],
                                            uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
                                            int i, uint8_t *token_prob,
                                            int16_t qmul[2])
{
    return decode_block_coeffs_internal(r, block, probs, i,
                                        token_prob, qmul, ff_zigzag_scan, IS_VP8);
}
#endif
1365 
1366 /**
1367  * @param c arithmetic bitstream reader context
1368  * @param block destination for block coefficients
1369  * @param probs probabilities to use when reading trees from the bitstream
1370  * @param i initial coeff index, 0 unless a separate DC block is coded
1371  * @param zero_nhood the initial prediction context for number of surrounding
1372  * all-zero blocks (only left/top, so 0-2)
1373  * @param qmul array holding the dc/ac dequant factor at position 0/1
1374  * @param scan scan pattern (VP7 only)
1375  *
1376  * @return 0 if no coeffs were decoded
1377  * otherwise, the index of the last coeff decoded plus one
1378  */
/* Entry point for block coefficient decoding; see the doxygen block above
 * for the parameter contract. Handles the immediate-EOB fast path, then
 * dispatches to the VP7 or VP8 specialized decoder.
 * NOTE(review): the signature line carrying the function name was lost in
 * extraction. */
static av_always_inline
                        uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
                        int i, int zero_nhood, int16_t qmul[2],
                        const uint8_t scan[16], int vp7)
{
    uint8_t *token_prob = probs[i][zero_nhood];
    if (!vp56_rac_get_prob_branchy(c, token_prob[0])) // DCT_EOB
        return 0;
    return vp7 ? vp7_decode_block_coeffs_internal(c, block, probs, i,
                                                  token_prob, qmul, scan)
               : vp8_decode_block_coeffs_internal(c, block, probs, i,
                                                  token_prob, qmul);
}
1393 
/* Decode all coefficients of one macroblock: the optional WHT-coded luma DC
 * plane, the 16 luma AC blocks and the 8 chroma blocks, while maintaining
 * the top (t_nnz) and left (l_nnz) non-zero contexts used for probability
 * selection.
 * NOTE(review): the signature line carrying the function name was lost in
 * extraction; the body uses s, td and c which must have been declared
 * there. */
static av_always_inline
                      VP8Macroblock *mb, uint8_t t_nnz[9], uint8_t l_nnz[9],
                      int is_vp7)
{
    int i, x, y, luma_start = 0, luma_ctx = 3;
    int nnz_pred, nnz, nnz_total = 0;
    int segment = mb->segment;
    int block_dc = 0;

    /* modes with a separate DC transform: decode the DC plane first, then
     * start luma AC decoding at coefficient index 1 with token context 0 */
    if (mb->mode != MODE_I4x4 && (is_vp7 || mb->mode != VP8_MVMODE_SPLIT)) {
        nnz_pred = t_nnz[8] + l_nnz[8];

        // decode DC values and do hadamard
        nnz = decode_block_coeffs(c, td->block_dc, s->prob->token[1], 0,
                                  nnz_pred, s->qmat[segment].luma_dc_qmul,
                                  ff_zigzag_scan, is_vp7);
        l_nnz[8] = t_nnz[8] = !!nnz;

        if (is_vp7 && mb->mode > MODE_I4x4) {
            /* VP7 additionally predicts the inter DC from a running
             * per-reference predictor */
            nnz |= inter_predict_dc(td->block_dc,
                                    s->inter_dc_pred[mb->ref_frame - 1]);
        }

        if (nnz) {
            nnz_total += nnz;
            block_dc = 1;
            /* inverse WHT scatters the DC values into the 16 luma blocks */
            if (nnz == 1)
                s->vp8dsp.vp8_luma_dc_wht_dc(td->block, td->block_dc);
            else
                s->vp8dsp.vp8_luma_dc_wht(td->block, td->block_dc);
        }
        luma_start = 1;
        luma_ctx = 0;
    }

    // luma blocks
    for (y = 0; y < 4; y++)
        for (x = 0; x < 4; x++) {
            nnz_pred = l_nnz[y] + t_nnz[x];
            nnz = decode_block_coeffs(c, td->block[y][x],
                                      s->prob->token[luma_ctx],
                                      luma_start, nnz_pred,
                                      s->qmat[segment].luma_qmul,
                                      s->prob[0].scan, is_vp7);
            /* nnz+block_dc may be one more than the actual last index,
             * but we don't care */
            td->non_zero_count_cache[y][x] = nnz + block_dc;
            t_nnz[x] = l_nnz[y] = !!nnz;
            nnz_total += nnz;
        }

    // chroma blocks
    // TODO: what to do about dimensions? 2nd dim for luma is x,
    // but for chroma it's (y<<1)|x
    for (i = 4; i < 6; i++)
        for (y = 0; y < 2; y++)
            for (x = 0; x < 2; x++) {
                nnz_pred = l_nnz[i + 2 * y] + t_nnz[i + 2 * x];
                nnz = decode_block_coeffs(c, td->block[i][(y << 1) + x],
                                          s->prob->token[2], 0, nnz_pred,
                                          s->qmat[segment].chroma_qmul,
                                          s->prob[0].scan, is_vp7);
                td->non_zero_count_cache[i][(y << 1) + x] = nnz;
                t_nnz[i + 2 * x] = l_nnz[i + 2 * y] = !!nnz;
                nnz_total += nnz;
            }

    // if there were no coded coeffs despite the macroblock not being marked skip,
    // we MUST not do the inner loop filter and should not do IDCT
    // Since skip isn't used for bitstream prediction, just manually set it.
    if (!nnz_total)
        mb->skip = 1;
}
1468 
1469 static av_always_inline
1470 void backup_mb_border(uint8_t *top_border, uint8_t *src_y,
1471  uint8_t *src_cb, uint8_t *src_cr,
1472  ptrdiff_t linesize, ptrdiff_t uvlinesize, int simple)
1473 {
1474  AV_COPY128(top_border, src_y + 15 * linesize);
1475  if (!simple) {
1476  AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
1477  AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
1478  }
1479 }
1480 
1481 static av_always_inline
1482 void xchg_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb,
1483  uint8_t *src_cr, ptrdiff_t linesize, ptrdiff_t uvlinesize, int mb_x,
1484  int mb_y, int mb_width, int simple, int xchg)
1485 {
1486  uint8_t *top_border_m1 = top_border - 32; // for TL prediction
1487  src_y -= linesize;
1488  src_cb -= uvlinesize;
1489  src_cr -= uvlinesize;
1490 
1491 #define XCHG(a, b, xchg) \
1492  do { \
1493  if (xchg) \
1494  AV_SWAP64(b, a); \
1495  else \
1496  AV_COPY64(b, a); \
1497  } while (0)
1498 
1499  XCHG(top_border_m1 + 8, src_y - 8, xchg);
1500  XCHG(top_border, src_y, xchg);
1501  XCHG(top_border + 8, src_y + 8, 1);
1502  if (mb_x < mb_width - 1)
1503  XCHG(top_border + 32, src_y + 16, 1);
1504 
1505  // only copy chroma for normal loop filter
1506  // or to initialize the top row to 127
1507  if (!simple || !mb_y) {
1508  XCHG(top_border_m1 + 16, src_cb - 8, xchg);
1509  XCHG(top_border_m1 + 24, src_cr - 8, xchg);
1510  XCHG(top_border + 16, src_cb, 1);
1511  XCHG(top_border + 24, src_cr, 1);
1512  }
1513 }
1514 
1515 static av_always_inline
1516 int check_dc_pred8x8_mode(int mode, int mb_x, int mb_y)
1517 {
1518  if (!mb_x)
1519  return mb_y ? TOP_DC_PRED8x8 : DC_128_PRED8x8;
1520  else
1521  return mb_y ? mode : LEFT_DC_PRED8x8;
1522 }
1523 
1524 static av_always_inline
1525 int check_tm_pred8x8_mode(int mode, int mb_x, int mb_y, int vp7)
1526 {
1527  if (!mb_x)
1528  return mb_y ? VERT_PRED8x8 : (vp7 ? DC_128_PRED8x8 : DC_129_PRED8x8);
1529  else
1530  return mb_y ? mode : HOR_PRED8x8;
1531 }
1532 
1533 static av_always_inline
1534 int check_intra_pred8x8_mode_emuedge(int mode, int mb_x, int mb_y, int vp7)
1535 {
1536  switch (mode) {
1537  case DC_PRED8x8:
1538  return check_dc_pred8x8_mode(mode, mb_x, mb_y);
1539  case VERT_PRED8x8:
1540  return !mb_y ? (vp7 ? DC_128_PRED8x8 : DC_127_PRED8x8) : mode;
1541  case HOR_PRED8x8:
1542  return !mb_x ? (vp7 ? DC_128_PRED8x8 : DC_129_PRED8x8) : mode;
1543  case PLANE_PRED8x8: /* TM */
1544  return check_tm_pred8x8_mode(mode, mb_x, mb_y, vp7);
1545  }
1546  return mode;
1547 }
1548 
1549 static av_always_inline
1550 int check_tm_pred4x4_mode(int mode, int mb_x, int mb_y, int vp7)
1551 {
1552  if (!mb_x) {
1553  return mb_y ? VERT_VP8_PRED : (vp7 ? DC_128_PRED : DC_129_PRED);
1554  } else {
1555  return mb_y ? mode : HOR_VP8_PRED;
1556  }
1557 }
1558 
1559 static av_always_inline
1560 int check_intra_pred4x4_mode_emuedge(int mode, int mb_x, int mb_y,
1561  int *copy_buf, int vp7)
1562 {
1563  switch (mode) {
1564  case VERT_PRED:
1565  if (!mb_x && mb_y) {
1566  *copy_buf = 1;
1567  return mode;
1568  }
1569  /* fall-through */
1570  case DIAG_DOWN_LEFT_PRED:
1571  case VERT_LEFT_PRED:
1572  return !mb_y ? (vp7 ? DC_128_PRED : DC_127_PRED) : mode;
1573  case HOR_PRED:
1574  if (!mb_y) {
1575  *copy_buf = 1;
1576  return mode;
1577  }
1578  /* fall-through */
1579  case HOR_UP_PRED:
1580  return !mb_x ? (vp7 ? DC_128_PRED : DC_129_PRED) : mode;
1581  case TM_VP8_PRED:
1582  return check_tm_pred4x4_mode(mode, mb_x, mb_y, vp7);
1583  case DC_PRED: /* 4x4 DC doesn't use the same "H.264-style" exceptions
1584  * as 16x16/8x8 DC */
1585  case DIAG_DOWN_RIGHT_PRED:
1586  case VERT_RIGHT_PRED:
1587  case HOR_DOWN_PRED:
1588  if (!mb_y || !mb_x)
1589  *copy_buf = 1;
1590  return mode;
1591  }
1592  return mode;
1593 }
1594 
/* Perform intra prediction for one macroblock (luma 16x16 or 16 4x4
 * sub-blocks, then chroma 8x8) and apply the IDCT for the 4x4 path.
 * NOTE(review): three lines were lost in extraction: the signature head
 * (based on the body it takes VP8Context *s, VP8ThreadData *td,
 * uint8_t *dst[3] before the parameters below), the statement guarded by
 * `if (mb->skip)`, and the head of the chroma-mode check call near the
 * bottom — confirm against the upstream file. */
static av_always_inline
                   VP8Macroblock *mb, int mb_x, int mb_y, int is_vp7)
{
    int x, y, mode, nnz;
    uint32_t tr;

    /* for the first row, we need to run xchg_mb_border to init the top edge
     * to 127. Otherwise, skip it if we aren't going to deblock.
     * NOTE(review): `(s->deblock_filter || !mb_y)` is redundant under the
     * leading `mb_y &&` guard — kept as-is. */
    if (mb_y && (s->deblock_filter || !mb_y) && td->thread_nr == 0)
        xchg_mb_border(s->top_border[mb_x + 1], dst[0], dst[1], dst[2],
                       s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
                       s->filter.simple, 1);

    if (mb->mode < MODE_I4x4) {
        /* whole-macroblock 16x16 prediction */
        mode = check_intra_pred8x8_mode_emuedge(mb->mode, mb_x, mb_y, is_vp7);
        s->hpc.pred16x16[mode](dst[0], s->linesize);
    } else {
        uint8_t *ptr = dst[0];
        uint8_t *intra4x4 = mb->intra4x4_pred_mode_mb;
        /* edge fill values: VP7 uses 128 everywhere, VP8 127/129 */
        const uint8_t lo = is_vp7 ? 128 : 127;
        const uint8_t hi = is_vp7 ? 128 : 129;
        uint8_t tr_top[4] = { lo, lo, lo, lo };

        // all blocks on the right edge of the macroblock use the bottom edge
        // of the top macroblock for their topright edge
        uint8_t *tr_right = ptr - s->linesize + 16;

        // if we're on the right edge of the frame, said edge is extended
        // from the top macroblock
        if (mb_y && mb_x == s->mb_width - 1) {
            tr = tr_right[-1] * 0x01010101u;
            tr_right = (uint8_t *) &tr;
        }

        /* NOTE(review): the statement of this branch was lost in
         * extraction */
        if (mb->skip)

        for (y = 0; y < 4; y++) {
            uint8_t *topright = ptr + 4 - s->linesize;
            for (x = 0; x < 4; x++) {
                int copy = 0;
                ptrdiff_t linesize = s->linesize;
                uint8_t *dst = ptr + 4 * x;
                LOCAL_ALIGNED(4, uint8_t, copy_dst, [5 * 8]);

                if ((y == 0 || x == 3) && mb_y == 0) {
                    topright = tr_top;
                } else if (x == 3)
                    topright = tr_right;

                mode = check_intra_pred4x4_mode_emuedge(intra4x4[x], mb_x + x,
                                                        mb_y + y, &copy, is_vp7);
                if (copy) {
                    /* predict into a small bounce buffer whose borders are
                     * filled with edge-extension values first */
                    dst = copy_dst + 12;
                    linesize = 8;
                    if (!(mb_y + y)) {
                        copy_dst[3] = lo;
                        AV_WN32A(copy_dst + 4, lo * 0x01010101U);
                    } else {
                        AV_COPY32(copy_dst + 4, ptr + 4 * x - s->linesize);
                        if (!(mb_x + x)) {
                            copy_dst[3] = hi;
                        } else {
                            copy_dst[3] = ptr[4 * x - s->linesize - 1];
                        }
                    }
                    if (!(mb_x + x)) {
                        copy_dst[11] =
                        copy_dst[19] =
                        copy_dst[27] =
                        copy_dst[35] = hi;
                    } else {
                        copy_dst[11] = ptr[4 * x - 1];
                        copy_dst[19] = ptr[4 * x + s->linesize - 1];
                        copy_dst[27] = ptr[4 * x + s->linesize * 2 - 1];
                        copy_dst[35] = ptr[4 * x + s->linesize * 3 - 1];
                    }
                }
                s->hpc.pred4x4[mode](dst, topright, linesize);
                if (copy) {
                    /* copy the predicted 4x4 block back into the frame */
                    AV_COPY32(ptr + 4 * x, copy_dst + 12);
                    AV_COPY32(ptr + 4 * x + s->linesize, copy_dst + 20);
                    AV_COPY32(ptr + 4 * x + s->linesize * 2, copy_dst + 28);
                    AV_COPY32(ptr + 4 * x + s->linesize * 3, copy_dst + 36);
                }

                nnz = td->non_zero_count_cache[y][x];
                if (nnz) {
                    if (nnz == 1)
                        s->vp8dsp.vp8_idct_dc_add(ptr + 4 * x,
                                                  td->block[y][x], s->linesize);
                    else
                        s->vp8dsp.vp8_idct_add(ptr + 4 * x,
                                               td->block[y][x], s->linesize);
                }
                topright += 4;
            }

            ptr += 4 * s->linesize;
            intra4x4 += 4;
        }
    }

    /* NOTE(review): the head of this chroma-mode check call was lost in
     * extraction */
                    mb_x, mb_y, is_vp7);
    s->hpc.pred8x8[mode](dst[1], s->uvlinesize);
    s->hpc.pred8x8[mode](dst[2], s->uvlinesize);

    if (mb_y && (s->deblock_filter || !mb_y) && td->thread_nr == 0)
        xchg_mb_border(s->top_border[mb_x + 1], dst[0], dst[1], dst[2],
                       s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
                       s->filter.simple, 0);
}
1709 
/* Subpel edge-extension lookup tables, indexed by the 3-bit subpel phase
 * (luma: (mv * 2) & 7, chroma: mv & 7 — see vp8_mc_luma/vp8_mc_chroma). */
static const uint8_t subpel_idx[3][8] = {
    { 0, 1, 2, 1, 2, 1, 2, 1 }, // nr. of left extra pixels,
                                // also function pointer index
    { 0, 3, 5, 3, 5, 3, 5, 3 }, // nr. of extra pixels required
    { 0, 2, 3, 2, 3, 2, 3, 2 }, // nr. of right extra pixels
};
1716 
1717 /**
1718  * luma MC function
1719  *
1720  * @param s VP8 decoding context
1721  * @param dst target buffer for block data at block position
1722  * @param ref reference picture buffer at origin (0, 0)
1723  * @param mv motion vector (relative to block position) to get pixel data from
1724  * @param x_off horizontal position of block from origin (0, 0)
1725  * @param y_off vertical position of block from origin (0, 0)
1726  * @param block_w width of block (16, 8 or 4)
1727  * @param block_h height of block (always same as block_w)
1728  * @param width width of src/dst plane data
1729  * @param height height of src/dst plane data
1730  * @param linesize size of a single line of plane data, including padding
1731  * @param mc_func motion compensation function pointers (bilinear or sixtap MC)
1732  */
/* Luma motion compensation; the doxygen block above documents the
 * parameters.
 * NOTE(review): two lines were lost in extraction: the signature head
 * (named vp8_mc_luma per the call sites in vp8_mc_part/inter_predict) and
 * the head of the edge-emulation call (an s->vdsp.emulated_edge_mc-style
 * call writing into td->edge_emu_buffer, per the lines that follow). */
static av_always_inline
                 ThreadFrame *ref, const VP56mv *mv,
                 int x_off, int y_off, int block_w, int block_h,
                 int width, int height, ptrdiff_t linesize,
                 vp8_mc_func mc_func[3][3])
{
    uint8_t *src = ref->f->data[0];

    if (AV_RN32A(mv)) {
        /* non-zero MV: compute the quarter-pel phase and integer offset */
        ptrdiff_t src_linesize = linesize;

        int mx = (mv->x * 2) & 7, mx_idx = subpel_idx[0][mx];
        int my = (mv->y * 2) & 7, my_idx = subpel_idx[0][my];

        x_off += mv->x >> 2;
        y_off += mv->y >> 2;

        // edge emulation
        /* wait until the reference rows this block reads are decoded */
        ff_thread_await_progress(ref, (3 + y_off + block_h + subpel_idx[2][my]) >> 4, 0);
        src += y_off * linesize + x_off;
        if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
            y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
            /* NOTE(review): the call head was lost in extraction */
                                     src - my_idx * linesize - mx_idx,
                                     EDGE_EMU_LINESIZE, linesize,
                                     block_w + subpel_idx[1][mx],
                                     block_h + subpel_idx[1][my],
                                     x_off - mx_idx, y_off - my_idx,
                                     width, height);
            src = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
            src_linesize = EDGE_EMU_LINESIZE;
        }
        mc_func[my_idx][mx_idx](dst, linesize, src, src_linesize, block_h, mx, my);
    } else {
        /* zero MV: plain copy, no interpolation margin needed */
        ff_thread_await_progress(ref, (3 + y_off + block_h) >> 4, 0);
        mc_func[0][0](dst, linesize, src + y_off * linesize + x_off,
                      linesize, block_h, 0, 0);
    }
}
1773 
1774 /**
1775  * chroma MC function
1776  *
1777  * @param s VP8 decoding context
1778  * @param dst1 target buffer for block data at block position (U plane)
1779  * @param dst2 target buffer for block data at block position (V plane)
1780  * @param ref reference picture buffer at origin (0, 0)
1781  * @param mv motion vector (relative to block position) to get pixel data from
1782  * @param x_off horizontal position of block from origin (0, 0)
1783  * @param y_off vertical position of block from origin (0, 0)
1784  * @param block_w width of block (16, 8 or 4)
1785  * @param block_h height of block (always same as block_w)
1786  * @param width width of src/dst plane data
1787  * @param height height of src/dst plane data
1788  * @param linesize size of a single line of plane data, including padding
1789  * @param mc_func motion compensation function pointers (bilinear or sixtap MC)
1790  */
/* Chroma motion compensation for both the U and V planes; the doxygen block
 * above documents the parameters.
 * NOTE(review): three lines were lost in extraction: the signature head
 * (named vp8_mc_chroma per the call sites) and the heads of the two
 * edge-emulation calls writing into td->edge_emu_buffer. */
static av_always_inline
                   uint8_t *dst2, ThreadFrame *ref, const VP56mv *mv,
                   int x_off, int y_off, int block_w, int block_h,
                   int width, int height, ptrdiff_t linesize,
                   vp8_mc_func mc_func[3][3])
{
    uint8_t *src1 = ref->f->data[1], *src2 = ref->f->data[2];

    if (AV_RN32A(mv)) {
        /* non-zero MV: eighth-pel phase and integer offset */
        int mx = mv->x & 7, mx_idx = subpel_idx[0][mx];
        int my = mv->y & 7, my_idx = subpel_idx[0][my];

        x_off += mv->x >> 3;
        y_off += mv->y >> 3;

        // edge emulation
        src1 += y_off * linesize + x_off;
        src2 += y_off * linesize + x_off;
        /* wait until the reference rows this block reads are decoded */
        ff_thread_await_progress(ref, (3 + y_off + block_h + subpel_idx[2][my]) >> 3, 0);
        if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
            y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
            /* NOTE(review): edge-emulation call head lost in extraction */
                                     src1 - my_idx * linesize - mx_idx,
                                     EDGE_EMU_LINESIZE, linesize,
                                     block_w + subpel_idx[1][mx],
                                     block_h + subpel_idx[1][my],
                                     x_off - mx_idx, y_off - my_idx, width, height);
            src1 = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
            mc_func[my_idx][mx_idx](dst1, linesize, src1, EDGE_EMU_LINESIZE, block_h, mx, my);

            /* NOTE(review): edge-emulation call head lost in extraction */
                                     src2 - my_idx * linesize - mx_idx,
                                     EDGE_EMU_LINESIZE, linesize,
                                     block_w + subpel_idx[1][mx],
                                     block_h + subpel_idx[1][my],
                                     x_off - mx_idx, y_off - my_idx, width, height);
            src2 = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
            mc_func[my_idx][mx_idx](dst2, linesize, src2, EDGE_EMU_LINESIZE, block_h, mx, my);
        } else {
            mc_func[my_idx][mx_idx](dst1, linesize, src1, linesize, block_h, mx, my);
            mc_func[my_idx][mx_idx](dst2, linesize, src2, linesize, block_h, mx, my);
        }
    } else {
        /* zero MV: plain copy of both chroma planes */
        ff_thread_await_progress(ref, (3 + y_off + block_h) >> 3, 0);
        mc_func[0][0](dst1, linesize, src1 + y_off * linesize + x_off, linesize, block_h, 0, 0);
        mc_func[0][0](dst2, linesize, src2 + y_off * linesize + x_off, linesize, block_h, 0, 0);
    }
}
1840 
/* Motion-compensate one partition (luma plus the corresponding chroma
 * region) at the given block offset/size inside the macroblock.
 * NOTE(review): the signature head was lost in extraction; the body uses
 * s, td and dst[3] which must have been declared there. */
static av_always_inline
                 ThreadFrame *ref_frame, int x_off, int y_off,
                 int bx_off, int by_off, int block_w, int block_h,
                 int width, int height, VP56mv *mv)
{
    VP56mv uvmv = *mv;

    /* Y */
    vp8_mc_luma(s, td, dst[0] + by_off * s->linesize + bx_off,
                ref_frame, mv, x_off + bx_off, y_off + by_off,
                block_w, block_h, width, height, s->linesize,
                s->put_pixels_tab[block_w == 8]);

    /* U/V */
    if (s->profile == 3) {
        /* this block only applies to VP8; it is safe to check
         * only the profile, as VP7 profile <= 1 */
        uvmv.x &= ~7;
        uvmv.y &= ~7;
    }
    /* halve all geometry for the 4:2:0 chroma planes */
    x_off   >>= 1;
    y_off   >>= 1;
    bx_off  >>= 1;
    by_off  >>= 1;
    width   >>= 1;
    height  >>= 1;
    block_w >>= 1;
    block_h >>= 1;
    vp8_mc_chroma(s, td, dst[1] + by_off * s->uvlinesize + bx_off,
                  dst[2] + by_off * s->uvlinesize + bx_off, ref_frame,
                  &uvmv, x_off + bx_off, y_off + by_off,
                  block_w, block_h, width, height, s->uvlinesize,
                  s->put_pixels_tab[1 + (block_w == 4)]);
}
1876 
1877 /* Fetch pixels for estimated mv 4 macroblocks ahead.
1878  * Optimized for 64-byte cache lines. Inspired by ffh264 prefetch_motion. */
1879 static av_always_inline
1880 void prefetch_motion(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y,
1881  int mb_xy, int ref)
1882 {
1883  /* Don't prefetch refs that haven't been used very often this frame. */
1884  if (s->ref_count[ref - 1] > (mb_xy >> 5)) {
1885  int x_off = mb_x << 4, y_off = mb_y << 4;
1886  int mx = (mb->mv.x >> 2) + x_off + 8;
1887  int my = (mb->mv.y >> 2) + y_off;
1888  uint8_t **src = s->framep[ref]->tf.f->data;
1889  int off = mx + (my + (mb_x & 3) * 4) * s->linesize + 64;
1890  /* For threading, a ff_thread_await_progress here might be useful, but
1891  * it actually slows down the decoder. Since a bad prefetch doesn't
1892  * generate bad decoder output, we don't run it here. */
1893  s->vdsp.prefetch(src[0] + off, s->linesize, 4);
1894  off = (mx >> 1) + ((my >> 1) + (mb_x & 7)) * s->uvlinesize + 64;
1895  s->vdsp.prefetch(src[1] + off, src[2] - src[1], 2);
1896  }
1897 }
1898 
1899 /**
1900  * Apply motion vectors to prediction buffer, chapter 18.
1901  */
/**
 * Apply motion vectors to prediction buffer, chapter 18.
 * Dispatches on the macroblock's partitioning: one MC call for a whole MB,
 * or per-partition calls for 16x8/8x16/8x8/4x4 split modes.
 * NOTE(review): the signature head was lost in extraction; the body uses
 * s, td and dst[3] which must have been declared there.
 */
static av_always_inline
                   VP8Macroblock *mb, int mb_x, int mb_y)
{
    int x_off = mb_x << 4, y_off = mb_y << 4;
    int width = 16 * s->mb_width, height = 16 * s->mb_height;
    ThreadFrame *ref = &s->framep[mb->ref_frame]->tf;
    VP56mv *bmv = mb->bmv;

    switch (mb->partitioning) {
    case VP8_SPLITMVMODE_NONE:
        vp8_mc_part(s, td, dst, ref, x_off, y_off,
                    0, 0, 16, 16, width, height, &mb->mv);
        break;
    case VP8_SPLITMVMODE_4x4: {
        int x, y;
        VP56mv uvmv;

        /* Y */
        for (y = 0; y < 4; y++) {
            for (x = 0; x < 4; x++) {
                vp8_mc_luma(s, td, dst[0] + 4 * y * s->linesize + x * 4,
                            ref, &bmv[4 * y + x],
                            4 * x + x_off, 4 * y + y_off, 4, 4,
                            width, height, s->linesize,
                            s->put_pixels_tab[2]);
            }
        }

        /* U/V */
        x_off  >>= 1;
        y_off  >>= 1;
        width  >>= 1;
        height >>= 1;
        for (y = 0; y < 2; y++) {
            for (x = 0; x < 2; x++) {
                /* chroma MV = rounded average of the 4 covering luma MVs */
                uvmv.x = mb->bmv[2 * y * 4 + 2 * x ].x +
                         mb->bmv[2 * y * 4 + 2 * x + 1].x +
                         mb->bmv[(2 * y + 1) * 4 + 2 * x ].x +
                         mb->bmv[(2 * y + 1) * 4 + 2 * x + 1].x;
                uvmv.y = mb->bmv[2 * y * 4 + 2 * x ].y +
                         mb->bmv[2 * y * 4 + 2 * x + 1].y +
                         mb->bmv[(2 * y + 1) * 4 + 2 * x ].y +
                         mb->bmv[(2 * y + 1) * 4 + 2 * x + 1].y;
                uvmv.x = (uvmv.x + 2 + FF_SIGNBIT(uvmv.x)) >> 2;
                uvmv.y = (uvmv.y + 2 + FF_SIGNBIT(uvmv.y)) >> 2;
                if (s->profile == 3) {
                    /* VP8 profile 3: full-pel chroma MVs only */
                    uvmv.x &= ~7;
                    uvmv.y &= ~7;
                }
                vp8_mc_chroma(s, td, dst[1] + 4 * y * s->uvlinesize + x * 4,
                              dst[2] + 4 * y * s->uvlinesize + x * 4, ref,
                              &uvmv, 4 * x + x_off, 4 * y + y_off, 4, 4,
                              width, height, s->uvlinesize,
                              s->put_pixels_tab[2]);
            }
        }
        break;
    }
    case VP8_SPLITMVMODE_16x8:
        vp8_mc_part(s, td, dst, ref, x_off, y_off,
                    0, 0, 16, 8, width, height, &bmv[0]);
        vp8_mc_part(s, td, dst, ref, x_off, y_off,
                    0, 8, 16, 8, width, height, &bmv[1]);
        break;
    case VP8_SPLITMVMODE_8x16:
        vp8_mc_part(s, td, dst, ref, x_off, y_off,
                    0, 0, 8, 16, width, height, &bmv[0]);
        vp8_mc_part(s, td, dst, ref, x_off, y_off,
                    8, 0, 8, 16, width, height, &bmv[1]);
        break;
    case VP8_SPLITMVMODE_8x8:
        vp8_mc_part(s, td, dst, ref, x_off, y_off,
                    0, 0, 8, 8, width, height, &bmv[0]);
        vp8_mc_part(s, td, dst, ref, x_off, y_off,
                    8, 0, 8, 8, width, height, &bmv[1]);
        vp8_mc_part(s, td, dst, ref, x_off, y_off,
                    0, 8, 8, 8, width, height, &bmv[2]);
        vp8_mc_part(s, td, dst, ref, x_off, y_off,
                    8, 8, 8, 8, width, height, &bmv[3]);
        break;
    }
}
1985 
/* Apply the inverse DCT and add the residual to the predicted macroblock,
 * using the per-4x4 non-zero counts cached by decode_mb_coeffs to pick the
 * cheapest IDCT variant (DC-only, full, or 4-at-once).
 * NOTE(review): the signature head was lost in extraction; the body uses
 * s, td, dst[3] and mb which must have been declared there. */
static av_always_inline
{
    int x, y, ch;

    /* luma: I4x4 blocks were already reconstructed during intra_predict */
    if (mb->mode != MODE_I4x4) {
        uint8_t *y_dst = dst[0];
        for (y = 0; y < 4; y++) {
            /* read the 4 per-block counts of this row as one 32-bit word */
            uint32_t nnz4 = AV_RL32(td->non_zero_count_cache[y]);
            if (nnz4) {
                if (nnz4 & ~0x01010101) {
                    /* mixed counts: per-block dispatch */
                    for (x = 0; x < 4; x++) {
                        if ((uint8_t) nnz4 == 1)
                            s->vp8dsp.vp8_idct_dc_add(y_dst + 4 * x,
                                                      td->block[y][x],
                                                      s->linesize);
                        else if ((uint8_t) nnz4 > 1)
                            s->vp8dsp.vp8_idct_add(y_dst + 4 * x,
                                                   td->block[y][x],
                                                   s->linesize);
                        nnz4 >>= 8;
                        if (!nnz4)
                            break;
                    }
                } else {
                    /* all blocks DC-only: batched variant */
                    s->vp8dsp.vp8_idct_dc_add4y(y_dst, td->block[y], s->linesize);
                }
            }
            y_dst += 4 * s->linesize;
        }
    }

    for (ch = 0; ch < 2; ch++) {
        uint32_t nnz4 = AV_RL32(td->non_zero_count_cache[4 + ch]);
        if (nnz4) {
            uint8_t *ch_dst = dst[1 + ch];
            if (nnz4 & ~0x01010101) {
                for (y = 0; y < 2; y++) {
                    for (x = 0; x < 2; x++) {
                        if ((uint8_t) nnz4 == 1)
                            s->vp8dsp.vp8_idct_dc_add(ch_dst + 4 * x,
                                                      td->block[4 + ch][(y << 1) + x],
                                                      s->uvlinesize);
                        else if ((uint8_t) nnz4 > 1)
                            s->vp8dsp.vp8_idct_add(ch_dst + 4 * x,
                                                   td->block[4 + ch][(y << 1) + x],
                                                   s->uvlinesize);
                        nnz4 >>= 8;
                        if (!nnz4)
                            goto chroma_idct_end;
                    }
                    ch_dst += 4 * s->uvlinesize;
                }
            } else {
                s->vp8dsp.vp8_idct_dc_add4uv(ch_dst, td->block[4 + ch], s->uvlinesize);
            }
        }
chroma_idct_end:
        ;
    }
}
2047 
/* Compute the loop-filter strength parameters for one macroblock from the
 * frame-level filter settings, segmentation and loop-filter deltas, and
 * store them in *f.
 * NOTE(review): the signature head was lost in extraction; the body uses
 * s and mb which must have been declared there. */
static av_always_inline
                         VP8FilterStrength *f, int is_vp7)
{
    int interior_limit, filter_level;

    if (s->segmentation.enabled) {
        filter_level = s->segmentation.filter_level[mb->segment];
        /* segment levels are deltas unless absolute_vals is set */
        if (!s->segmentation.absolute_vals)
            filter_level += s->filter.level;
    } else
        filter_level = s->filter.level;

    if (s->lf_delta.enabled) {
        /* per-reference and per-mode adjustments */
        filter_level += s->lf_delta.ref[mb->ref_frame];
        filter_level += s->lf_delta.mode[mb->mode];
    }

    filter_level = av_clip_uintp2(filter_level, 6);

    interior_limit = filter_level;
    if (s->filter.sharpness) {
        interior_limit >>= (s->filter.sharpness + 3) >> 2;
        interior_limit = FFMIN(interior_limit, 9 - s->filter.sharpness);
    }
    interior_limit = FFMAX(interior_limit, 1);

    f->filter_level = filter_level;
    f->inner_limit = interior_limit;
    /* inner (sub-block) edges are skipped for skipped whole-MB inter
     * blocks in VP8; VP7 always filters them */
    f->inner_filter = is_vp7 || !mb->skip || mb->mode == MODE_I4x4 ||
                      mb->mode == VP8_MVMODE_SPLIT;
}
2080 
/* Run the normal (full) loop filter over one macroblock: macroblock edges
 * first where a left/top neighbour exists, then the interior 4-pixel edges
 * when inner filtering is enabled. VP7 filters the horizontal inner luma
 * edges before the vertical pass, VP8 after.
 * NOTE(review): the signature head was lost in extraction; the body uses
 * s and dst[3] which must have been declared there. */
static av_always_inline
                      int mb_x, int mb_y, int is_vp7)
{
    int mbedge_lim, bedge_lim_y, bedge_lim_uv, hev_thresh;
    int filter_level = f->filter_level;
    int inner_limit = f->inner_limit;
    int inner_filter = f->inner_filter;
    ptrdiff_t linesize = s->linesize;
    ptrdiff_t uvlinesize = s->uvlinesize;
    /* high-edge-variance threshold by [keyframe][filter_level] */
    static const uint8_t hev_thresh_lut[2][64] = {
        { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
          2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
          3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
          3, 3, 3, 3 },
        { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
          1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
          2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
          2, 2, 2, 2 }
    };

    if (!filter_level)
        return;

    /* edge limits differ between VP7 and VP8 */
    if (is_vp7) {
        bedge_lim_y = filter_level;
        bedge_lim_uv = filter_level * 2;
        mbedge_lim = filter_level + 2;
    } else {
        bedge_lim_y =
        bedge_lim_uv = filter_level * 2 + inner_limit;
        mbedge_lim = bedge_lim_y + 4;
    }

    hev_thresh = hev_thresh_lut[s->keyframe][filter_level];

    /* left macroblock edge (only when a left neighbour exists) */
    if (mb_x) {
        s->vp8dsp.vp8_h_loop_filter16y(dst[0], linesize,
                                       mbedge_lim, inner_limit, hev_thresh);
        s->vp8dsp.vp8_h_loop_filter8uv(dst[1], dst[2], uvlinesize,
                                       mbedge_lim, inner_limit, hev_thresh);
    }

/* inner vertical (column) edges of the macroblock; placed in a macro so it
 * can run before (VP7) or after (VP8) the horizontal pass */
#define H_LOOP_FILTER_16Y_INNER(cond)                                         \
    if (cond && inner_filter) {                                               \
        s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 4, linesize,            \
                                             bedge_lim_y, inner_limit,        \
                                             hev_thresh);                     \
        s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 8, linesize,            \
                                             bedge_lim_y, inner_limit,        \
                                             hev_thresh);                     \
        s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 12, linesize,           \
                                             bedge_lim_y, inner_limit,        \
                                             hev_thresh);                     \
        s->vp8dsp.vp8_h_loop_filter8uv_inner(dst[1] + 4, dst[2] + 4,          \
                                             uvlinesize, bedge_lim_uv,        \
                                             inner_limit, hev_thresh);        \
    }

    H_LOOP_FILTER_16Y_INNER(!is_vp7)

    /* top macroblock edge (only when a top neighbour exists) */
    if (mb_y) {
        s->vp8dsp.vp8_v_loop_filter16y(dst[0], linesize,
                                       mbedge_lim, inner_limit, hev_thresh);
        s->vp8dsp.vp8_v_loop_filter8uv(dst[1], dst[2], uvlinesize,
                                       mbedge_lim, inner_limit, hev_thresh);
    }

    if (inner_filter) {
        s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0] + 4 * linesize,
                                             linesize, bedge_lim_y,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0] + 8 * linesize,
                                             linesize, bedge_lim_y,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0] + 12 * linesize,
                                             linesize, bedge_lim_y,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_v_loop_filter8uv_inner(dst[1] + 4 * uvlinesize,
                                             dst[2] + 4 * uvlinesize,
                                             uvlinesize, bedge_lim_uv,
                                             inner_limit, hev_thresh);
    }

    H_LOOP_FILTER_16Y_INNER(is_vp7)
}
2167 
/* Simple-profile loop filter: luma-only, applied to the MB edge and the
 * three inner edges in both directions. Chroma is untouched in this mode.
 * NOTE(review): original line 2169 (first half of the signature, likely
 * VP8Context *s, uint8_t *dst, VP8FilterStrength *f, ...) is missing
 * from this extraction. */
2168 static av_always_inline
2170  int mb_x, int mb_y)
2171 {
2172  int mbedge_lim, bedge_lim;
2173  int filter_level = f->filter_level;
2174  int inner_limit = f->inner_limit;
2175  int inner_filter = f->inner_filter;
2176  ptrdiff_t linesize = s->linesize;
2177 
2178  if (!filter_level)
2179  return;
2180 
2181  bedge_lim = 2 * filter_level + inner_limit;
2182  mbedge_lim = bedge_lim + 4;
2183 
 /* Vertical edges: MB-left edge (not at frame border), then inner. */
2184  if (mb_x)
2185  s->vp8dsp.vp8_h_loop_filter_simple(dst, linesize, mbedge_lim);
2186  if (inner_filter) {
2187  s->vp8dsp.vp8_h_loop_filter_simple(dst + 4, linesize, bedge_lim);
2188  s->vp8dsp.vp8_h_loop_filter_simple(dst + 8, linesize, bedge_lim);
2189  s->vp8dsp.vp8_h_loop_filter_simple(dst + 12, linesize, bedge_lim);
2190  }
2191 
 /* Horizontal edges: MB-top edge (not at frame border), then inner. */
2192  if (mb_y)
2193  s->vp8dsp.vp8_v_loop_filter_simple(dst, linesize, mbedge_lim);
2194  if (inner_filter) {
2195  s->vp8dsp.vp8_v_loop_filter_simple(dst + 4 * linesize, linesize, bedge_lim);
2196  s->vp8dsp.vp8_v_loop_filter_simple(dst + 8 * linesize, linesize, bedge_lim);
2197  s->vp8dsp.vp8_v_loop_filter_simple(dst + 12 * linesize, linesize, bedge_lim);
2198  }
2199 }
2200 
/* MARGIN: MV clamp slack around the frame, in 1/4-pel units (16 MBs-worth
 * of 4 sub-pel steps). */
2201 #define MARGIN (16 << 2)
/* Decode macroblock modes and motion vectors for the whole frame in one
 * pass (used with the alternate mb_layout where mode parsing is separated
 * from reconstruction).
 * NOTE(review): extraction dropped original lines 2203 (signature start,
 * presumably void vp78_decode_mv_mb_modes(AVCodecContext *avctx,
 * VP8Frame *curframe, ...) and 2212 (the declaration half of the
 * `mb = s->macroblocks_base + ...` statement) — confirm upstream. */
2202 static av_always_inline
2204  VP8Frame *prev_frame, int is_vp7)
2205 {
2206  VP8Context *s = avctx->priv_data;
2207  int mb_x, mb_y;
2208 
2209  s->mv_bounds.mv_min.y = -MARGIN;
2210  s->mv_bounds.mv_max.y = ((s->mb_height - 1) << 6) + MARGIN;
2211  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
2213  ((s->mb_width + 1) * (mb_y + 1) + 1);
2214  int mb_xy = mb_y * s->mb_width;
2215 
 /* Reset left-neighbour intra prediction modes to DC at row start. */
2216  AV_WN32A(s->intra4x4_pred_mode_left, DC_PRED * 0x01010101);
2217 
2218  s->mv_bounds.mv_min.x = -MARGIN;
2219  s->mv_bounds.mv_max.x = ((s->mb_width - 1) << 6) + MARGIN;
2220  for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
2221  if (mb_y == 0)
2222  AV_WN32A((mb - s->mb_width - 1)->intra4x4_pred_mode_top,
2223  DC_PRED * 0x01010101);
 /* prev_frame's segmentation map may be reused when the current
  * frame does not update it. */
2224  decode_mb_mode(s, &s->mv_bounds, mb, mb_x, mb_y, curframe->seg_map->data + mb_xy,
2225  prev_frame && prev_frame->seg_map ?
2226  prev_frame->seg_map->data + mb_xy : NULL, 1, is_vp7);
 /* Slide the MV clamp window one MB (64 quarter-pels) right/down. */
2227  s->mv_bounds.mv_min.x -= 64;
2228  s->mv_bounds.mv_max.x -= 64;
2229  }
2230  s->mv_bounds.mv_min.y -= 64;
2231  s->mv_bounds.mv_max.y -= 64;
2232  }
2233 }
2234 
/* VP7 entry point for whole-frame mode/MV parsing; see vp78_decode_mv_mb_modes. */
2235 static void vp7_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame,
2236  VP8Frame *prev_frame)
2237 {
2238  vp78_decode_mv_mb_modes(avctx, cur_frame, prev_frame, IS_VP7);
2239 }
2240 
/* VP8 entry point for whole-frame mode/MV parsing; see vp78_decode_mv_mb_modes. */
2241 static void vp8_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame,
2242  VP8Frame *prev_frame)
2243 {
2244  vp78_decode_mv_mb_modes(avctx, cur_frame, prev_frame, IS_VP8);
2245 }
2246 
2247 #if HAVE_THREADS
/* Block until thread `otd` has decoded at least up to (mb_x_check,
 * mb_y_check). Position is packed as (mb_y << 16) | mb_x, so a single
 * atomic integer compare orders (row, column) pairs. wait_mb_pos
 * advertises what this thread is waiting for so the producer knows
 * when to broadcast. */
2248 #define check_thread_pos(td, otd, mb_x_check, mb_y_check) \
2249  do { \
2250  int tmp = (mb_y_check << 16) | (mb_x_check & 0xFFFF); \
2251  if (atomic_load(&otd->thread_mb_pos) < tmp) { \
2252  pthread_mutex_lock(&otd->lock); \
2253  atomic_store(&td->wait_mb_pos, tmp); \
2254  do { \
2255  if (atomic_load(&otd->thread_mb_pos) >= tmp) \
2256  break; \
2257  pthread_cond_wait(&otd->cond, &otd->lock); \
2258  } while (1); \
2259  atomic_store(&td->wait_mb_pos, INT_MAX); \
2260  pthread_mutex_unlock(&otd->lock); \
2261  } \
2262  } while (0)
2263 
/* Publish this thread's decode position and wake neighbours that are
 * waiting on it. NOTE(review): deliberately reads `avctx`, `num_jobs`,
 * `next_td` and `prev_td` from the *enclosing function's* scope — do not
 * rename those locals in callers. */
2264 #define update_pos(td, mb_y, mb_x) \
2265  do { \
2266  int pos = (mb_y << 16) | (mb_x & 0xFFFF); \
2267  int sliced_threading = (avctx->active_thread_type == FF_THREAD_SLICE) && \
2268  (num_jobs > 1); \
2269  int is_null = !next_td || !prev_td; \
2270  int pos_check = (is_null) ? 1 : \
2271  (next_td != td && pos >= atomic_load(&next_td->wait_mb_pos)) || \
2272  (prev_td != td && pos >= atomic_load(&prev_td->wait_mb_pos)); \
2273  atomic_store(&td->thread_mb_pos, pos); \
2274  if (sliced_threading && pos_check) { \
2275  pthread_mutex_lock(&td->lock); \
2276  pthread_cond_broadcast(&td->cond); \
2277  pthread_mutex_unlock(&td->lock); \
2278  } \
2279  } while (0)
2280 #else
/* Single-thread builds: both synchronisation points become no-ops.
 * NOTE(review): `while(0)` (not `do {} while(0)`) — works where used but
 * is fragile in an if/else without braces; consider do{}while(0) upstream. */
2281 #define check_thread_pos(td, otd, mb_x_check, mb_y_check) while(0)
2282 #define update_pos(td, mb_y, mb_x) while(0)
2283 #endif
2284 
/* Decode one macroblock row (mode parse where needed, coefficients,
 * intra/inter prediction, IDCT) WITHOUT loop filtering; filtering is done
 * separately by filter_mb_row(). Returns 0 or AVERROR_INVALIDDATA on
 * range-coder overread.
 * NOTE(review): extraction dropped original line 2285 (signature start,
 * presumably static av_always_inline int decode_mb_row_no_filter(
 * AVCodecContext *avctx, void *tdata, ...) and line 2319 (second half of
 * the segmentation condition) — confirm upstream. */
2286  int jobnr, int threadnr, int is_vp7)
2287 {
2288  VP8Context *s = avctx->priv_data;
2289  VP8ThreadData *prev_td, *next_td, *td = &s->thread_data[threadnr];
 /* Row index was stashed in the packed thread position by the sliced
  * driver (vp78_decode_mb_row_sliced). */
2290  int mb_y = atomic_load(&td->thread_mb_pos) >> 16;
2291  int mb_x, mb_xy = mb_y * s->mb_width;
2292  int num_jobs = s->num_jobs;
2293  VP8Frame *curframe = s->curframe, *prev_frame = s->prev_frame;
 /* Coefficient partitions are striped across rows. */
2294  VP56RangeCoder *c = &s->coeff_partition[mb_y & (s->num_coeff_partitions - 1)];
2295  VP8Macroblock *mb;
2296  uint8_t *dst[3] = {
2297  curframe->tf.f->data[0] + 16 * mb_y * s->linesize,
2298  curframe->tf.f->data[1] + 8 * mb_y * s->uvlinesize,
2299  curframe->tf.f->data[2] + 8 * mb_y * s->uvlinesize
2300  };
2301 
 /* Range coder already exhausted -> corrupt input. */
2302  if (c->end <= c->buffer && c->bits >= 0)
2303  return AVERROR_INVALIDDATA;
2304 
 /* Neighbour rows are handled by adjacent jobs (modulo num_jobs); first
  * and last rows point at self so the sync macros become no-ops. */
2305  if (mb_y == 0)
2306  prev_td = td;
2307  else
2308  prev_td = &s->thread_data[(jobnr + num_jobs - 1) % num_jobs];
2309  if (mb_y == s->mb_height - 1)
2310  next_td = td;
2311  else
2312  next_td = &s->thread_data[(jobnr + 1) % num_jobs];
2313  if (s->mb_layout == 1)
2314  mb = s->macroblocks_base + ((s->mb_width + 1) * (mb_y + 1) + 1);
2315  else {
2316  // Make sure the previous frame has read its segmentation map,
2317  // if we re-use the same map.
2318  if (prev_frame && s->segmentation.enabled &&
2320  ff_thread_await_progress(&prev_frame->tf, mb_y, 0);
2321  mb = s->macroblocks + (s->mb_height - mb_y - 1) * 2;
2322  memset(mb - 1, 0, sizeof(*mb)); // zero left macroblock
2323  AV_WN32A(s->intra4x4_pred_mode_left, DC_PRED * 0x01010101);
2324  }
2325 
 /* VP8 resets left-neighbour nnz every row; VP7 only at frame start. */
2326  if (!is_vp7 || mb_y == 0)
2327  memset(td->left_nnz, 0, sizeof(td->left_nnz));
2328 
2329  td->mv_bounds.mv_min.x = -MARGIN;
2330  td->mv_bounds.mv_max.x = ((s->mb_width - 1) << 6) + MARGIN;
2331 
2332  for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
2333  if (c->end <= c->buffer && c->bits >= 0)
2334  return AVERROR_INVALIDDATA;
2335  // Wait for previous thread to read mb_x+2, and reach mb_y-1.
2336  if (prev_td != td) {
2337  if (threadnr != 0) {
2338  check_thread_pos(td, prev_td,
2339  mb_x + (is_vp7 ? 2 : 1),
2340  mb_y - (is_vp7 ? 2 : 1));
2341  } else {
 /* Thread 0's predecessor also runs the filter pass, whose
  * positions are offset by mb_width + 3 (see filter_mb_row). */
2342  check_thread_pos(td, prev_td,
2343  mb_x + (is_vp7 ? 2 : 1) + s->mb_width + 3,
2344  mb_y - (is_vp7 ? 2 : 1));
2345  }
2346  }
2347 
2348  s->vdsp.prefetch(dst[0] + (mb_x & 3) * 4 * s->linesize + 64,
2349  s->linesize, 4);
2350  s->vdsp.prefetch(dst[1] + (mb_x & 7) * s->uvlinesize + 64,
2351  dst[2] - dst[1], 2);
2352 
 /* In the interleaved layout, modes are parsed here rather than in a
  * separate whole-frame pass. */
2353  if (!s->mb_layout)
2354  decode_mb_mode(s, &td->mv_bounds, mb, mb_x, mb_y, curframe->seg_map->data + mb_xy,
2355  prev_frame && prev_frame->seg_map ?
2356  prev_frame->seg_map->data + mb_xy : NULL, 0, is_vp7);
2357 
2358  prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_PREVIOUS);
2359 
2360  if (!mb->skip)
2361  decode_mb_coeffs(s, td, c, mb, s->top_nnz[mb_x], td->left_nnz, is_vp7);
2362 
2363  if (mb->mode <= MODE_I4x4)
2364  intra_predict(s, td, dst, mb, mb_x, mb_y, is_vp7);
2365  else
2366  inter_predict(s, td, dst, mb, mb_x, mb_y);
2367 
2368  prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN);
2369 
2370  if (!mb->skip) {
2371  idct_mb(s, td, dst, mb);
2372  } else {
2373  AV_ZERO64(td->left_nnz);
2374  AV_WN64(s->top_nnz[mb_x], 0); // array of 9, so unaligned
2375 
2376  /* Reset DC block predictors if they would exist
2377  * if the mb had coefficients */
2378  if (mb->mode != MODE_I4x4 && mb->mode != VP8_MVMODE_SPLIT) {
2379  td->left_nnz[8] = 0;
2380  s->top_nnz[mb_x][8] = 0;
2381  }
2382  }
2383 
2384  if (s->deblock_filter)
2385  filter_level_for_mb(s, mb, &td->filter_strength[mb_x], is_vp7);
2386 
 /* Multi-job: the last job backs up the border rows the next MB row
  * will need as top context before the filter pass overwrites them. */
2387  if (s->deblock_filter && num_jobs != 1 && threadnr == num_jobs - 1) {
2388  if (s->filter.simple)
2389  backup_mb_border(s->top_border[mb_x + 1], dst[0],
2390  NULL, NULL, s->linesize, 0, 1);
2391  else
2392  backup_mb_border(s->top_border[mb_x + 1], dst[0],
2393  dst[1], dst[2], s->linesize, s->uvlinesize, 0);
2394  }
2395 
2396  prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN2);
2397 
2398  dst[0] += 16;
2399  dst[1] += 8;
2400  dst[2] += 8;
2401  td->mv_bounds.mv_min.x -= 64;
2402  td->mv_bounds.mv_max.x -= 64;
2403 
 /* NOTE(review): mb_x == s->mb_width + 1 is unreachable inside this
  * loop (mb_x < s->mb_width); presumably a leftover — verify upstream. */
2404  if (mb_x == s->mb_width + 1) {
2405  update_pos(td, mb_y, s->mb_width + 3);
2406  } else {
2407  update_pos(td, mb_y, mb_x);
2408  }
2409  }
2410  return 0;
2411 }
2412 
/* VP7 row-decode entry point; see decode_mb_row_no_filter. */
2413 static int vp7_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata,
2414  int jobnr, int threadnr)
2415 {
2416  return decode_mb_row_no_filter(avctx, tdata, jobnr, threadnr, 1);
2417 }
2418 
/* VP8 row-decode entry point; see decode_mb_row_no_filter. */
2419 static int vp8_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata,
2420  int jobnr, int threadnr)
2421 {
2422  return decode_mb_row_no_filter(avctx, tdata, jobnr, threadnr, 0);
2423 }
2424 
/* Loop-filter one macroblock row using the per-MB strengths computed by
 * decode_mb_row_no_filter. Positions published via update_pos are offset
 * by mb_width + 3 so the filter pass is distinguishable from the decode
 * pass in the packed progress counter. */
2425 static av_always_inline void filter_mb_row(AVCodecContext *avctx, void *tdata,
2426  int jobnr, int threadnr, int is_vp7)
2427 {
2428  VP8Context *s = avctx->priv_data;
2429  VP8ThreadData *td = &s->thread_data[threadnr];
2430  int mb_x, mb_y = atomic_load(&td->thread_mb_pos) >> 16, num_jobs = s->num_jobs;
2431  AVFrame *curframe = s->curframe->tf.f;
2432  VP8Macroblock *mb;
2433  VP8ThreadData *prev_td, *next_td;
2434  uint8_t *dst[3] = {
2435  curframe->data[0] + 16 * mb_y * s->linesize,
2436  curframe->data[1] + 8 * mb_y * s->uvlinesize,
2437  curframe->data[2] + 8 * mb_y * s->uvlinesize
2438  };
2439 
2440  if (s->mb_layout == 1)
2441  mb = s->macroblocks_base + ((s->mb_width + 1) * (mb_y + 1) + 1);
2442  else
2443  mb = s->macroblocks + (s->mb_height - mb_y - 1) * 2;
2444 
 /* Same neighbour-job mapping as decode_mb_row_no_filter. */
2445  if (mb_y == 0)
2446  prev_td = td;
2447  else
2448  prev_td = &s->thread_data[(jobnr + num_jobs - 1) % num_jobs];
2449  if (mb_y == s->mb_height - 1)
2450  next_td = td;
2451  else
2452  next_td = &s->thread_data[(jobnr + 1) % num_jobs];
2453 
2454  for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb++) {
2455  VP8FilterStrength *f = &td->filter_strength[mb_x];
 /* Wait until the row above has been filtered past mb_x... */
2456  if (prev_td != td)
2457  check_thread_pos(td, prev_td,
2458  (mb_x + 1) + (s->mb_width + 3), mb_y - 1);
 /* ...and the row below has been decoded past mb_x (filtering reads
  * pixels across the bottom MB edge). */
2459  if (next_td != td)
2460  if (next_td != &s->thread_data[0])
2461  check_thread_pos(td, next_td, mb_x + 1, mb_y + 1);
2462 
 /* Single job: border backup happens here instead of in the decode
  * pass (compare decode_mb_row_no_filter). */
2463  if (num_jobs == 1) {
2464  if (s->filter.simple)
2465  backup_mb_border(s->top_border[mb_x + 1], dst[0],
2466  NULL, NULL, s->linesize, 0, 1);
2467  else
2468  backup_mb_border(s->top_border[mb_x + 1], dst[0],
2469  dst[1], dst[2], s->linesize, s->uvlinesize, 0);
2470  }
2471 
2472  if (s->filter.simple)
2473  filter_mb_simple(s, dst[0], f, mb_x, mb_y);
2474  else
2475  filter_mb(s, dst, f, mb_x, mb_y, is_vp7);
2476  dst[0] += 16;
2477  dst[1] += 8;
2478  dst[2] += 8;
2479 
2480  update_pos(td, mb_y, (s->mb_width + 3) + mb_x);
2481  }
2482 }
2483 
/* VP7 filter-row entry point; see filter_mb_row. */
2484 static void vp7_filter_mb_row(AVCodecContext *avctx, void *tdata,
2485  int jobnr, int threadnr)
2486 {
2487  filter_mb_row(avctx, tdata, jobnr, threadnr, 1);
2488 }
2489 
/* VP8 filter-row entry point; see filter_mb_row. */
2490 static void vp8_filter_mb_row(AVCodecContext *avctx, void *tdata,
2491  int jobnr, int threadnr)
2492 {
2493  filter_mb_row(avctx, tdata, jobnr, threadnr, 0);
2494 }
2495 
/* Per-job driver for sliced threading: job `jobnr` decodes (and filters)
 * rows jobnr, jobnr + num_jobs, jobnr + 2*num_jobs, ... Returns 0 or the
 * first row-decode error. */
2496 static av_always_inline
2497 int vp78_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr,
2498  int threadnr, int is_vp7)
2499 {
2500  VP8Context *s = avctx->priv_data;
2501  VP8ThreadData *td = &s->thread_data[jobnr];
 /* NULL next/prev make update_pos's is_null path take effect. */
2502  VP8ThreadData *next_td = NULL, *prev_td = NULL;
2503  VP8Frame *curframe = s->curframe;
2504  int mb_y, num_jobs = s->num_jobs;
2505  int ret;
2506 
2507  td->thread_nr = threadnr;
 /* MV clamp window starts offset by this job's first row. */
2508  td->mv_bounds.mv_min.y = -MARGIN - 64 * threadnr;
2509  td->mv_bounds.mv_max.y = ((s->mb_height - 1) << 6) + MARGIN - 64 * threadnr;
2510  for (mb_y = jobnr; mb_y < s->mb_height; mb_y += num_jobs) {
 /* Row index is passed to the row workers via the packed position. */
2511  atomic_store(&td->thread_mb_pos, mb_y << 16);
2512  ret = s->decode_mb_row_no_filter(avctx, tdata, jobnr, threadnr);
2513  if (ret < 0) {
 /* Mark the job complete so waiting peers are not deadlocked. */
2514  update_pos(td, s->mb_height, INT_MAX & 0xFFFF);
2515  return ret;
2516  }
2517  if (s->deblock_filter)
2518  s->filter_mb_row(avctx, tdata, jobnr, threadnr);
2519  update_pos(td, mb_y, INT_MAX & 0xFFFF);
2520 
2521  td->mv_bounds.mv_min.y -= 64 * num_jobs;
2522  td->mv_bounds.mv_max.y -= 64 * num_jobs;
2523 
2524  if (avctx->active_thread_type == FF_THREAD_FRAME)
2525  ff_thread_report_progress(&curframe->tf, mb_y, 0);
2526  }
2527 
2528  return 0;
2529 }
2530 
/* VP7 sliced-job entry point; see vp78_decode_mb_row_sliced. */
2531 static int vp7_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata,
2532  int jobnr, int threadnr)
2533 {
2534  return vp78_decode_mb_row_sliced(avctx, tdata, jobnr, threadnr, IS_VP7);
2535 }
2536 
/* VP8 sliced-job entry point; see vp78_decode_mb_row_sliced. */
2537 static int vp8_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata,
2538  int jobnr, int threadnr)
2539 {
2540  return vp78_decode_mb_row_sliced(avctx, tdata, jobnr, threadnr, IS_VP8);
2541 }
2542 
2543 
/* Top-level frame decode shared by VP7 and VP8: parses the frame header,
 * manages the 4-slot reference frame state (last/golden/altref/current),
 * then runs the sliced row decode. Returns the consumed packet size or a
 * negative AVERROR; *got_frame is set when a displayable frame is output.
 * NOTE(review): several original lines were dropped by the extraction
 * (2552-2553, 2566-2567, 2612, 2618-2631, 2678-2682) — the reference-swap
 * and execute2 call sites below are visibly incomplete; confirm against
 * the real file before editing logic here. */
2544 static av_always_inline
2545 int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
2546  AVPacket *avpkt, int is_vp7)
2547 {
2548  VP8Context *s = avctx->priv_data;
2549  int ret, i, referenced, num_jobs;
2550  enum AVDiscard skip_thresh;
2551  VP8Frame *av_uninit(curframe), *prev_frame;
2554 
2555  if (is_vp7)
2556  ret = vp7_decode_frame_header(s, avpkt->data, avpkt->size);
2557  else
2558  ret = vp8_decode_frame_header(s, avpkt->data, avpkt->size);
2559 
2560  if (ret < 0)
2561  goto err;
2562 
2563  prev_frame = s->framep[VP56_FRAME_CURRENT];
2564 
 /* Frame is "referenced" if any reference slot will be updated from it. */
2565  referenced = s->update_last || s->update_golden == VP56_FRAME_CURRENT ||
2567 
2568  skip_thresh = !referenced ? AVDISCARD_NONREF
2569  : !s->keyframe ? AVDISCARD_NONKEY
2570  : AVDISCARD_ALL;
2571 
2572  if (avctx->skip_frame >= skip_thresh) {
2573  s->invisible = 1;
2574  memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4);
2575  goto skip_decode;
2576  }
2577  s->deblock_filter = s->filter.level && avctx->skip_loop_filter < skip_thresh;
2578 
2579  // release no longer referenced frames
2580  for (i = 0; i < 5; i++)
2581  if (s->frames[i].tf.f->data[0] &&
2582  &s->frames[i] != prev_frame &&
2583  &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
2584  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
2585  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2])
2586  vp8_release_frame(s, &s->frames[i]);
2587 
2588  curframe = s->framep[VP56_FRAME_CURRENT] = vp8_find_free_buffer(s);
2589 
2590  if (!s->colorspace)
2591  avctx->colorspace = AVCOL_SPC_BT470BG;
2592  if (s->fullrange)
2593  avctx->color_range = AVCOL_RANGE_JPEG;
2594  else
2595  avctx->color_range = AVCOL_RANGE_MPEG;
2596 
2597  /* Given that arithmetic probabilities are updated every frame, it's quite
2598  * likely that the values we have on a random interframe are complete
2599  * junk if we didn't start decode on a keyframe. So just don't display
2600  * anything rather than junk. */
2601  if (!s->keyframe && (!s->framep[VP56_FRAME_PREVIOUS] ||
2602  !s->framep[VP56_FRAME_GOLDEN] ||
2603  !s->framep[VP56_FRAME_GOLDEN2])) {
2604  av_log(avctx, AV_LOG_WARNING,
2605  "Discarding interframe without a prior keyframe!\n");
2606  ret = AVERROR_INVALIDDATA;
2607  goto err;
2608  }
2609 
2610  curframe->tf.f->key_frame = s->keyframe;
2611  curframe->tf.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I
2613  if ((ret = vp8_alloc_frame(s, curframe, referenced)) < 0)
2614  goto err;
2615 
2616  // check if golden and altref are swapped
2617  if (s->update_altref != VP56_FRAME_NONE)
2619  else
2621 
2622  if (s->update_golden != VP56_FRAME_NONE)
2624  else
2626 
2627  if (s->update_last)
2628  s->next_framep[VP56_FRAME_PREVIOUS] = curframe;
2629  else
2631 
2632  s->next_framep[VP56_FRAME_CURRENT] = curframe;
2633 
 /* Frame-threading: reference bookkeeping above must be finished before
  * the next thread copies our context. */
2634  if (avctx->codec->update_thread_context)
2635  ff_thread_finish_setup(avctx);
2636 
2637  s->linesize = curframe->tf.f->linesize[0];
2638  s->uvlinesize = curframe->tf.f->linesize[1];
2639 
2640  memset(s->top_nnz, 0, s->mb_width * sizeof(*s->top_nnz));
2641  /* Zero macroblock structures for top/top-left prediction
2642  * from outside the frame. */
2643  if (!s->mb_layout)
2644  memset(s->macroblocks + s->mb_height * 2 - 1, 0,
2645  (s->mb_width + 1) * sizeof(*s->macroblocks));
2646  if (!s->mb_layout && s->keyframe)
2647  memset(s->intra4x4_pred_mode_top, DC_PRED, s->mb_width * 4);
2648 
2649  memset(s->ref_count, 0, sizeof(s->ref_count));
2650 
2651  if (s->mb_layout == 1) {
2652  // Make sure the previous frame has read its segmentation map,
2653  // if we re-use the same map.
2654  if (prev_frame && s->segmentation.enabled &&
2656  ff_thread_await_progress(&prev_frame->tf, 1, 0);
2657  if (is_vp7)
2658  vp7_decode_mv_mb_modes(avctx, curframe, prev_frame);
2659  else
2660  vp8_decode_mv_mb_modes(avctx, curframe, prev_frame);
2661  }
2662 
2663  if (avctx->active_thread_type == FF_THREAD_FRAME)
2664  num_jobs = 1;
2665  else
2666  num_jobs = FFMIN(s->num_coeff_partitions, avctx->thread_count);
2667  s->num_jobs = num_jobs;
2668  s->curframe = curframe;
2669  s->prev_frame = prev_frame;
2670  s->mv_bounds.mv_min.y = -MARGIN;
2671  s->mv_bounds.mv_max.y = ((s->mb_height - 1) << 6) + MARGIN;
2672  for (i = 0; i < MAX_THREADS; i++) {
2673  VP8ThreadData *td = &s->thread_data[i];
2674  atomic_init(&td->thread_mb_pos, 0);
2675  atomic_init(&td->wait_mb_pos, INT_MAX);
2676  }
 /* Execute2 calls (vp7/vp8_decode_mb_row_sliced) — call lines partially
  * lost in extraction (originals 2678/2681). */
2677  if (is_vp7)
2679  num_jobs);
2680  else
2682  num_jobs);
2683 
2684  ff_thread_report_progress(&curframe->tf, INT_MAX, 0);
2685  memcpy(&s->framep[0], &s->next_framep[0], sizeof(s->framep[0]) * 4);
2686 
2687 skip_decode:
2688  // if future frames don't use the updated probabilities,
2689  // reset them to the values we saved
2690  if (!s->update_probabilities)
2691  s->prob[0] = s->prob[1];
2692 
2693  if (!s->invisible) {
2694  if ((ret = av_frame_ref(data, curframe->tf.f)) < 0)
2695  return ret;
2696  *got_frame = 1;
2697  }
2698 
2699  return avpkt->size;
2700 err:
2701  memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4);
2702  return ret;
2703 }
2704 
/* Public VP8 decode entry point; see vp78_decode_frame. */
2705 int ff_vp8_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
2706  AVPacket *avpkt)
2707 {
2708  return vp78_decode_frame(avctx, data, got_frame, avpkt, IS_VP8);
2709 }
2710 
2711 #if CONFIG_VP7_DECODER
/* VP7 decode entry point; see vp78_decode_frame. */
2712 static int vp7_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
2713  AVPacket *avpkt)
2714 {
2715  return vp78_decode_frame(avctx, data, got_frame, avpkt, IS_VP7);
2716 }
2717 #endif /* CONFIG_VP7_DECODER */
2718 
/* Decoder close: flush and free all per-context buffers and frames.
 * NOTE(review): original line 2719 (signature, presumably
 * av_cold int ff_vp8_decode_free(AVCodecContext *avctx)) was dropped by
 * the extraction. */
2720 {
2721  VP8Context *s = avctx->priv_data;
2722  int i;
2723 
2724  if (!s)
2725  return 0;
2726 
2727  vp8_decode_flush_impl(avctx, 1);
2728  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
2729  av_frame_free(&s->frames[i].tf.f);
2730 
2731  return 0;
2732 }
2733 
/* Allocate the AVFrame shells for every slot in s->frames; returns 0 or
 * AVERROR(ENOMEM). Partially-allocated frames are left for the caller
 * (ff_vp8_decode_free) to release.
 * NOTE(review): original line 2734 (signature, presumably
 * static av_cold int vp8_init_frames(VP8Context *s)) was dropped by the
 * extraction. */
2735 {
2736  int i;
2737  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++) {
2738  s->frames[i].tf.f = av_frame_alloc();
2739  if (!s->frames[i].tf.f)
2740  return AVERROR(ENOMEM);
2741  }
2742  return 0;
2743 }
2744 
/* Shared decoder init: sets pixel format, DSP contexts and the per-codec
 * function pointers, then allocates frame shells.
 * NOTE(review): extraction dropped originals 2760/2762-2763 and
 * 2765/2767-2768 — presumably the ff_h264_pred_init call and the
 * decode_mb_row_no_filter / filter_mb_row function-pointer assignments
 * inside each branch; confirm upstream. */
2745 static av_always_inline
2746 int vp78_decode_init(AVCodecContext *avctx, int is_vp7)
2747 {
2748  VP8Context *s = avctx->priv_data;
2749  int ret;
2750 
2751  s->avctx = avctx;
2752  s->vp7 = avctx->codec->id == AV_CODEC_ID_VP7;
2753  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
2754  avctx->internal->allocate_progress = 1;
2755 
2756  ff_videodsp_init(&s->vdsp, 8);
2757 
2758  ff_vp78dsp_init(&s->vp8dsp);
2759  if (CONFIG_VP7_DECODER && is_vp7) {
2761  ff_vp7dsp_init(&s->vp8dsp);
2764  } else if (CONFIG_VP8_DECODER && !is_vp7) {
2766  ff_vp8dsp_init(&s->vp8dsp);
2769  }
2770 
2771  /* does not change for VP8 */
2772  memcpy(s->prob[0].scan, ff_zigzag_scan, sizeof(s->prob[0].scan));
2773 
2774  if ((ret = vp8_init_frames(s)) < 0) {
2775  ff_vp8_decode_free(avctx);
2776  return ret;
2777  }
2778 
2779  return 0;
2780 }
2781 
2782 #if CONFIG_VP7_DECODER
/* VP7 init entry point; see vp78_decode_init. */
2783 static int vp7_decode_init(AVCodecContext *avctx)
2784 {
2785  return vp78_decode_init(avctx, IS_VP7);
2786 }
2787 #endif /* CONFIG_VP7_DECODER */
2788 
/* Public VP8 init entry point; see vp78_decode_init.
 * NOTE(review): original line 2789 (signature, presumably
 * av_cold int ff_vp8_decode_init(AVCodecContext *avctx)) was dropped by
 * the extraction. */
2790 {
2791  return vp78_decode_init(avctx, IS_VP8);
2792 }
2793 
2794 #if CONFIG_VP8_DECODER
2795 #if HAVE_THREADS
/* Per-thread init for frame threading: only allocates this context's
 * frame shells; decoder state is copied later by
 * vp8_decode_update_thread_context. */
2796 static av_cold int vp8_decode_init_thread_copy(AVCodecContext *avctx)
2797 {
2798  VP8Context *s = avctx->priv_data;
2799  int ret;
2800 
2801  s->avctx = avctx;
2802 
2803  if ((ret = vp8_init_frames(s)) < 0) {
2804  ff_vp8_decode_free(avctx);
2805  return ret;
2806  }
2807 
2808  return 0;
2809 }
2810 
/* Translate a frame pointer from the source context's frames[] array into
 * the equivalent slot of this context's array (NULL stays NULL).
 * Relies on the enclosing function's `s` / `s_src` locals. */
2811 #define REBASE(pic) ((pic) ? (pic) - &s_src->frames[0] + &s->frames[0] : NULL)
2812 
/* Frame-threading state transfer: copy probabilities, segmentation,
 * lf deltas and sign biases from the source thread's context, re-reference
 * its frames, and rebase its next_framep[] pointers into this context. */
2813 static int vp8_decode_update_thread_context(AVCodecContext *dst,
2814  const AVCodecContext *src)
2815 {
2816  VP8Context *s = dst->priv_data, *s_src = src->priv_data;
2817  int i;
2818 
 /* Dimensions changed between threads: drop per-size buffers; they are
  * reallocated lazily at the new size. */
2819  if (s->macroblocks_base &&
2820  (s_src->mb_width != s->mb_width || s_src->mb_height != s->mb_height)) {
2821  free_buffers(s);
2822  s->mb_width = s_src->mb_width;
2823  s->mb_height = s_src->mb_height;
2824  }
2825 
2826  s->prob[0] = s_src->prob[!s_src->update_probabilities];
2827  s->segmentation = s_src->segmentation;
2828  s->lf_delta = s_src->lf_delta;
2829  memcpy(s->sign_bias, s_src->sign_bias, sizeof(s->sign_bias));
2830 
2831  for (i = 0; i < FF_ARRAY_ELEMS(s_src->frames); i++) {
2832  if (s_src->frames[i].tf.f->data[0]) {
2833  int ret = vp8_ref_frame(s, &s->frames[i], &s_src->frames[i]);
2834  if (ret < 0)
2835  return ret;
2836  }
2837  }
2838 
2839  s->framep[0] = REBASE(s_src->next_framep[0]);
2840  s->framep[1] = REBASE(s_src->next_framep[1]);
2841  s->framep[2] = REBASE(s_src->next_framep[2]);
2842  s->framep[3] = REBASE(s_src->next_framep[3]);
2843 
2844  return 0;
2845 }
2846 #endif /* HAVE_THREADS */
2847 #endif /* CONFIG_VP8_DECODER */
2848 
2849 #if CONFIG_VP7_DECODER
/* VP7 decoder registration.
 * NOTE(review): original line 2860 (likely the .flush field) was dropped
 * by the extraction. */
2850 AVCodec ff_vp7_decoder = {
2851  .name = "vp7",
2852  .long_name = NULL_IF_CONFIG_SMALL("On2 VP7"),
2853  .type = AVMEDIA_TYPE_VIDEO,
2854  .id = AV_CODEC_ID_VP7,
2855  .priv_data_size = sizeof(VP8Context),
2856  .init = vp7_decode_init,
2857  .close = ff_vp8_decode_free,
2858  .decode = vp7_decode_frame,
2859  .capabilities = AV_CODEC_CAP_DR1,
2861 };
2862 #endif /* CONFIG_VP7_DECODER */
2863 
2864 #if CONFIG_VP8_DECODER
/* VP8 decoder registration.
 * NOTE(review): originals 2871, 2873 and 2875-2876 (likely .init, .decode,
 * the capability-flags continuation and .flush) were dropped by the
 * extraction. */
2865 AVCodec ff_vp8_decoder = {
2866  .name = "vp8",
2867  .long_name = NULL_IF_CONFIG_SMALL("On2 VP8"),
2868  .type = AVMEDIA_TYPE_VIDEO,
2869  .id = AV_CODEC_ID_VP8,
2870  .priv_data_size = sizeof(VP8Context),
2872  .close = ff_vp8_decode_free,
2874  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
2877  .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp8_decode_init_thread_copy),
2878  .update_thread_context = ONLY_IF_THREADS_ENABLED(vp8_decode_update_thread_context),
2879 };
2880 #endif /* CONFIG_VP8_DECODER */
uint8_t golden
Definition: vp8.h:242
atomic_int wait_mb_pos
Definition: vp8.h:130
uint8_t inner_limit
Definition: vp8.h:77
#define VERT_PRED8x8
Definition: h264pred.h:70
VP8Macroblock * macroblocks
Definition: vp8.h:185
static const uint8_t vp8_dc_qlookup[VP8_MAX_QUANT+1]
Definition: vp8data.h:711
static av_always_inline void intra_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb, int mb_x, int mb_y, int is_vp7)
Definition: vp8.c:1596
static const uint8_t vp8_submv_prob[5][3]
Definition: vp8data.h:153
static const uint16_t vp7_ydc_qlookup[]
Definition: vp8data.h:778
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
#define NULL
Definition: coverity.c:32
const struct AVCodec * codec
Definition: avcodec.h:1770
discard all frames except keyframes
Definition: avcodec.h:829
Definition: vp9.h:47
void(* prefetch)(uint8_t *buf, ptrdiff_t stride, int h)
Prefetch memory into cache (if supported by hardware).
Definition: videodsp.h:76
const char * s
Definition: avisynth_c.h:768
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static const uint8_t vp7_mv_default_prob[2][17]
Definition: vp8data.h:744
#define DC_128_PRED8x8
Definition: h264pred.h:76
static av_always_inline int pthread_mutex_destroy(pthread_mutex_t *mutex)
Definition: os2threads.h:106
static void copy(const float *p1, float *p2, const int length)
(only used in prediction) no split MVs
Definition: vp8.h:72
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:125
static float alpha(float a)
void ff_vp7dsp_init(VP8DSPContext *c)
static void update_lf_deltas(VP8Context *s)
Definition: vp8.c:236
This structure describes decoded (raw) audio or video data.
Definition: frame.h:201
#define atomic_store(object, desired)
Definition: stdatomic.h:85
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
static void flush(AVCodecContext *avctx)
static int vp8_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2419
static const uint8_t vp7_pred4x4_mode[]
Definition: vp8data.h:33
int8_t sign_bias[4]
one state [0, 1] per ref frame type
Definition: vp8.h:163
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:1963
static av_always_inline int inter_predict_dc(int16_t block[16], int16_t pred[2])
Definition: vp8.c:1321
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
static int init_thread_copy(AVCodecContext *avctx)
Definition: tta.c:392
#define VP7_MV_PRED_COUNT
Definition: vp8data.h:68
AVFrame * f
Definition: thread.h:36
static av_always_inline int vp8_rac_get_tree(VP56RangeCoder *c, const int8_t(*tree)[2], const uint8_t *probs)
Definition: vp56.h:380
uint8_t feature_value[4][4]
Definition: vp8.h:308
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:211
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
ptrdiff_t linesize
Definition: vp8.h:154
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 ...
Definition: pixfmt.h:490
uint8_t * intra4x4_pred_mode_top
Definition: vp8.h:187
uint8_t mbskip_enabled
Definition: vp8.h:159
static VP56Frame ref_to_update(VP8Context *s, int update, VP56Frame ref)
Determine which buffers golden and altref should be updated with after this frame.
Definition: vp8.c:352
void(* vp8_v_loop_filter16y)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:48
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2498
static int vp7_decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, uint8_t *token_prob, int16_t qmul[2], const uint8_t scan[16])
Definition: vp8.c:1343
uint8_t token[4][16][3][NUM_DCT_TOKENS-1]
Definition: vp8.h:245
uint8_t scan[16]
Definition: vp8.h:247
int size
Definition: avcodec.h:1680
const char * b
Definition: vf_curves.c:113
static void vp8_decode_flush(AVCodecContext *avctx)
Definition: vp8.c:120
#define MARGIN
Definition: vp8.c:2201
vp8_mc_func put_vp8_bilinear_pixels_tab[3][3][3]
Definition: vp8dsp.h:81
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1989
VP56mv bmv[16]
Definition: vp8.h:93
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:411
av_cold void ff_vp78dsp_init(VP8DSPContext *dsp)
Definition: vp8dsp.c:666
uint8_t inner_filter
Definition: vp8.h:78
static const int8_t vp8_pred8x8c_tree[3][2]
Definition: vp8data.h:180
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:222
uint8_t segmentid[3]
Definition: vp8.h:238
static const uint16_t vp7_y2dc_qlookup[]
Definition: vp8data.h:803
discard all
Definition: avcodec.h:830
void ff_thread_await_progress(ThreadFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures.
static void copy_chroma(AVFrame *dst, AVFrame *src, int width, int height)
Definition: vp8.c:427
#define src
Definition: vp8dsp.c:254
#define HOR_PRED8x8
Definition: h264pred.h:69
AVCodec.
Definition: avcodec.h:3739
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_RL16
Definition: bytestream.h:87
uint8_t sharpness
Definition: vp8.h:182
#define AV_WN32A(p, v)
Definition: intreadwrite.h:543
2 16x8 blocks (vertical)
Definition: vp8.h:68
#define AV_COPY32(d, s)
Definition: intreadwrite.h:591
static av_always_inline int pthread_cond_destroy(pthread_cond_t *cond)
Definition: os2threads.h:138
int update_probabilities
If this flag is not set, all the probability updates are discarded after this frame is decoded...
Definition: vp8.h:260
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:42
VP8intmv mv_min
Definition: vp8.h:102
VP8Frame * framep[4]
Definition: vp8.h:147
static int vp8_decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, uint8_t *token_prob, int16_t qmul[2])
Definition: vp8.c:1355
static void vp7_filter_mb_row(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2484
#define VP7_MVC_SIZE
Definition: vp8.c:393
static int vp7_read_mv_component(VP56RangeCoder *c, const uint8_t *p)
Definition: vp8.c:814
vp8_mc_func put_vp8_epel_pixels_tab[3][3][3]
first dimension: 4-log2(width) second dimension: 0 if no vertical interpolation is needed; 1 4-tap ve...
Definition: vp8dsp.h:80
static av_always_inline const uint8_t * get_submv_prob(uint32_t left, uint32_t top, int is_vp7)
Definition: vp8.c:825
static const uint8_t vp8_pred8x8c_prob_inter[3]
Definition: vp8data.h:189
static av_always_inline int decode_block_coeffs(VP56RangeCoder *c, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, int zero_nhood, int16_t qmul[2], const uint8_t scan[16], int vp7)
Definition: vp8.c:1380
uint8_t(* top_nnz)[9]
Definition: vp8.h:227
int num_jobs
Definition: vp8.h:277
static const uint8_t vp8_mbsplits[5][16]
Definition: vp8data.h:127
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:3386
#define AV_RN32A(p)
Definition: intreadwrite.h:531
static int16_t block[64]
Definition: dct.c:115
uint8_t pred16x16[4]
Definition: vp8.h:243
static const int8_t vp8_pred16x16_tree_intra[4][2]
Definition: vp8data.h:47
uint8_t update_map
Definition: vp8.h:174
#define PLANE_PRED8x8
Definition: h264pred.h:71
uint16_t mb_height
Definition: vp8.h:153
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
int16_t y
Definition: vp56.h:68
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:63
void void avpriv_request_sample(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
int update_golden
VP56_FRAME_NONE if not updated, or which frame to copy if so.
Definition: vp8.h:253
static av_always_inline void filter_mb_row(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr, int is_vp7)
Definition: vp8.c:2425
uint8_t intra4x4_pred_mode_top[4]
Definition: vp8.h:91
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:102
static av_always_inline void clamp_mv(VP8mvbounds *s, VP56mv *dst, const VP56mv *src)
Definition: vp8.c:775
uint8_t
static int vp7_update_dimensions(VP8Context *s, int width, int height)
Definition: vp8.c:204
#define av_cold
Definition: attributes.h:82
ptrdiff_t uvlinesize
Definition: vp8.h:155
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:150
#define mb
#define DC_PRED8x8
Definition: h264pred.h:68
int fade_present
Fade bit present in bitstream (VP7)
Definition: vp8.h:293
static av_always_inline void vp7_decode_mvs(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int layout)
Definition: vp8.c:943
static VP8Frame * vp8_find_free_buffer(VP8Context *s)
Definition: vp8.c:125
uint8_t ref_frame
Definition: vp8.h:86
static av_always_inline int check_intra_pred4x4_mode_emuedge(int mode, int mb_x, int mb_y, int *copy_buf, int vp7)
Definition: vp8.c:1560
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(constuint8_t *) pi-0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(constint16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(constint32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(constint64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64,*(constint64_t *) pi *(1.0f/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64,*(constint64_t *) pi *(1.0/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(constfloat *) pi *(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, av_clip_int16(lrint(*(constdouble *) pi 
*(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(constdouble *) pi *(INT64_C(1)<< 63)))#defineFMT_PAIR_FUNC(out, in) staticconv_func_type *constfmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64),};staticvoidcpy1(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, len);}staticvoidcpy2(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 2 *len);}staticvoidcpy4(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 4 *len);}staticvoidcpy8(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 8 *len);}AudioConvert *swri_audio_convert_alloc(enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, constint *ch_map, intflags){AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) returnNULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) returnNULL;if(channels==1){in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);}ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map){switch(av_get_bytes_per_sample(in_fmt)){case1:ctx->simd_f=cpy1;break;case2:ctx->simd_f=cpy2;break;case4:ctx->simd_f=cpy4;break;case8:ctx->simd_f=cpy8;break;}}if(HAVE_X86ASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);returnctx;}voidswri_audio_convert_free(AudioConvert **ctx){av_freep(ctx);}intswri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, 
intlen){intch;intoff=0;constintos=(out->planar?1:out->ch_count)*out->bps;unsignedmisaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask){intplanes=in->planar?in->ch_count:1;unsignedm=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;}if(ctx->out_simd_align_mask){intplanes=out->planar?out->ch_count:1;unsignedm=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;}if(ctx->simd_f &&!ctx->ch_map &&!misaligned){off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){if(out->planar==in->planar){intplanes=out->planar?out->ch_count:1;for(ch=0;ch< planes;ch++){ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
Multithreading support functions.
Definition: vp9.h:46
#define TOP_DC_PRED8x8
Definition: h264pred.h:75
int ff_vp8_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: vp8.c:2705
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:395
uint8_t mvc[2][19]
Definition: vp8.h:246
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:343
VP56mv mv
Definition: vp8.h:92
int8_t base_quant[4]
Definition: vp8.h:175
static const uint8_t vp8_mv_update_prob[2][19]
Definition: vp8data.h:733
static AVFrame * frame
void(* pred8x8[4+3+4])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:97
int update_last
update VP56_FRAME_PREVIOUS with the current one
Definition: vp8.h:252
#define height
uint8_t * data
Definition: avcodec.h:1679
int8_t yoffset
Definition: vp8data.h:62
int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
Definition: utils.c:2060
static void parse_segment_info(VP8Context *s)
Definition: vp8.c:215
ptrdiff_t size
Definition: opengl_enc.c:101
VP8Frame * prev_frame
Definition: vp8.h:150
int num_coeff_partitions
All coefficients are contained in separate arith coding contexts.
Definition: vp8.h:266
static const uint8_t vp8_token_default_probs[4][8][3][NUM_DCT_TOKENS-1]
Definition: vp8data.h:369
vp8_mc_func put_pixels_tab[3][3][3]
Definition: vp8.h:271
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when they are ready for the next thread to st...
void(* pred4x4[9+3+3])(uint8_t *src, const uint8_t *topright, ptrdiff_t stride)
Definition: h264pred.h:93
#define AV_COPY64(d, s)
Definition: intreadwrite.h:595
uint8_t feature_index_prob[4][3]
Definition: vp8.h:307
uint8_t intra4x4_pred_mode_mb[16]
Definition: vp8.h:90
#define av_log(a,...)
static av_always_inline int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt, int is_vp7)
Definition: vp8.c:2545
uint8_t intra4x4_pred_mode_left[4]
Definition: vp8.h:188
#define VERT_VP8_PRED
for VP8, VERT_PRED is the average of
Definition: h264pred.h:60
uint8_t colorspace
0 is the only value allowed (meaning bt601)
Definition: vp8.h:274
static const VP56mv * get_bmv_ptr(const VP8Macroblock *mb, int subblock)
Definition: vp8.c:937
enum AVCodecID id
Definition: avcodec.h:3753
static const uint8_t vp8_mbsplit_count[4]
Definition: vp8data.h:142
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
static const int8_t vp8_coeff_band_indexes[8][10]
Definition: vp8data.h:331
#define td
Definition: regdef.h:70
H264PredContext hpc
Definition: vp8.h:270
Definition: vp8.h:138
static const uint8_t vp8_pred4x4_mode[]
Definition: vp8data.h:40
static av_always_inline void prefetch_motion(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int mb_xy, int ref)
Definition: vp8.c:1880
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() frame-for multithreaded codecs.
uint8_t absolute_vals
Definition: vp8.h:173
uint16_t mb_width
Definition: vp8.h:152
void(* vp8_luma_dc_wht_dc)(int16_t block[4][4][16], int16_t dc[16])
Definition: vp8dsp.h:39
static const uint8_t vp8_dct_cat2_prob[]
Definition: vp8data.h:345
static const uint8_t vp8_mv_default_prob[2][19]
Definition: vp8data.h:755
#define atomic_load(object)
Definition: stdatomic.h:93
#define FF_SIGNBIT(x)
Definition: internal.h:92
uint8_t last
Definition: vp8.h:241
static const int sizes[][2]
Definition: img2dec.c:51
#define AVERROR(e)
Definition: error.h:43
void(* vp8_h_loop_filter8uv)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:54
static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
Definition: vp8.c:645
static void fade(uint8_t *dst, ptrdiff_t dst_linesize, const uint8_t *src, ptrdiff_t src_linesize, int width, int height, int alpha, int beta)
Definition: vp8.c:438
uint8_t mode
Definition: vp8.h:85
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:163
static av_always_inline int check_tm_pred8x8_mode(int mode, int mb_x, int mb_y, int vp7)
Definition: vp8.c:1525
static int vp8_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2537
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:179
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:3211
const char * r
Definition: vf_curves.c:111
const uint8_t ff_zigzag_scan[16+1]
Definition: mathtables.c:109
VP8 compatible video decoder.
void(* vp8_v_loop_filter8uv)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:52
static const uint8_t vp8_mbfirstidx[4][16]
Definition: vp8data.h:135
AVCodecContext * avctx
Definition: vp8.h:146
uint16_t width
Definition: gdv.c:47
VP8mvbounds mv_bounds
Definition: vp8.h:161
#define EDGE_EMU_LINESIZE
Definition: vp8.h:132
uint16_t inter_dc_pred[2][2]
Interframe DC prediction (VP7) [0] VP56_FRAME_PREVIOUS [1] VP56_FRAME_GOLDEN.
Definition: vp8.h:300
VideoDSPContext vdsp
Definition: vp8.h:268
const char * name
Name of the codec implementation.
Definition: avcodec.h:3746
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_RL24
Definition: bytestream.h:87
VP8Macroblock * macroblocks_base
Definition: vp8.h:250
static av_always_inline void vp8_mc_part(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], ThreadFrame *ref_frame, int x_off, int y_off, int bx_off, int by_off, int block_w, int block_h, int width, int height, VP56mv *mv)
Definition: vp8.c:1842
static av_always_inline void decode_mb_mode(VP8Context *s, VP8mvbounds *mv_bounds, VP8Macroblock *mb, int mb_x, int mb_y, uint8_t *segment, uint8_t *ref, int layout, int is_vp7)
Definition: vp8.c:1169
static const uint8_t vp8_pred4x4_prob_inter[9]
Definition: vp8data.h:192
struct VP8Context::@153 qmat[4]
Macroblocks can have one of 4 different quants in a frame when segmentation is enabled.
uint8_t edge_emu_buffer[21 *EDGE_EMU_LINESIZE]
Definition: vp8.h:133
int16_t block[6][4][16]
Definition: vp8.h:107
static av_always_inline int decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, uint8_t *token_prob, int16_t qmul[2], const uint8_t scan[16], int vp7)
Definition: vp8.c:1261
static const int vp7_mode_contexts[31][4]
Definition: vp8data.h:84
static void vp8_filter_mb_row(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2490
static void vp7_get_quants(VP8Context *s)
Definition: vp8.c:287
#define FFMAX(a, b)
Definition: common.h:94
uint8_t keyframe
Definition: vp8.h:157
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: avcodec.h:1065
int x
Definition: vp8.h:97
const uint8_t * end
Definition: vp56.h:90
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:218
VP56Frame
Definition: vp56.h:40
int(* decode_mb_row_no_filter)(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.h:285
int16_t luma_qmul[2]
Definition: vp8.h:197
static const uint8_t vp8_pred16x16_prob_inter[4]
Definition: vp8data.h:164
Definition: hls.c:67
useful rectangle filling function
int ff_vp56_init_range_decoder(VP56RangeCoder *c, const uint8_t *buf, int buf_size)
Definition: vp56rac.c:40
#define MAX_THREADS
static av_always_inline void update(SilenceDetectContext *s, AVFrame *insamples, int is_silence, int64_t nb_samples_notify, AVRational time_base)
4x4 blocks of 4x4px each
Definition: vp8.h:71
uint8_t deblock_filter
Definition: vp8.h:158
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:3203
#define H_LOOP_FILTER_16Y_INNER(cond)
#define FFMIN(a, b)
Definition: common.h:96
uint8_t feature_present_prob[4]
Definition: vp8.h:306
static av_always_inline void vp8_mc_chroma(VP8Context *s, VP8ThreadData *td, uint8_t *dst1, uint8_t *dst2, ThreadFrame *ref, const VP56mv *mv, int x_off, int y_off, int block_w, int block_h, int width, int height, ptrdiff_t linesize, vp8_mc_func mc_func[3][3])
chroma MC function
Definition: vp8.c:1792
uint8_t fullrange
whether we can skip clamping in dsp functions
Definition: vp8.h:275
int16_t block_dc[16]
Definition: vp8.h:108
static av_unused int vp8_rac_get_sint(VP56RangeCoder *c, int bits)
Definition: vp56.h:336
int width
picture width / height.
Definition: avcodec.h:1948
uint8_t mbskip
Definition: vp8.h:239
int8_t ref[4]
filter strength adjustment for macroblocks that reference: [0] - intra / VP56_FRAME_CURRENT [1] - VP5...
Definition: vp8.h:223
static int vp7_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2413
void(* filter_mb_row)(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.h:286
void(* vp8_idct_dc_add4y)(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride)
Definition: vp8dsp.h:42
static av_cold int vp8_init_frames(VP8Context *s)
Definition: vp8.c:2734
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
static void free_buffers(VP8Context *s)
Definition: vp8.c:49
int32_t
AVFormatContext * ctx
Definition: movenc.c:48
#define check_thread_pos(td, otd, mb_x_check, mb_y_check)
Definition: vp8.c:2281
static av_always_inline int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
Definition: os2threads.h:98
static int vp8_read_mv_component(VP56RangeCoder *c, const uint8_t *p)
Definition: vp8.c:819
struct VP8Context::@152 filter
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:3204
void(* vp8_mc_func)(uint8_t *dst, ptrdiff_t dstStride, uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: vp8dsp.h:33
int16_t luma_dc_qmul[2]
luma dc-only block quant
Definition: vp8.h:198
int16_t chroma_qmul[2]
Definition: vp8.h:199
static const uint8_t vp8_pred4x4_prob_intra[10][10][9]
Definition: vp8data.h:196
VP8mvbounds mv_bounds
Definition: vp8.h:135
uint8_t(* top_border)[16+8+8]
Definition: vp8.h:226
int n
Definition: avisynth_c.h:684
ThreadFrame tf
Definition: vp8.h:139
static av_always_inline int decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr, int is_vp7)
Definition: vp8.c:2285
static av_always_inline void filter_level_for_mb(VP8Context *s, VP8Macroblock *mb, VP8FilterStrength *f, int is_vp7)
Definition: vp8.c:2049
static const int8_t vp7_feature_index_tree[4][2]
Definition: vp8data.h:771
static const uint8_t vp7_feature_value_size[2][4]
Definition: vp8data.h:766
#define vp56_rac_get_prob
Definition: vp56.h:254
static void vp8_decode_flush_impl(AVCodecContext *avctx, int free_mem)
Definition: vp8.c:107
static av_always_inline void decode_mb_coeffs(VP8Context *s, VP8ThreadData *td, VP56RangeCoder *c, VP8Macroblock *mb, uint8_t t_nnz[9], uint8_t l_nnz[9], int is_vp7)
Definition: vp8.c:1395
#define LEFT_DC_PRED8x8
Definition: h264pred.h:74
static void vp8_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame, VP8Frame *prev_frame)
Definition: vp8.c:2241
#define cat(a, bpp, b)
Definition: vp9dsp_init.h:29
uint8_t segment
Definition: vp8.h:89
#define FF_ARRAY_ELEMS(a)
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:3192
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:510
int8_t xoffset
Definition: vp8data.h:63
static const float pred[4]
Definition: siprdata.h:259
static int vp7_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2531
#define IS_VP8
Definition: vp8dsp.h:104
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: avcodec.h:1069
static const int8_t mv[256][2]
Definition: 4xm.c:77
static void vp7_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame, VP8Frame *prev_frame)
Definition: vp8.c:2235
static av_always_inline int check_intra_pred8x8_mode_emuedge(int mode, int mb_x, int mb_y, int vp7)
Definition: vp8.c:1534
static av_always_inline int vp56_rac_get_prob_branchy(VP56RangeCoder *c, int prob)
Definition: vp56.h:271
void(* vp8_v_loop_filter8uv_inner)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:62
struct VP8Context::@154 lf_delta
void(* vp8_h_loop_filter_simple)(uint8_t *dst, ptrdiff_t stride, int flim)
Definition: vp8dsp.h:70
#define src1
Definition: h264pred.c:139
static av_always_inline void inter_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb, int mb_x, int mb_y)
Apply motion vectors to prediction buffer, chapter 18.
Definition: vp8.c:1903
VP8Frame * curframe
Definition: vp8.h:149
uint8_t simple
Definition: vp8.h:180
void(* vp8_idct_add)(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
Definition: vp8dsp.h:40
VP8Frame frames[5]
Definition: vp8.h:272
Libavcodec external API header.
static const uint8_t vp8_pred8x8c_prob_intra[3]
Definition: vp8data.h:186
uint8_t level
Definition: vp8.h:181
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:232
static void vp8_release_frame(VP8Context *s, VP8Frame *f)
Definition: vp8.c:82
AVBufferRef * seg_map
Definition: vp8.h:140
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
static const uint16_t vp7_yac_qlookup[]
Definition: vp8data.h:790
main external API structure.
Definition: avcodec.h:1761
static int vp7_fade_frame(VP8Context *s, VP56RangeCoder *c)
Definition: vp8.c:452
uint8_t * data
The data buffer.
Definition: buffer.h:89
VP8Frame * next_framep[4]
Definition: vp8.h:148
int mb_layout
This describes the macroblock memory layout.
Definition: vp8.h:283
uint8_t left_nnz[9]
For coeff decode, we need to know whether the above block had non-zero coefficients.
Definition: vp8.h:123
static const uint8_t vp8_mbsplit_prob[3]
Definition: vp8data.h:145
VP56RangeCoder c
header context, includes mb modes and motion vectors
Definition: vp8.h:229
void * buf
Definition: avisynth_c.h:690
int y
Definition: vp8.h:98
int bits
Definition: vp56.h:87
void(* pred16x16[4+3+2])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:98
VP56RangeCoder coeff_partition[8]
Definition: vp8.h:267
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:83
static const int8_t vp8_pred16x16_tree_inter[4][2]
Definition: vp8data.h:54
int vp7
Definition: vp8.h:288
static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
Definition: vp8.c:260
int coded_height
Definition: avcodec.h:1963
static int vp8_update_dimensions(VP8Context *s, int width, int height)
Definition: vp8.c:209
int index
Definition: gxfenc.c:89
VP8FilterStrength * filter_strength
Definition: vp8.h:134
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:2491
void(* vp8_idct_dc_add4uv)(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride)
Definition: vp8dsp.h:44
static av_always_inline int check_dc_pred8x8_mode(int mode, int mb_x, int mb_y)
Definition: vp8.c:1516
static void vp78_update_probability_tables(VP8Context *s)
Definition: vp8.c:377
#define MV_EDGE_CHECK(n)
static const int8_t vp8_pred4x4_tree[9][2]
Definition: vp8data.h:168
uint8_t enabled
whether each mb can have a different strength based on mode/ref
Definition: vp8.h:172
static av_always_inline void idct_mb(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb)
Definition: vp8.c:1987
static void vp78_update_pred16x16_pred8x8_mvc_probabilities(VP8Context *s, int mvc_size)
Definition: vp8.c:396
static av_always_inline int read_mv_component(VP56RangeCoder *c, const uint8_t *p, int vp7)
Motion vector coding, 17.1.
Definition: vp8.c:786
static const uint8_t subpel_idx[3][8]
Definition: vp8.c:1710
static void update_refs(VP8Context *s)
Definition: vp8.c:416
static av_always_inline int vp8_rac_get_coeff(VP56RangeCoder *c, const uint8_t *prob)
Definition: vp56.h:393
static const uint8_t vp8_coeff_band[16]
Definition: vp8data.h:325
int allocate_progress
Whether to allocate progress for frame threading.
Definition: internal.h:153
struct VP8Context::@155 prob[2]
These are all of the updatable probabilities for binary decisions.
static const uint16_t vp8_ac_qlookup[VP8_MAX_QUANT+1]
Definition: vp8data.h:722
static const uint8_t vp8_pred16x16_prob_intra[4]
Definition: vp8data.h:161
uint8_t score
Definition: vp8data.h:65
mfxU16 profile
Definition: qsvenc.c:44
static av_always_inline void decode_intra4x4_modes(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb, int mb_x, int keyframe, int layout)
Definition: vp8.c:1134
static int vp8_rac_get_uint(VP56RangeCoder *c, int bits)
Definition: vp56.h:324
#define DC_127_PRED8x8
Definition: h264pred.h:85
void(* vp8_luma_dc_wht)(int16_t block[4][4][16], int16_t dc[16])
Definition: vp8dsp.h:38
Definition: vp56.h:66
av_cold int ff_vp8_decode_init(AVCodecContext *avctx)
Definition: vp8.c:2789
int update_altref
Definition: vp8.h:254
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:215
uint8_t feature_enabled[4]
Macroblock features (VP7)
Definition: vp8.h:305
int8_t mode[VP8_MVMODE_SPLIT+1]
filter strength adjustment for the following macroblock modes: [0-3] - i16x16 (always zero) [4] - i4x...
Definition: vp8.h:214
2 8x16 blocks (horizontal)
Definition: vp8.h:69
av_cold int ff_vp8_decode_free(AVCodecContext *avctx)
Definition: vp8.c:2719
Definition: vp9.h:48
#define AV_ZERO128(d)
Definition: intreadwrite.h:627
uint8_t pred8x8c[3]
Definition: vp8.h:244
the normal 219*2^(n-8) "MPEG" YUV ranges
Definition: pixfmt.h:509
discard all non reference
Definition: avcodec.h:826
static av_always_inline void vp78_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *curframe, VP8Frame *prev_frame, int is_vp7)
Definition: vp8.c:2203
uint8_t partitioning
Definition: vp8.h:87
#define AV_ZERO64(d)
Definition: intreadwrite.h:623
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:62
void(* vp8_v_loop_filter_simple)(uint8_t *dst, ptrdiff_t stride, int flim)
Definition: vp8dsp.h:69
int16_t x
Definition: vp56.h:67
common internal api header.
static void vp8_get_quants(VP8Context *s)
Definition: vp8.c:306
if(ret< 0)
Definition: vf_mcdeint.c:279
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
#define LOCAL_ALIGNED(a, t, v,...)
Definition: internal.h:113
#define AV_COPY128(d, s)
Definition: intreadwrite.h:599
static int ref_frame(Vp3DecodeContext *s, ThreadFrame *dst, ThreadFrame *src)
Definition: vp3.c:1922
static int vp8_alloc_frame(VP8Context *s, VP8Frame *f, int ref)
Definition: vp8.c:69
uint8_t chroma_pred_mode
Definition: vp8.h:88
static double c[64]
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
VP8intmv mv_max
Definition: vp8.h:103
static av_always_inline void backup_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, ptrdiff_t linesize, ptrdiff_t uvlinesize, int simple)
Definition: vp8.c:1470
static av_always_inline int pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr)
Definition: os2threads.h:127
#define DC_129_PRED8x8
Definition: h264pred.h:86
enum AVDiscard skip_loop_filter
Skip loop filtering for selected frames.
Definition: avcodec.h:3372
static av_always_inline int vp8_rac_get(VP56RangeCoder *c)
Definition: vp56.h:308
int invisible
Definition: vp8.h:251
static av_always_inline int decode_splitmvs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb, int layout, int is_vp7)
Split motion vector prediction, 16.4.
Definition: vp8.c:842
static const SiprModeParam modes[MODE_COUNT]
Definition: sipr.c:69
int ref_count[3]
Definition: vp8.h:164
void * priv_data
Definition: avcodec.h:1803
int(* update_thread_context)(AVCodecContext *dst, const AVCodecContext *src)
Copy necessary context variables from a previous thread context to the current one.
Definition: avcodec.h:3794
static av_always_inline int check_tm_pred4x4_mode(int mode, int mb_x, int mb_y, int vp7)
Definition: vp8.c:1550
void(* vp8_h_loop_filter16y)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:50
#define MODE_I4x4
Definition: vp8.h:59
static int vp7_calculate_mb_offset(int mb_x, int mb_y, int mb_width, int xoffset, int yoffset, int boundary, int *edge_x, int *edge_y)
The vp7 reference decoder uses a padding macroblock column (added to right edge of the frame) to guar...
Definition: vp8.c:924
#define XCHG(a, b, xchg)
int(* execute2)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count)
The codec may call this to execute several independent things.
Definition: avcodec.h:3252
#define update_pos(td, mb_y, mb_x)
Definition: vp8.c:2282
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1811
#define HOR_VP8_PRED
unaveraged version of HOR_PRED, see
Definition: h264pred.h:63
VP8DSPContext vp8dsp
Definition: vp8.h:269
static av_always_inline int update_dimensions(VP8Context *s, int width, int height, int is_vp7)
Definition: vp8.c:150
struct VP8Context::@151 segmentation
Base parameters for segmentation, i.e.
int thread_nr
Definition: vp8.h:124
static av_always_inline void xchg_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, ptrdiff_t linesize, ptrdiff_t uvlinesize, int mb_x, int mb_y, int mb_width, int simple, int xchg)
Definition: vp8.c:1482
#define AV_ZERO32(d)
Definition: intreadwrite.h:619
static av_always_inline int vp78_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr, int is_vp7)
Definition: vp8.c:2497
static const double coeff[2][5]
Definition: vf_owdenoise.c:72
void(* vp8_idct_dc_add)(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
Definition: vp8dsp.h:41
uint64_t layout
AVDiscard
Definition: avcodec.h:821
static av_unused int vp8_rac_get_nn(VP56RangeCoder *c)
Definition: vp56.h:358
void(* vp8_v_loop_filter16y_inner)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:58
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, 
intapply_map){AudioConvert *ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> dc
#define av_uninit(x)
Definition: attributes.h:148
const uint8_t * buffer
Definition: vp56.h:89
static av_always_inline void vp8_mc_luma(VP8Context *s, VP8ThreadData *td, uint8_t *dst, ThreadFrame *ref, const VP56mv *mv, int x_off, int y_off, int block_w, int block_h, int width, int height, ptrdiff_t linesize, vp8_mc_func mc_func[3][3])
luma MC function
Definition: vp8.c:1734
static const uint8_t vp8_token_update_probs[4][8][3][NUM_DCT_TOKENS-1]
Definition: vp8data.h:540
static av_always_inline void filter_mb(VP8Context *s, uint8_t *dst[3], VP8FilterStrength *f, int mb_x, int mb_y, int is_vp7)
Definition: vp8.c:2082
#define atomic_init(obj, value)
Definition: stdatomic.h:33
#define av_freep(p)
#define IS_VP7
Definition: vp8dsp.h:103
#define av_always_inline
Definition: attributes.h:39
int8_t filter_level[4]
base loop filter level
Definition: vp8.h:176
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
static const int vp8_mode_contexts[6][4]
Definition: vp8data.h:118
static const uint8_t vp8_dct_cat1_prob[]
Definition: vp8data.h:342
#define FFSWAP(type, a, b)
Definition: common.h:99
uint8_t intra
Definition: vp8.h:240
static av_always_inline void vp8_decode_mvs(VP8Context *s, VP8mvbounds *mv_bounds, VP8Macroblock *mb, int mb_x, int mb_y, int layout)
Definition: vp8.c:1034
uint8_t non_zero_count_cache[6][4]
This is the index plus one of the last non-zero coeff for each of the blocks in the current macrobloc...
Definition: vp8.h:116
uint8_t skip
Definition: vp8.h:82
atomic_int thread_mb_pos
Definition: vp8.h:129
void ff_vp8dsp_init(VP8DSPContext *c)
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:87
static void vp78_reset_probability_tables(VP8Context *s)
Definition: vp8.c:368
This structure stores compressed data.
Definition: avcodec.h:1656
#define VP8_MVC_SIZE
Definition: vp8.c:394
static int vp7_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
Definition: vp8.c:491
uint8_t profile
Definition: vp8.h:160
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:1397
const uint8_t *const ff_vp8_dct_cat_prob[]
Definition: vp8data.h:362
mode
Use these values in ebur128_init (or'ed).
Definition: ebur128.h:83
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:1002
VP8ThreadData * thread_data
Definition: vp8.h:145
Predicted.
Definition: avutil.h:275
2x2 blocks of 8x8px each
Definition: vp8.h:70
static av_always_inline void filter_mb_simple(VP8Context *s, uint8_t *dst, VP8FilterStrength *f, int mb_x, int mb_y)
Definition: vp8.c:2169
static const VP7MVPred vp7_mv_pred[VP7_MV_PRED_COUNT]
Definition: vp8data.h:69
static const uint16_t vp7_y2ac_qlookup[]
Definition: vp8data.h:816
static const uint8_t vp7_submv_prob[3]
Definition: vp8data.h:149
static av_always_inline int vp78_decode_init(AVCodecContext *avctx, int is_vp7)
Definition: vp8.c:2746
#define AV_WN64(p, v)
Definition: intreadwrite.h:385
uint8_t filter_level
Definition: vp8.h:76