FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
vp8.c
Go to the documentation of this file.
1 /*
2  * VP7/VP8 compatible video decoder
3  *
4  * Copyright (C) 2010 David Conrad
5  * Copyright (C) 2010 Ronald S. Bultje
6  * Copyright (C) 2010 Fiona Glaser
7  * Copyright (C) 2012 Daniel Kang
8  * Copyright (C) 2014 Peter Ross
9  *
10  * This file is part of FFmpeg.
11  *
12  * FFmpeg is free software; you can redistribute it and/or
13  * modify it under the terms of the GNU Lesser General Public
14  * License as published by the Free Software Foundation; either
15  * version 2.1 of the License, or (at your option) any later version.
16  *
17  * FFmpeg is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20  * Lesser General Public License for more details.
21  *
22  * You should have received a copy of the GNU Lesser General Public
23  * License along with FFmpeg; if not, write to the Free Software
24  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25  */
26 
27 #include "libavutil/imgutils.h"
28 
29 #include "avcodec.h"
30 #include "internal.h"
31 #include "mathops.h"
32 #include "rectangle.h"
33 #include "thread.h"
34 #include "vp8.h"
35 #include "vp8data.h"
36 
37 #if ARCH_ARM
38 # include "arm/vp8.h"
39 #endif
40 
41 #if CONFIG_VP7_DECODER && CONFIG_VP8_DECODER
42 #define VPX(vp7, f) (vp7 ? vp7_ ## f : vp8_ ## f)
43 #elif CONFIG_VP7_DECODER
44 #define VPX(vp7, f) vp7_ ## f
45 #else // CONFIG_VP8_DECODER
46 #define VPX(vp7, f) vp8_ ## f
47 #endif
48 
49 static void free_buffers(VP8Context *s)
50 {
51  int i;
52  if (s->thread_data)
53  for (i = 0; i < MAX_THREADS; i++) {
54 #if HAVE_THREADS
55  pthread_cond_destroy(&s->thread_data[i].cond);
57 #endif
59  }
60  av_freep(&s->thread_data);
63  av_freep(&s->top_nnz);
64  av_freep(&s->top_border);
65 
66  s->macroblocks = NULL;
67 }
68 
69 static int vp8_alloc_frame(VP8Context *s, VP8Frame *f, int ref)
70 {
71  int ret;
72  if ((ret = ff_thread_get_buffer(s->avctx, &f->tf,
73  ref ? AV_GET_BUFFER_FLAG_REF : 0)) < 0)
74  return ret;
75  if (!(f->seg_map = av_buffer_allocz(s->mb_width * s->mb_height))) {
77  return AVERROR(ENOMEM);
78  }
79  return 0;
80 }
81 
83 {
86 }
87 
88 #if CONFIG_VP8_DECODER
89 static int vp8_ref_frame(VP8Context *s, VP8Frame *dst, VP8Frame *src)
90 {
91  int ret;
92 
93  vp8_release_frame(s, dst);
94 
95  if ((ret = ff_thread_ref_frame(&dst->tf, &src->tf)) < 0)
96  return ret;
97  if (src->seg_map &&
98  !(dst->seg_map = av_buffer_ref(src->seg_map))) {
99  vp8_release_frame(s, dst);
100  return AVERROR(ENOMEM);
101  }
102 
103  return 0;
104 }
105 #endif /* CONFIG_VP8_DECODER */
106 
107 static void vp8_decode_flush_impl(AVCodecContext *avctx, int free_mem)
108 {
109  VP8Context *s = avctx->priv_data;
110  int i;
111 
112  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
113  vp8_release_frame(s, &s->frames[i]);
114  memset(s->framep, 0, sizeof(s->framep));
115 
116  if (free_mem)
117  free_buffers(s);
118 }
119 
120 static void vp8_decode_flush(AVCodecContext *avctx)
121 {
122  vp8_decode_flush_impl(avctx, 0);
123 }
124 
126 {
127  VP8Frame *frame = NULL;
128  int i;
129 
130  // find a free buffer
131  for (i = 0; i < 5; i++)
132  if (&s->frames[i] != s->framep[VP56_FRAME_CURRENT] &&
133  &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
134  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
135  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2]) {
136  frame = &s->frames[i];
137  break;
138  }
139  if (i == 5) {
140  av_log(s->avctx, AV_LOG_FATAL, "Ran out of free frames!\n");
141  abort();
142  }
143  if (frame->tf.f->data[0])
144  vp8_release_frame(s, frame);
145 
146  return frame;
147 }
148 
149 static av_always_inline
150 int update_dimensions(VP8Context *s, int width, int height, int is_vp7)
151 {
152  AVCodecContext *avctx = s->avctx;
153  int i, ret;
154 
155  if (width != s->avctx->width || ((width+15)/16 != s->mb_width || (height+15)/16 != s->mb_height) && s->macroblocks_base ||
156  height != s->avctx->height) {
158 
159  ret = ff_set_dimensions(s->avctx, width, height);
160  if (ret < 0)
161  return ret;
162  }
163 
164  s->mb_width = (s->avctx->coded_width + 15) / 16;
165  s->mb_height = (s->avctx->coded_height + 15) / 16;
166 
167  s->mb_layout = is_vp7 || avctx->active_thread_type == FF_THREAD_SLICE &&
168  avctx->thread_count > 1;
169  if (!s->mb_layout) { // Frame threading and one thread
170  s->macroblocks_base = av_mallocz((s->mb_width + s->mb_height * 2 + 1) *
171  sizeof(*s->macroblocks));
173  } else // Sliced threading
174  s->macroblocks_base = av_mallocz((s->mb_width + 2) * (s->mb_height + 2) *
175  sizeof(*s->macroblocks));
176  s->top_nnz = av_mallocz(s->mb_width * sizeof(*s->top_nnz));
177  s->top_border = av_mallocz((s->mb_width + 1) * sizeof(*s->top_border));
179 
180  if (!s->macroblocks_base || !s->top_nnz || !s->top_border ||
181  !s->thread_data || (!s->intra4x4_pred_mode_top && !s->mb_layout)) {
182  free_buffers(s);
183  return AVERROR(ENOMEM);
184  }
185 
186  for (i = 0; i < MAX_THREADS; i++) {
188  av_mallocz(s->mb_width * sizeof(*s->thread_data[0].filter_strength));
189  if (!s->thread_data[i].filter_strength) {
190  free_buffers(s);
191  return AVERROR(ENOMEM);
192  }
193 #if HAVE_THREADS
194  pthread_mutex_init(&s->thread_data[i].lock, NULL);
195  pthread_cond_init(&s->thread_data[i].cond, NULL);
196 #endif
197  }
198 
199  s->macroblocks = s->macroblocks_base + 1;
200 
201  return 0;
202 }
203 
205 {
206  return update_dimensions(s, width, height, IS_VP7);
207 }
208 
210 {
211  return update_dimensions(s, width, height, IS_VP8);
212 }
213 
214 
216 {
217  VP56RangeCoder *c = &s->c;
218  int i;
219 
221 
222  if (vp8_rac_get(c)) { // update segment feature data
224 
225  for (i = 0; i < 4; i++)
227 
228  for (i = 0; i < 4; i++)
230  }
231  if (s->segmentation.update_map)
232  for (i = 0; i < 3; i++)
233  s->prob->segmentid[i] = vp8_rac_get(c) ? vp8_rac_get_uint(c, 8) : 255;
234 }
235 
237 {
238  VP56RangeCoder *c = &s->c;
239  int i;
240 
241  for (i = 0; i < 4; i++) {
242  if (vp8_rac_get(c)) {
243  s->lf_delta.ref[i] = vp8_rac_get_uint(c, 6);
244 
245  if (vp8_rac_get(c))
246  s->lf_delta.ref[i] = -s->lf_delta.ref[i];
247  }
248  }
249 
250  for (i = MODE_I4x4; i <= VP8_MVMODE_SPLIT; i++) {
251  if (vp8_rac_get(c)) {
252  s->lf_delta.mode[i] = vp8_rac_get_uint(c, 6);
253 
254  if (vp8_rac_get(c))
255  s->lf_delta.mode[i] = -s->lf_delta.mode[i];
256  }
257  }
258 }
259 
260 static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
261 {
262  const uint8_t *sizes = buf;
263  int i;
264  int ret;
265 
266  s->num_coeff_partitions = 1 << vp8_rac_get_uint(&s->c, 2);
267 
268  buf += 3 * (s->num_coeff_partitions - 1);
269  buf_size -= 3 * (s->num_coeff_partitions - 1);
270  if (buf_size < 0)
271  return -1;
272 
273  for (i = 0; i < s->num_coeff_partitions - 1; i++) {
274  int size = AV_RL24(sizes + 3 * i);
275  if (buf_size - size < 0)
276  return -1;
277 
278  ret = ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, size);
279  if (ret < 0)
280  return ret;
281  buf += size;
282  buf_size -= size;
283  }
284  return ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, buf_size);
285 }
286 
287 static void vp7_get_quants(VP8Context *s)
288 {
289  VP56RangeCoder *c = &s->c;
290 
291  int yac_qi = vp8_rac_get_uint(c, 7);
292  int ydc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
293  int y2dc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
294  int y2ac_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
295  int uvdc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
296  int uvac_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
297 
298  s->qmat[0].luma_qmul[0] = vp7_ydc_qlookup[ydc_qi];
299  s->qmat[0].luma_qmul[1] = vp7_yac_qlookup[yac_qi];
300  s->qmat[0].luma_dc_qmul[0] = vp7_y2dc_qlookup[y2dc_qi];
301  s->qmat[0].luma_dc_qmul[1] = vp7_y2ac_qlookup[y2ac_qi];
302  s->qmat[0].chroma_qmul[0] = FFMIN(vp7_ydc_qlookup[uvdc_qi], 132);
303  s->qmat[0].chroma_qmul[1] = vp7_yac_qlookup[uvac_qi];
304 }
305 
306 static void vp8_get_quants(VP8Context *s)
307 {
308  VP56RangeCoder *c = &s->c;
309  int i, base_qi;
310 
311  int yac_qi = vp8_rac_get_uint(c, 7);
312  int ydc_delta = vp8_rac_get_sint(c, 4);
313  int y2dc_delta = vp8_rac_get_sint(c, 4);
314  int y2ac_delta = vp8_rac_get_sint(c, 4);
315  int uvdc_delta = vp8_rac_get_sint(c, 4);
316  int uvac_delta = vp8_rac_get_sint(c, 4);
317 
318  for (i = 0; i < 4; i++) {
319  if (s->segmentation.enabled) {
320  base_qi = s->segmentation.base_quant[i];
321  if (!s->segmentation.absolute_vals)
322  base_qi += yac_qi;
323  } else
324  base_qi = yac_qi;
325 
326  s->qmat[i].luma_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + ydc_delta, 7)];
327  s->qmat[i].luma_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi, 7)];
328  s->qmat[i].luma_dc_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + y2dc_delta, 7)] * 2;
329  /* 101581>>16 is equivalent to 155/100 */
330  s->qmat[i].luma_dc_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi + y2ac_delta, 7)] * 101581 >> 16;
331  s->qmat[i].chroma_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + uvdc_delta, 7)];
332  s->qmat[i].chroma_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi + uvac_delta, 7)];
333 
334  s->qmat[i].luma_dc_qmul[1] = FFMAX(s->qmat[i].luma_dc_qmul[1], 8);
335  s->qmat[i].chroma_qmul[0] = FFMIN(s->qmat[i].chroma_qmul[0], 132);
336  }
337 }
338 
339 /**
340  * Determine which buffers golden and altref should be updated with after this frame.
341  * The spec isn't clear here, so I'm going by my understanding of what libvpx does
342  *
343  * Intra frames update all 3 references
344  * Inter frames update VP56_FRAME_PREVIOUS if the update_last flag is set
345  * If the update (golden|altref) flag is set, it's updated with the current frame
346  * if update_last is set, and VP56_FRAME_PREVIOUS otherwise.
347  * If the flag is not set, the number read means:
348  * 0: no update
349  * 1: VP56_FRAME_PREVIOUS
350  * 2: update golden with altref, or update altref with golden
351  */
353 {
354  VP56RangeCoder *c = &s->c;
355 
356  if (update)
357  return VP56_FRAME_CURRENT;
358 
359  switch (vp8_rac_get_uint(c, 2)) {
360  case 1:
361  return VP56_FRAME_PREVIOUS;
362  case 2:
364  }
365  return VP56_FRAME_NONE;
366 }
367 
369 {
370  int i, j;
371  for (i = 0; i < 4; i++)
372  for (j = 0; j < 16; j++)
373  memcpy(s->prob->token[i][j], vp8_token_default_probs[i][vp8_coeff_band[j]],
374  sizeof(s->prob->token[i][j]));
375 }
376 
378 {
379  VP56RangeCoder *c = &s->c;
380  int i, j, k, l, m;
381 
382  for (i = 0; i < 4; i++)
383  for (j = 0; j < 8; j++)
384  for (k = 0; k < 3; k++)
385  for (l = 0; l < NUM_DCT_TOKENS-1; l++)
387  int prob = vp8_rac_get_uint(c, 8);
388  for (m = 0; vp8_coeff_band_indexes[j][m] >= 0; m++)
389  s->prob->token[i][vp8_coeff_band_indexes[j][m]][k][l] = prob;
390  }
391 }
392 
393 #define VP7_MVC_SIZE 17
394 #define VP8_MVC_SIZE 19
395 
397  int mvc_size)
398 {
399  VP56RangeCoder *c = &s->c;
400  int i, j;
401 
402  if (vp8_rac_get(c))
403  for (i = 0; i < 4; i++)
404  s->prob->pred16x16[i] = vp8_rac_get_uint(c, 8);
405  if (vp8_rac_get(c))
406  for (i = 0; i < 3; i++)
407  s->prob->pred8x8c[i] = vp8_rac_get_uint(c, 8);
408 
409  // 17.2 MV probability update
410  for (i = 0; i < 2; i++)
411  for (j = 0; j < mvc_size; j++)
413  s->prob->mvc[i][j] = vp8_rac_get_nn(c);
414 }
415 
416 static void update_refs(VP8Context *s)
417 {
418  VP56RangeCoder *c = &s->c;
419 
420  int update_golden = vp8_rac_get(c);
421  int update_altref = vp8_rac_get(c);
422 
423  s->update_golden = ref_to_update(s, update_golden, VP56_FRAME_GOLDEN);
424  s->update_altref = ref_to_update(s, update_altref, VP56_FRAME_GOLDEN2);
425 }
426 
427 static void copy_chroma(AVFrame *dst, AVFrame *src, int width, int height)
428 {
429  int i, j;
430 
431  for (j = 1; j < 3; j++) {
432  for (i = 0; i < height / 2; i++)
433  memcpy(dst->data[j] + i * dst->linesize[j],
434  src->data[j] + i * src->linesize[j], width / 2);
435  }
436 }
437 
/**
 * Apply a per-pixel fade to one plane:
 *   out = clip_uint8(in + (in * beta >> 8) + alpha)
 * beta scales the pixel value, alpha is a flat offset.
 */
static void fade(uint8_t *dst, ptrdiff_t dst_linesize,
                 const uint8_t *src, ptrdiff_t src_linesize,
                 int width, int height,
                 int alpha, int beta)
{
    int x, y;

    for (y = 0; y < height; y++) {
        const uint8_t *srow = src + y * src_linesize;
        uint8_t       *drow = dst + y * dst_linesize;

        for (x = 0; x < width; x++) {
            int v = srow[x];
            drow[x] = av_clip_uint8(v + ((v * beta) >> 8) + alpha);
        }
    }
}
451 
453 {
454  int alpha = (int8_t) vp8_rac_get_uint(c, 8);
455  int beta = (int8_t) vp8_rac_get_uint(c, 8);
456  int ret;
457 
458  if (!s->keyframe && (alpha || beta)) {
459  int width = s->mb_width * 16;
460  int height = s->mb_height * 16;
461  AVFrame *src, *dst;
462 
463  if (!s->framep[VP56_FRAME_PREVIOUS] ||
464  !s->framep[VP56_FRAME_GOLDEN]) {
465  av_log(s->avctx, AV_LOG_WARNING, "Discarding interframe without a prior keyframe!\n");
466  return AVERROR_INVALIDDATA;
467  }
468 
469  dst =
470  src = s->framep[VP56_FRAME_PREVIOUS]->tf.f;
471 
472  /* preserve the golden frame, write a new previous frame */
475  if ((ret = vp8_alloc_frame(s, s->framep[VP56_FRAME_PREVIOUS], 1)) < 0)
476  return ret;
477 
478  dst = s->framep[VP56_FRAME_PREVIOUS]->tf.f;
479 
480  copy_chroma(dst, src, width, height);
481  }
482 
483  fade(dst->data[0], dst->linesize[0],
484  src->data[0], src->linesize[0],
485  width, height, alpha, beta);
486  }
487 
488  return 0;
489 }
490 
491 static int vp7_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
492 {
493  VP56RangeCoder *c = &s->c;
494  int part1_size, hscale, vscale, i, j, ret;
495  int width = s->avctx->width;
496  int height = s->avctx->height;
497 
498  if (buf_size < 4) {
499  return AVERROR_INVALIDDATA;
500  }
501 
502  s->profile = (buf[0] >> 1) & 7;
503  if (s->profile > 1) {
504  avpriv_request_sample(s->avctx, "Unknown profile %d", s->profile);
505  return AVERROR_INVALIDDATA;
506  }
507 
508  s->keyframe = !(buf[0] & 1);
509  s->invisible = 0;
510  part1_size = AV_RL24(buf) >> 4;
511 
512  if (buf_size < 4 - s->profile + part1_size) {
513  av_log(s->avctx, AV_LOG_ERROR, "Buffer size %d is too small, needed : %d\n", buf_size, 4 - s->profile + part1_size);
514  return AVERROR_INVALIDDATA;
515  }
516 
517  buf += 4 - s->profile;
518  buf_size -= 4 - s->profile;
519 
520  memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_epel_pixels_tab, sizeof(s->put_pixels_tab));
521 
522  ret = ff_vp56_init_range_decoder(c, buf, part1_size);
523  if (ret < 0)
524  return ret;
525  buf += part1_size;
526  buf_size -= part1_size;
527 
528  /* A. Dimension information (keyframes only) */
529  if (s->keyframe) {
530  width = vp8_rac_get_uint(c, 12);
531  height = vp8_rac_get_uint(c, 12);
532  hscale = vp8_rac_get_uint(c, 2);
533  vscale = vp8_rac_get_uint(c, 2);
534  if (hscale || vscale)
535  avpriv_request_sample(s->avctx, "Upscaling");
536 
540  sizeof(s->prob->pred16x16));
542  sizeof(s->prob->pred8x8c));
543  for (i = 0; i < 2; i++)
544  memcpy(s->prob->mvc[i], vp7_mv_default_prob[i],
545  sizeof(vp7_mv_default_prob[i]));
546  memset(&s->segmentation, 0, sizeof(s->segmentation));
547  memset(&s->lf_delta, 0, sizeof(s->lf_delta));
548  memcpy(s->prob[0].scan, ff_zigzag_scan, sizeof(s->prob[0].scan));
549  }
550 
551  if (s->keyframe || s->profile > 0)
552  memset(s->inter_dc_pred, 0 , sizeof(s->inter_dc_pred));
553 
554  /* B. Decoding information for all four macroblock-level features */
555  for (i = 0; i < 4; i++) {
556  s->feature_enabled[i] = vp8_rac_get(c);
557  if (s->feature_enabled[i]) {
559 
560  for (j = 0; j < 3; j++)
561  s->feature_index_prob[i][j] =
562  vp8_rac_get(c) ? vp8_rac_get_uint(c, 8) : 255;
563 
564  if (vp7_feature_value_size[s->profile][i])
565  for (j = 0; j < 4; j++)
566  s->feature_value[i][j] =
568  }
569  }
570 
571  s->segmentation.enabled = 0;
572  s->segmentation.update_map = 0;
573  s->lf_delta.enabled = 0;
574 
575  s->num_coeff_partitions = 1;
576  ret = ff_vp56_init_range_decoder(&s->coeff_partition[0], buf, buf_size);
577  if (ret < 0)
578  return ret;
579 
580  if (!s->macroblocks_base || /* first frame */
581  width != s->avctx->width || height != s->avctx->height ||
582  (width + 15) / 16 != s->mb_width || (height + 15) / 16 != s->mb_height) {
583  if ((ret = vp7_update_dimensions(s, width, height)) < 0)
584  return ret;
585  }
586 
587  /* C. Dequantization indices */
588  vp7_get_quants(s);
589 
590  /* D. Golden frame update flag (a Flag) for interframes only */
591  if (!s->keyframe) {
594  }
595 
596  s->update_last = 1;
597  s->update_probabilities = 1;
598  s->fade_present = 1;
599 
600  if (s->profile > 0) {
602  if (!s->update_probabilities)
603  s->prob[1] = s->prob[0];
604 
605  if (!s->keyframe)
606  s->fade_present = vp8_rac_get(c);
607  }
608 
609  /* E. Fading information for previous frame */
610  if (s->fade_present && vp8_rac_get(c)) {
611  if ((ret = vp7_fade_frame(s ,c)) < 0)
612  return ret;
613  }
614 
615  /* F. Loop filter type */
616  if (!s->profile)
617  s->filter.simple = vp8_rac_get(c);
618 
619  /* G. DCT coefficient ordering specification */
620  if (vp8_rac_get(c))
621  for (i = 1; i < 16; i++)
622  s->prob[0].scan[i] = ff_zigzag_scan[vp8_rac_get_uint(c, 4)];
623 
624  /* H. Loop filter levels */
625  if (s->profile > 0)
626  s->filter.simple = vp8_rac_get(c);
627  s->filter.level = vp8_rac_get_uint(c, 6);
628  s->filter.sharpness = vp8_rac_get_uint(c, 3);
629 
630  /* I. DCT coefficient probability update; 13.3 Token Probability Updates */
632 
633  s->mbskip_enabled = 0;
634 
635  /* J. The remaining frame header data occurs ONLY FOR INTERFRAMES */
636  if (!s->keyframe) {
637  s->prob->intra = vp8_rac_get_uint(c, 8);
638  s->prob->last = vp8_rac_get_uint(c, 8);
640  }
641 
642  return 0;
643 }
644 
645 static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
646 {
647  VP56RangeCoder *c = &s->c;
648  int header_size, hscale, vscale, ret;
649  int width = s->avctx->width;
650  int height = s->avctx->height;
651 
652  if (buf_size < 3) {
653  av_log(s->avctx, AV_LOG_ERROR, "Insufficent data (%d) for header\n", buf_size);
654  return AVERROR_INVALIDDATA;
655  }
656 
657  s->keyframe = !(buf[0] & 1);
658  s->profile = (buf[0]>>1) & 7;
659  s->invisible = !(buf[0] & 0x10);
660  header_size = AV_RL24(buf) >> 5;
661  buf += 3;
662  buf_size -= 3;
663 
664  if (s->profile > 3)
665  av_log(s->avctx, AV_LOG_WARNING, "Unknown profile %d\n", s->profile);
666 
667  if (!s->profile)
669  sizeof(s->put_pixels_tab));
670  else // profile 1-3 use bilinear, 4+ aren't defined so whatever
672  sizeof(s->put_pixels_tab));
673 
674  if (header_size > buf_size - 7 * s->keyframe) {
675  av_log(s->avctx, AV_LOG_ERROR, "Header size larger than data provided\n");
676  return AVERROR_INVALIDDATA;
677  }
678 
679  if (s->keyframe) {
680  if (AV_RL24(buf) != 0x2a019d) {
682  "Invalid start code 0x%x\n", AV_RL24(buf));
683  return AVERROR_INVALIDDATA;
684  }
685  width = AV_RL16(buf + 3) & 0x3fff;
686  height = AV_RL16(buf + 5) & 0x3fff;
687  hscale = buf[4] >> 6;
688  vscale = buf[6] >> 6;
689  buf += 7;
690  buf_size -= 7;
691 
692  if (hscale || vscale)
693  avpriv_request_sample(s->avctx, "Upscaling");
694 
698  sizeof(s->prob->pred16x16));
700  sizeof(s->prob->pred8x8c));
701  memcpy(s->prob->mvc, vp8_mv_default_prob,
702  sizeof(s->prob->mvc));
703  memset(&s->segmentation, 0, sizeof(s->segmentation));
704  memset(&s->lf_delta, 0, sizeof(s->lf_delta));
705  }
706 
707  ret = ff_vp56_init_range_decoder(c, buf, header_size);
708  if (ret < 0)
709  return ret;
710  buf += header_size;
711  buf_size -= header_size;
712 
713  if (s->keyframe) {
714  s->colorspace = vp8_rac_get(c);
715  if (s->colorspace)
716  av_log(s->avctx, AV_LOG_WARNING, "Unspecified colorspace\n");
717  s->fullrange = vp8_rac_get(c);
718  }
719 
720  if ((s->segmentation.enabled = vp8_rac_get(c)))
722  else
723  s->segmentation.update_map = 0; // FIXME: move this to some init function?
724 
725  s->filter.simple = vp8_rac_get(c);
726  s->filter.level = vp8_rac_get_uint(c, 6);
727  s->filter.sharpness = vp8_rac_get_uint(c, 3);
728 
729  if ((s->lf_delta.enabled = vp8_rac_get(c)))
730  if (vp8_rac_get(c))
731  update_lf_deltas(s);
732 
733  if (setup_partitions(s, buf, buf_size)) {
734  av_log(s->avctx, AV_LOG_ERROR, "Invalid partitions\n");
735  return AVERROR_INVALIDDATA;
736  }
737 
738  if (!s->macroblocks_base || /* first frame */
739  width != s->avctx->width || height != s->avctx->height ||
740  (width+15)/16 != s->mb_width || (height+15)/16 != s->mb_height)
741  if ((ret = vp8_update_dimensions(s, width, height)) < 0)
742  return ret;
743 
744  vp8_get_quants(s);
745 
746  if (!s->keyframe) {
747  update_refs(s);
749  s->sign_bias[VP56_FRAME_GOLDEN2 /* altref */] = vp8_rac_get(c);
750  }
751 
752  // if we aren't saving this frame's probabilities for future frames,
753  // make a copy of the current probabilities
754  if (!(s->update_probabilities = vp8_rac_get(c)))
755  s->prob[1] = s->prob[0];
756 
757  s->update_last = s->keyframe || vp8_rac_get(c);
758 
760 
761  if ((s->mbskip_enabled = vp8_rac_get(c)))
762  s->prob->mbskip = vp8_rac_get_uint(c, 8);
763 
764  if (!s->keyframe) {
765  s->prob->intra = vp8_rac_get_uint(c, 8);
766  s->prob->last = vp8_rac_get_uint(c, 8);
767  s->prob->golden = vp8_rac_get_uint(c, 8);
769  }
770 
771  return 0;
772 }
773 
774 static av_always_inline
775 void clamp_mv(VP8Context *s, VP56mv *dst, const VP56mv *src)
776 {
777  dst->x = av_clip(src->x, av_clip(s->mv_min.x, INT16_MIN, INT16_MAX),
778  av_clip(s->mv_max.x, INT16_MIN, INT16_MAX));
779  dst->y = av_clip(src->y, av_clip(s->mv_min.y, INT16_MIN, INT16_MAX),
780  av_clip(s->mv_max.y, INT16_MIN, INT16_MAX));
781 }
782 
783 /**
784  * Motion vector coding, 17.1.
785  */
787 {
788  int bit, x = 0;
789 
790  if (vp56_rac_get_prob_branchy(c, p[0])) {
791  int i;
792 
793  for (i = 0; i < 3; i++)
794  x += vp56_rac_get_prob(c, p[9 + i]) << i;
795  for (i = (vp7 ? 7 : 9); i > 3; i--)
796  x += vp56_rac_get_prob(c, p[9 + i]) << i;
797  if (!(x & (vp7 ? 0xF0 : 0xFFF0)) || vp56_rac_get_prob(c, p[12]))
798  x += 8;
799  } else {
800  // small_mvtree
801  const uint8_t *ps = p + 2;
802  bit = vp56_rac_get_prob(c, *ps);
803  ps += 1 + 3 * bit;
804  x += 4 * bit;
805  bit = vp56_rac_get_prob(c, *ps);
806  ps += 1 + bit;
807  x += 2 * bit;
808  x += vp56_rac_get_prob(c, *ps);
809  }
810 
811  return (x && vp56_rac_get_prob(c, p[1])) ? -x : x;
812 }
813 
815 {
816  return read_mv_component(c, p, 1);
817 }
818 
820 {
821  return read_mv_component(c, p, 0);
822 }
823 
824 static av_always_inline
825 const uint8_t *get_submv_prob(uint32_t left, uint32_t top, int is_vp7)
826 {
827  if (is_vp7)
828  return vp7_submv_prob;
829 
830  if (left == top)
831  return vp8_submv_prob[4 - !!left];
832  if (!top)
833  return vp8_submv_prob[2];
834  return vp8_submv_prob[1 - !!left];
835 }
836 
837 /**
838  * Split motion vector prediction, 16.4.
839  * @returns the number of motion vectors parsed (2, 4 or 16)
840  */
841 static av_always_inline
843  int layout, int is_vp7)
844 {
845  int part_idx;
846  int n, num;
847  VP8Macroblock *top_mb;
848  VP8Macroblock *left_mb = &mb[-1];
849  const uint8_t *mbsplits_left = vp8_mbsplits[left_mb->partitioning];
850  const uint8_t *mbsplits_top, *mbsplits_cur, *firstidx;
851  VP56mv *top_mv;
852  VP56mv *left_mv = left_mb->bmv;
853  VP56mv *cur_mv = mb->bmv;
854 
855  if (!layout) // layout is inlined, s->mb_layout is not
856  top_mb = &mb[2];
857  else
858  top_mb = &mb[-s->mb_width - 1];
859  mbsplits_top = vp8_mbsplits[top_mb->partitioning];
860  top_mv = top_mb->bmv;
861 
865  else
866  part_idx = VP8_SPLITMVMODE_8x8;
867  } else {
868  part_idx = VP8_SPLITMVMODE_4x4;
869  }
870 
871  num = vp8_mbsplit_count[part_idx];
872  mbsplits_cur = vp8_mbsplits[part_idx],
873  firstidx = vp8_mbfirstidx[part_idx];
874  mb->partitioning = part_idx;
875 
876  for (n = 0; n < num; n++) {
877  int k = firstidx[n];
878  uint32_t left, above;
879  const uint8_t *submv_prob;
880 
881  if (!(k & 3))
882  left = AV_RN32A(&left_mv[mbsplits_left[k + 3]]);
883  else
884  left = AV_RN32A(&cur_mv[mbsplits_cur[k - 1]]);
885  if (k <= 3)
886  above = AV_RN32A(&top_mv[mbsplits_top[k + 12]]);
887  else
888  above = AV_RN32A(&cur_mv[mbsplits_cur[k - 4]]);
889 
890  submv_prob = get_submv_prob(left, above, is_vp7);
891 
892  if (vp56_rac_get_prob_branchy(c, submv_prob[0])) {
893  if (vp56_rac_get_prob_branchy(c, submv_prob[1])) {
894  if (vp56_rac_get_prob_branchy(c, submv_prob[2])) {
895  mb->bmv[n].y = mb->mv.y +
896  read_mv_component(c, s->prob->mvc[0], is_vp7);
897  mb->bmv[n].x = mb->mv.x +
898  read_mv_component(c, s->prob->mvc[1], is_vp7);
899  } else {
900  AV_ZERO32(&mb->bmv[n]);
901  }
902  } else {
903  AV_WN32A(&mb->bmv[n], above);
904  }
905  } else {
906  AV_WN32A(&mb->bmv[n], left);
907  }
908  }
909 
910  return num;
911 }
912 
913 /**
914  * The vp7 reference decoder uses a padding macroblock column (added to right
915  * edge of the frame) to guard against illegal macroblock offsets. The
916  * algorithm has bugs that permit offsets to straddle the padding column.
917  * This function replicates those bugs.
918  *
919  * @param[out] edge_x macroblock x address
920  * @param[out] edge_y macroblock y address
921  *
922  * @return macroblock offset legal (boolean)
923  */
static int vp7_calculate_mb_offset(int mb_x, int mb_y, int mb_width,
                                   int xoffset, int yoffset, int boundary,
                                   int *edge_x, int *edge_y)
{
    int stride = mb_width + 1;  /* row width including the padding column */
    int offset = (mb_y + yoffset) * stride + mb_x + xoffset;

    /* reject offsets before the boundary or landing on the padding column */
    if (offset < boundary || offset % stride == stride - 1)
        return 0;

    *edge_y = offset / stride;
    *edge_x = offset % stride;
    return 1;
}
936 
937 static const VP56mv *get_bmv_ptr(const VP8Macroblock *mb, int subblock)
938 {
939  return &mb->bmv[mb->mode == VP8_MVMODE_SPLIT ? vp8_mbsplits[mb->partitioning][subblock] : 0];
940 }
941 
942 static av_always_inline
944  int mb_x, int mb_y, int layout)
945 {
946  VP8Macroblock *mb_edge[12];
947  enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR };
948  enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
949  int idx = CNT_ZERO;
950  VP56mv near_mv[3];
951  uint8_t cnt[3] = { 0 };
952  VP56RangeCoder *c = &s->c;
953  int i;
954 
955  AV_ZERO32(&near_mv[0]);
956  AV_ZERO32(&near_mv[1]);
957  AV_ZERO32(&near_mv[2]);
958 
959  for (i = 0; i < VP7_MV_PRED_COUNT; i++) {
960  const VP7MVPred * pred = &vp7_mv_pred[i];
961  int edge_x, edge_y;
962 
963  if (vp7_calculate_mb_offset(mb_x, mb_y, s->mb_width, pred->xoffset,
964  pred->yoffset, !s->profile, &edge_x, &edge_y)) {
965  VP8Macroblock *edge = mb_edge[i] = (s->mb_layout == 1)
966  ? s->macroblocks_base + 1 + edge_x +
967  (s->mb_width + 1) * (edge_y + 1)
968  : s->macroblocks + edge_x +
969  (s->mb_height - edge_y - 1) * 2;
970  uint32_t mv = AV_RN32A(get_bmv_ptr(edge, vp7_mv_pred[i].subblock));
971  if (mv) {
972  if (AV_RN32A(&near_mv[CNT_NEAREST])) {
973  if (mv == AV_RN32A(&near_mv[CNT_NEAREST])) {
974  idx = CNT_NEAREST;
975  } else if (AV_RN32A(&near_mv[CNT_NEAR])) {
976  if (mv != AV_RN32A(&near_mv[CNT_NEAR]))
977  continue;
978  idx = CNT_NEAR;
979  } else {
980  AV_WN32A(&near_mv[CNT_NEAR], mv);
981  idx = CNT_NEAR;
982  }
983  } else {
984  AV_WN32A(&near_mv[CNT_NEAREST], mv);
985  idx = CNT_NEAREST;
986  }
987  } else {
988  idx = CNT_ZERO;
989  }
990  } else {
991  idx = CNT_ZERO;
992  }
993  cnt[idx] += vp7_mv_pred[i].score;
994  }
995 
997 
998  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_ZERO]][0])) {
999  mb->mode = VP8_MVMODE_MV;
1000 
1001  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAREST]][1])) {
1002 
1003  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAR]][2])) {
1004 
1005  if (cnt[CNT_NEAREST] > cnt[CNT_NEAR])
1006  AV_WN32A(&mb->mv, cnt[CNT_ZERO] > cnt[CNT_NEAREST] ? 0 : AV_RN32A(&near_mv[CNT_NEAREST]));
1007  else
1008  AV_WN32A(&mb->mv, cnt[CNT_ZERO] > cnt[CNT_NEAR] ? 0 : AV_RN32A(&near_mv[CNT_NEAR]));
1009 
1010  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAR]][3])) {
1011  mb->mode = VP8_MVMODE_SPLIT;
1012  mb->mv = mb->bmv[decode_splitmvs(s, c, mb, layout, IS_VP7) - 1];
1013  } else {
1014  mb->mv.y += vp7_read_mv_component(c, s->prob->mvc[0]);
1015  mb->mv.x += vp7_read_mv_component(c, s->prob->mvc[1]);
1016  mb->bmv[0] = mb->mv;
1017  }
1018  } else {
1019  mb->mv = near_mv[CNT_NEAR];
1020  mb->bmv[0] = mb->mv;
1021  }
1022  } else {
1023  mb->mv = near_mv[CNT_NEAREST];
1024  mb->bmv[0] = mb->mv;
1025  }
1026  } else {
1027  mb->mode = VP8_MVMODE_ZERO;
1028  AV_ZERO32(&mb->mv);
1029  mb->bmv[0] = mb->mv;
1030  }
1031 }
1032 
1033 static av_always_inline
1035  int mb_x, int mb_y, int layout)
1036 {
1037  VP8Macroblock *mb_edge[3] = { 0 /* top */,
1038  mb - 1 /* left */,
1039  0 /* top-left */ };
1040  enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };
1041  enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
1042  int idx = CNT_ZERO;
1043  int cur_sign_bias = s->sign_bias[mb->ref_frame];
1044  int8_t *sign_bias = s->sign_bias;
1045  VP56mv near_mv[4];
1046  uint8_t cnt[4] = { 0 };
1047  VP56RangeCoder *c = &s->c;
1048 
1049  if (!layout) { // layout is inlined (s->mb_layout is not)
1050  mb_edge[0] = mb + 2;
1051  mb_edge[2] = mb + 1;
1052  } else {
1053  mb_edge[0] = mb - s->mb_width - 1;
1054  mb_edge[2] = mb - s->mb_width - 2;
1055  }
1056 
1057  AV_ZERO32(&near_mv[0]);
1058  AV_ZERO32(&near_mv[1]);
1059  AV_ZERO32(&near_mv[2]);
1060 
1061  /* Process MB on top, left and top-left */
1062 #define MV_EDGE_CHECK(n) \
1063  { \
1064  VP8Macroblock *edge = mb_edge[n]; \
1065  int edge_ref = edge->ref_frame; \
1066  if (edge_ref != VP56_FRAME_CURRENT) { \
1067  uint32_t mv = AV_RN32A(&edge->mv); \
1068  if (mv) { \
1069  if (cur_sign_bias != sign_bias[edge_ref]) { \
1070  /* SWAR negate of the values in mv. */ \
1071  mv = ~mv; \
1072  mv = ((mv & 0x7fff7fff) + \
1073  0x00010001) ^ (mv & 0x80008000); \
1074  } \
1075  if (!n || mv != AV_RN32A(&near_mv[idx])) \
1076  AV_WN32A(&near_mv[++idx], mv); \
1077  cnt[idx] += 1 + (n != 2); \
1078  } else \
1079  cnt[CNT_ZERO] += 1 + (n != 2); \
1080  } \
1081  }
1082 
1083  MV_EDGE_CHECK(0)
1084  MV_EDGE_CHECK(1)
1085  MV_EDGE_CHECK(2)
1086 
1088  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_ZERO]][0])) {
1089  mb->mode = VP8_MVMODE_MV;
1090 
1091  /* If we have three distinct MVs, merge first and last if they're the same */
1092  if (cnt[CNT_SPLITMV] &&
1093  AV_RN32A(&near_mv[1 + VP8_EDGE_TOP]) == AV_RN32A(&near_mv[1 + VP8_EDGE_TOPLEFT]))
1094  cnt[CNT_NEAREST] += 1;
1095 
1096  /* Swap near and nearest if necessary */
1097  if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
1098  FFSWAP(uint8_t, cnt[CNT_NEAREST], cnt[CNT_NEAR]);
1099  FFSWAP( VP56mv, near_mv[CNT_NEAREST], near_mv[CNT_NEAR]);
1100  }
1101 
1102  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAREST]][1])) {
1103  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAR]][2])) {
1104  /* Choose the best mv out of 0,0 and the nearest mv */
1105  clamp_mv(s, &mb->mv, &near_mv[CNT_ZERO + (cnt[CNT_NEAREST] >= cnt[CNT_ZERO])]);
1106  cnt[CNT_SPLITMV] = ((mb_edge[VP8_EDGE_LEFT]->mode == VP8_MVMODE_SPLIT) +
1107  (mb_edge[VP8_EDGE_TOP]->mode == VP8_MVMODE_SPLIT)) * 2 +
1108  (mb_edge[VP8_EDGE_TOPLEFT]->mode == VP8_MVMODE_SPLIT);
1109 
1110  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_SPLITMV]][3])) {
1111  mb->mode = VP8_MVMODE_SPLIT;
1112  mb->mv = mb->bmv[decode_splitmvs(s, c, mb, layout, IS_VP8) - 1];
1113  } else {
1114  mb->mv.y += vp8_read_mv_component(c, s->prob->mvc[0]);
1115  mb->mv.x += vp8_read_mv_component(c, s->prob->mvc[1]);
1116  mb->bmv[0] = mb->mv;
1117  }
1118  } else {
1119  clamp_mv(s, &mb->mv, &near_mv[CNT_NEAR]);
1120  mb->bmv[0] = mb->mv;
1121  }
1122  } else {
1123  clamp_mv(s, &mb->mv, &near_mv[CNT_NEAREST]);
1124  mb->bmv[0] = mb->mv;
1125  }
1126  } else {
1127  mb->mode = VP8_MVMODE_ZERO;
1128  AV_ZERO32(&mb->mv);
1129  mb->bmv[0] = mb->mv;
1130  }
1131 }
1132 
/* Read the 16 4x4 intra prediction modes of an I4x4 macroblock and refresh
 * the left/top mode prediction contexts.
 * NOTE(review): the line carrying the function name and leading parameters
 * (decode_intra4x4_modes(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb,
 * per the callers in decode_mb_mode) is missing from this extract — restore
 * from upstream vp8.c before building. */
static av_always_inline
                           int mb_x, int keyframe, int layout)
{
    uint8_t *intra4x4 = mb->intra4x4_pred_mode_mb;

    if (layout) {
        // single-pass layout keeps the top-row modes in the MB one row up
        VP8Macroblock *mb_top = mb - s->mb_width - 1;
        memcpy(mb->intra4x4_pred_mode_top, mb_top->intra4x4_pred_mode_top, 4);
    }
    if (keyframe) {
        // keyframes predict each subblock mode from its top/left neighbours
        int x, y;
        uint8_t *top;
        uint8_t *const left = s->intra4x4_pred_mode_left;
        if (layout)
            top = mb->intra4x4_pred_mode_top;
        else
            top = s->intra4x4_pred_mode_top + 4 * mb_x;
        for (y = 0; y < 4; y++) {
            for (x = 0; x < 4; x++) {
                const uint8_t *ctx;
                ctx = vp8_pred4x4_prob_intra[top[x]][left[y]];
                *intra4x4 = vp8_rac_get_tree(c, vp8_pred4x4_tree, ctx);
                left[y] = top[x] = *intra4x4;
                intra4x4++;
            }
        }
    } else {
        // inter frames use a single fixed probability set for all 16 modes
        int i;
        for (i = 0; i < 16; i++)
            intra4x4[i] = vp8_rac_get_tree(c, vp8_pred4x4_tree,
            /* NOTE(review): the final argument line (the probability table)
             * is missing from this extract. */
    }
}
1167 
/* Parse per-macroblock mode info: segment id, skip flag, intra prediction
 * modes or (for inter MBs) the reference frame and motion vectors
 * (VP8 bitstream sections 16.1-16.3). VP7 additionally signals per-MB
 * "features".
 * NOTE(review): several source lines are missing from this extract (marked
 * below) — verify against upstream vp8.c before building. */
static av_always_inline
void decode_mb_mode(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y,
                    uint8_t *segment, uint8_t *ref, int layout, int is_vp7)
{
    VP56RangeCoder *c = &s->c;
    static const char *vp7_feature_name[] = { "q-index",
                                              "lf-delta",
                                              "partial-golden-update",
                                              "blit-pitch" };
    if (is_vp7) {
        int i;
        *segment = 0;
        for (i = 0; i < 4; i++) {
            if (s->feature_enabled[i]) {
                /* NOTE(review): lines missing here (feature-present check
                 * opening a block, and the feature index read). */
                                              s->feature_index_prob[i]);
                /* NOTE(review): the av_log call line is missing here. */
                       "Feature %s present in macroblock (value 0x%x)\n",
                       vp7_feature_name[i], s->feature_value[i][index]);
            }
        }
    }
    } else if (s->segmentation.update_map) {
        // explicit segment id update coded in the bitstream
        int bit = vp56_rac_get_prob(c, s->prob->segmentid[0]);
        *segment = vp56_rac_get_prob(c, s->prob->segmentid[1+bit]) + 2*bit;
    } else if (s->segmentation.enabled)
        // no update: inherit from reference map when available
        *segment = ref ? *ref : *segment;
    mb->segment = *segment;

    mb->skip = s->mbskip_enabled ? vp56_rac_get_prob(c, s->prob->mbskip) : 0;

    if (s->keyframe) {
        /* NOTE(review): the intra 16x16 mode read lines are missing here. */

        if (mb->mode == MODE_I4x4) {
            decode_intra4x4_modes(s, c, mb, mb_x, 1, layout);
        } else {
            /* Broadcast the 16x16 mode into all 16 4x4 mode slots so that
             * neighbouring I4x4 blocks see a consistent prediction context. */
            const uint32_t modes = (is_vp7 ? vp7_pred4x4_mode
                                           : vp8_pred4x4_mode)[mb->mode] * 0x01010101u;
            if (s->mb_layout)
                AV_WN32A(mb->intra4x4_pred_mode_top, modes);
            else
                AV_WN32A(s->intra4x4_pred_mode_top + 4 * mb_x, modes);
            AV_WN32A(s->intra4x4_pred_mode_left, modes);
        }

        /* NOTE(review): chroma mode / ref_frame setup lines missing here. */
    } else if (vp56_rac_get_prob_branchy(c, s->prob->intra)) {
        // inter MB, 16.2
        if (vp56_rac_get_prob_branchy(c, s->prob->last))
            mb->ref_frame =
                (!is_vp7 && vp56_rac_get_prob(c, s->prob->golden)) ? VP56_FRAME_GOLDEN2 /* altref */
                /* NOTE(review): the alternative (golden frame) line of this
                 * conditional expression is missing here. */
        else
            /* NOTE(review): the previous-frame assignment line is missing. */
        s->ref_count[mb->ref_frame - 1]++;

        // motion vectors, 16.3
        if (is_vp7)
            vp7_decode_mvs(s, mb, mb_x, mb_y, layout);
        else
            vp8_decode_mvs(s, mb, mb_x, mb_y, layout);
    } else {
        // intra MB, 16.1
        /* NOTE(review): the 16x16 mode read line is missing here. */

        if (mb->mode == MODE_I4x4)
            decode_intra4x4_modes(s, c, mb, mb_x, 0, layout);

        /* NOTE(review): the first line of the chroma mode read is missing. */
                                             s->prob->pred8x8c);
        /* NOTE(review): ref_frame/partitioning assignment lines missing. */
        AV_ZERO32(&mb->bmv[0]);
    }
}
1248 
1249 /**
1250  * @param r arithmetic bitstream reader context
1251  * @param block destination for block coefficients
1252  * @param probs probabilities to use when reading trees from the bitstream
1253  * @param i initial coeff index, 0 unless a separate DC block is coded
1254  * @param qmul array holding the dc/ac dequant factor at position 0/1
1255  *
1256  * @return 0 if no coeffs were decoded
1257  * otherwise, the index of the last coeff decoded plus one
1258  */
/* Token loop for one coefficient block (VP8 bitstream section 13); shared by
 * VP7 and VP8 via the vp7/scan parameters.
 * NOTE(review): the line with this function's name and first parameters
 * (decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16], per
 * the callers below) is missing from this extract. */
static av_always_inline
                                 uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
                                 int i, uint8_t *token_prob, int16_t qmul[2],
                                 const uint8_t scan[16], int vp7)
{
    /* Work on a local copy of the range coder so its state can stay in
     * registers through this hot loop; written back on exit. */
    VP56RangeCoder c = *r;
    goto skip_eob;
    do {
        int coeff;
restart:
        if (!vp56_rac_get_prob_branchy(&c, token_prob[0]))   // DCT_EOB
            break;

skip_eob:
        if (!vp56_rac_get_prob_branchy(&c, token_prob[1])) { // DCT_0
            if (++i == 16)
                break; // invalid input; blocks should end with EOB
            token_prob = probs[i][0];
            /* VP7 re-checks for EOB after a zero (restart); VP8 does not
             * (skip_eob). */
            if (vp7)
                goto restart;
            goto skip_eob;
        }

        if (!vp56_rac_get_prob_branchy(&c, token_prob[2])) { // DCT_1
            coeff = 1;
            token_prob = probs[i + 1][1];
        } else {
            if (!vp56_rac_get_prob_branchy(&c, token_prob[3])) { // DCT 2,3,4
                coeff = vp56_rac_get_prob_branchy(&c, token_prob[4]);
                if (coeff)
                    coeff += vp56_rac_get_prob(&c, token_prob[5]);
                coeff += 2;
            } else {
                // DCT_CAT*
                if (!vp56_rac_get_prob_branchy(&c, token_prob[6])) {
                    if (!vp56_rac_get_prob_branchy(&c, token_prob[7])) { // DCT_CAT1
                        coeff = 5 + vp56_rac_get_prob(&c, vp8_dct_cat1_prob[0]);
                    } else { // DCT_CAT2
                        coeff = 7;
                        coeff += vp56_rac_get_prob(&c, vp8_dct_cat2_prob[0]) << 1;
                        coeff += vp56_rac_get_prob(&c, vp8_dct_cat2_prob[1]);
                    }
                } else { // DCT_CAT3 and up
                    int a = vp56_rac_get_prob(&c, token_prob[8]);
                    int b = vp56_rac_get_prob(&c, token_prob[9 + a]);
                    int cat = (a << 1) + b;
                    coeff = 3 + (8 << cat);
                    coeff += vp8_rac_get_coeff(&c, ff_vp8_dct_cat_prob[cat]);
                }
            }
            token_prob = probs[i + 1][2];
        }
        /* Sign bit, then dequantize: qmul[0] applies to the DC (i == 0),
         * qmul[1] to all AC coefficients. */
        block[scan[i]] = (vp8_rac_get(&c) ? -coeff : coeff) * qmul[!!i];
    } while (++i < 16);

    *r = c;
    return i;
}
1318 
1319 static av_always_inline
1320 int inter_predict_dc(int16_t block[16], int16_t pred[2])
1321 {
1322  int16_t dc = block[0];
1323  int ret = 0;
1324 
1325  if (pred[1] > 3) {
1326  dc += pred[0];
1327  ret = 1;
1328  }
1329 
1330  if (!pred[0] | !dc | ((int32_t)pred[0] ^ (int32_t)dc) >> 31) {
1331  block[0] = pred[0] = dc;
1332  pred[1] = 0;
1333  } else {
1334  if (pred[0] == dc)
1335  pred[1]++;
1336  block[0] = pred[0] = dc;
1337  }
1338 
1339  return ret;
1340 }
1341 
1343  int16_t block[16],
1344  uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
1345  int i, uint8_t *token_prob,
1346  int16_t qmul[2],
1347  const uint8_t scan[16])
1348 {
1349  return decode_block_coeffs_internal(r, block, probs, i,
1350  token_prob, qmul, scan, IS_VP7);
1351 }
1352 
#ifndef vp8_decode_block_coeffs_internal
/* VP8 entry point for the shared coefficient decoder; always uses the fixed
 * zigzag scan. Guarded so an arch-specific version (see the ARCH_ARM include
 * at the top of the file) can override it.
 * NOTE(review): the signature line carrying the function name is missing
 * from this extract. */
                                      int16_t block[16],
                                      uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
                                      int i, uint8_t *token_prob,
                                      int16_t qmul[2])
{
    return decode_block_coeffs_internal(r, block, probs, i,
                                        token_prob, qmul, ff_zigzag_scan, IS_VP8);
}
#endif
1364 
1365 /**
1366  * @param c arithmetic bitstream reader context
1367  * @param block destination for block coefficients
1368  * @param probs probabilities to use when reading trees from the bitstream
1369  * @param i initial coeff index, 0 unless a separate DC block is coded
1370  * @param zero_nhood the initial prediction context for number of surrounding
1371  * all-zero blocks (only left/top, so 0-2)
1372  * @param qmul array holding the dc/ac dequant factor at position 0/1
1373  * @param scan scan pattern (VP7 only)
1374  *
1375  * @return 0 if no coeffs were decoded
1376  * otherwise, the index of the last coeff decoded plus one
1377  */
/* NOTE(review): the signature line (decode_block_coeffs with the parameters
 * documented in the doxygen block above) is missing from this extract. */
static av_always_inline
                        uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
                        int i, int zero_nhood, int16_t qmul[2],
                        const uint8_t scan[16], int vp7)
{
    uint8_t *token_prob = probs[i][zero_nhood];
    /* Check for an immediate EOB before dispatching to the full token loop. */
    if (!vp56_rac_get_prob_branchy(c, token_prob[0]))   // DCT_EOB
        return 0;
    return vp7 ? vp7_decode_block_coeffs_internal(c, block, probs, i,
                                                  token_prob, qmul, scan)
               : vp8_decode_block_coeffs_internal(c, block, probs, i,
                                                  token_prob, qmul);
}
1392 
/* Decode all coefficient blocks of one macroblock: the optional luma DC
 * (WHT) block, the 16 luma 4x4 blocks and the 8 chroma 4x4 blocks, updating
 * the per-column/row non-zero contexts (t_nnz/l_nnz) as it goes.
 * NOTE(review): the signature line carrying the function name
 * (decode_mb_coeffs) and leading parameters is missing from this extract. */
static av_always_inline
                      VP8Macroblock *mb, uint8_t t_nnz[9], uint8_t l_nnz[9],
                      int is_vp7)
{
    int i, x, y, luma_start = 0, luma_ctx = 3;
    int nnz_pred, nnz, nnz_total = 0;
    int segment = mb->segment;
    int block_dc = 0;

    if (mb->mode != MODE_I4x4 && (is_vp7 || mb->mode != VP8_MVMODE_SPLIT)) {
        nnz_pred = t_nnz[8] + l_nnz[8];

        // decode DC values and do hadamard
        nnz = decode_block_coeffs(c, td->block_dc, s->prob->token[1], 0,
                                  nnz_pred, s->qmat[segment].luma_dc_qmul,
                                  ff_zigzag_scan, is_vp7);
        l_nnz[8] = t_nnz[8] = !!nnz;

        if (is_vp7 && mb->mode > MODE_I4x4) {
            // VP7 predicts the DC of inter macroblocks from the previous DC
            nnz |= inter_predict_dc(td->block_dc,
                                    s->inter_dc_pred[mb->ref_frame - 1]);
        }

        if (nnz) {
            nnz_total += nnz;
            block_dc = 1;
            /* The inverse WHT distributes the DC values into the 16 luma
             * blocks; the _dc variant shortcuts the lone-DC case. */
            if (nnz == 1)
                s->vp8dsp.vp8_luma_dc_wht_dc(td->block, td->block_dc);
            else
                s->vp8dsp.vp8_luma_dc_wht(td->block, td->block_dc);
        }
        luma_start = 1;   // luma blocks then start decoding at coefficient 1
        luma_ctx = 0;
    }

    // luma blocks
    for (y = 0; y < 4; y++)
        for (x = 0; x < 4; x++) {
            nnz_pred = l_nnz[y] + t_nnz[x];
            nnz = decode_block_coeffs(c, td->block[y][x],
                                      s->prob->token[luma_ctx],
                                      luma_start, nnz_pred,
                                      s->qmat[segment].luma_qmul,
                                      s->prob[0].scan, is_vp7);
            /* nnz+block_dc may be one more than the actual last index,
             * but we don't care */
            td->non_zero_count_cache[y][x] = nnz + block_dc;
            t_nnz[x] = l_nnz[y] = !!nnz;
            nnz_total += nnz;
        }

    // chroma blocks
    // TODO: what to do about dimensions? 2nd dim for luma is x,
    // but for chroma it's (y<<1)|x
    for (i = 4; i < 6; i++)
        for (y = 0; y < 2; y++)
            for (x = 0; x < 2; x++) {
                nnz_pred = l_nnz[i + 2 * y] + t_nnz[i + 2 * x];
                nnz = decode_block_coeffs(c, td->block[i][(y << 1) + x],
                                          s->prob->token[2], 0, nnz_pred,
                                          s->qmat[segment].chroma_qmul,
                                          s->prob[0].scan, is_vp7);
                td->non_zero_count_cache[i][(y << 1) + x] = nnz;
                t_nnz[i + 2 * x] = l_nnz[i + 2 * y] = !!nnz;
                nnz_total += nnz;
            }

    // if there were no coded coeffs despite the macroblock not being marked skip,
    // we MUST not do the inner loop filter and should not do IDCT
    // Since skip isn't used for bitstream prediction, just manually set it.
    if (!nnz_total)
        mb->skip = 1;
}
1467 
1468 static av_always_inline
1469 void backup_mb_border(uint8_t *top_border, uint8_t *src_y,
1470  uint8_t *src_cb, uint8_t *src_cr,
1471  ptrdiff_t linesize, ptrdiff_t uvlinesize, int simple)
1472 {
1473  AV_COPY128(top_border, src_y + 15 * linesize);
1474  if (!simple) {
1475  AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
1476  AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
1477  }
1478 }
1479 
1480 static av_always_inline
1481 void xchg_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb,
1482  uint8_t *src_cr, ptrdiff_t linesize, ptrdiff_t uvlinesize, int mb_x,
1483  int mb_y, int mb_width, int simple, int xchg)
1484 {
1485  uint8_t *top_border_m1 = top_border - 32; // for TL prediction
1486  src_y -= linesize;
1487  src_cb -= uvlinesize;
1488  src_cr -= uvlinesize;
1489 
1490 #define XCHG(a, b, xchg) \
1491  do { \
1492  if (xchg) \
1493  AV_SWAP64(b, a); \
1494  else \
1495  AV_COPY64(b, a); \
1496  } while (0)
1497 
1498  XCHG(top_border_m1 + 8, src_y - 8, xchg);
1499  XCHG(top_border, src_y, xchg);
1500  XCHG(top_border + 8, src_y + 8, 1);
1501  if (mb_x < mb_width - 1)
1502  XCHG(top_border + 32, src_y + 16, 1);
1503 
1504  // only copy chroma for normal loop filter
1505  // or to initialize the top row to 127
1506  if (!simple || !mb_y) {
1507  XCHG(top_border_m1 + 16, src_cb - 8, xchg);
1508  XCHG(top_border_m1 + 24, src_cr - 8, xchg);
1509  XCHG(top_border + 16, src_cb, 1);
1510  XCHG(top_border + 24, src_cr, 1);
1511  }
1512 }
1513 
1514 static av_always_inline
1515 int check_dc_pred8x8_mode(int mode, int mb_x, int mb_y)
1516 {
1517  if (!mb_x)
1518  return mb_y ? TOP_DC_PRED8x8 : DC_128_PRED8x8;
1519  else
1520  return mb_y ? mode : LEFT_DC_PRED8x8;
1521 }
1522 
1523 static av_always_inline
1524 int check_tm_pred8x8_mode(int mode, int mb_x, int mb_y, int vp7)
1525 {
1526  if (!mb_x)
1527  return mb_y ? VERT_PRED8x8 : (vp7 ? DC_128_PRED8x8 : DC_129_PRED8x8);
1528  else
1529  return mb_y ? mode : HOR_PRED8x8;
1530 }
1531 
1532 static av_always_inline
1533 int check_intra_pred8x8_mode_emuedge(int mode, int mb_x, int mb_y, int vp7)
1534 {
1535  switch (mode) {
1536  case DC_PRED8x8:
1537  return check_dc_pred8x8_mode(mode, mb_x, mb_y);
1538  case VERT_PRED8x8:
1539  return !mb_y ? (vp7 ? DC_128_PRED8x8 : DC_127_PRED8x8) : mode;
1540  case HOR_PRED8x8:
1541  return !mb_x ? (vp7 ? DC_128_PRED8x8 : DC_129_PRED8x8) : mode;
1542  case PLANE_PRED8x8: /* TM */
1543  return check_tm_pred8x8_mode(mode, mb_x, mb_y, vp7);
1544  }
1545  return mode;
1546 }
1547 
1548 static av_always_inline
1549 int check_tm_pred4x4_mode(int mode, int mb_x, int mb_y, int vp7)
1550 {
1551  if (!mb_x) {
1552  return mb_y ? VERT_VP8_PRED : (vp7 ? DC_128_PRED : DC_129_PRED);
1553  } else {
1554  return mb_y ? mode : HOR_VP8_PRED;
1555  }
1556 }
1557 
1558 static av_always_inline
1559 int check_intra_pred4x4_mode_emuedge(int mode, int mb_x, int mb_y,
1560  int *copy_buf, int vp7)
1561 {
1562  switch (mode) {
1563  case VERT_PRED:
1564  if (!mb_x && mb_y) {
1565  *copy_buf = 1;
1566  return mode;
1567  }
1568  /* fall-through */
1569  case DIAG_DOWN_LEFT_PRED:
1570  case VERT_LEFT_PRED:
1571  return !mb_y ? (vp7 ? DC_128_PRED : DC_127_PRED) : mode;
1572  case HOR_PRED:
1573  if (!mb_y) {
1574  *copy_buf = 1;
1575  return mode;
1576  }
1577  /* fall-through */
1578  case HOR_UP_PRED:
1579  return !mb_x ? (vp7 ? DC_128_PRED : DC_129_PRED) : mode;
1580  case TM_VP8_PRED:
1581  return check_tm_pred4x4_mode(mode, mb_x, mb_y, vp7);
1582  case DC_PRED: /* 4x4 DC doesn't use the same "H.264-style" exceptions
1583  * as 16x16/8x8 DC */
1584  case DIAG_DOWN_RIGHT_PRED:
1585  case VERT_RIGHT_PRED:
1586  case HOR_DOWN_PRED:
1587  if (!mb_y || !mb_x)
1588  *copy_buf = 1;
1589  return mode;
1590  }
1591  return mode;
1592 }
1593 
/* Run intra prediction for a whole macroblock (luma 16x16 or 16 individual
 * 4x4 subblocks, then chroma), adding residuals for the I4x4 case.
 * NOTE(review): the signature line carrying the function name
 * (intra_predict) and leading parameters is missing from this extract. */
static av_always_inline
                   VP8Macroblock *mb, int mb_x, int mb_y, int is_vp7)
{
    int x, y, mode, nnz;
    uint32_t tr;

    /* for the first row, we need to run xchg_mb_border to init the top edge
     * to 127; otherwise skip it if we aren't going to deblock.
     * NOTE(review): "|| !mb_y" can never be reached given the leading
     * "mb_y &&" — dead subexpression; confirm against upstream intent. */
    if (mb_y && (s->deblock_filter || !mb_y) && td->thread_nr == 0)
        xchg_mb_border(s->top_border[mb_x + 1], dst[0], dst[1], dst[2],
                       s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
                       s->filter.simple, 1);

    if (mb->mode < MODE_I4x4) {
        // whole-MB (16x16) luma prediction
        mode = check_intra_pred8x8_mode_emuedge(mb->mode, mb_x, mb_y, is_vp7);
        s->hpc.pred16x16[mode](dst[0], s->linesize);
    } else {
        // per-subblock (I4x4) luma prediction
        uint8_t *ptr = dst[0];
        uint8_t *intra4x4 = mb->intra4x4_pred_mode_mb;
        const uint8_t lo = is_vp7 ? 128 : 127;  // fill value for missing top pixels
        const uint8_t hi = is_vp7 ? 128 : 129;  // fill value for missing left pixels
        uint8_t tr_top[4] = { lo, lo, lo, lo };

        // all blocks on the right edge of the macroblock use the bottom edge
        // of the top macroblock for their topright edge
        uint8_t *tr_right = ptr - s->linesize + 16;

        // if we're on the right edge of the frame, said edge is extended
        // from the top macroblock
        if (mb_y && mb_x == s->mb_width - 1) {
            tr = tr_right[-1] * 0x01010101u;
            tr_right = (uint8_t *) &tr;
        }

        if (mb->skip)
            /* NOTE(review): the statement line that forms the body of this
             * if is missing from this extract. */

        for (y = 0; y < 4; y++) {
            uint8_t *topright = ptr + 4 - s->linesize;
            for (x = 0; x < 4; x++) {
                int copy = 0;
                ptrdiff_t linesize = s->linesize;
                uint8_t *dst = ptr + 4 * x;
                LOCAL_ALIGNED(4, uint8_t, copy_dst, [5 * 8]);

                if ((y == 0 || x == 3) && mb_y == 0) {
                    topright = tr_top;
                } else if (x == 3)
                    topright = tr_right;

                mode = check_intra_pred4x4_mode_emuedge(intra4x4[x], mb_x + x,
                                                        mb_y + y, &copy, is_vp7);
                if (copy) {
                    /* Edge block: build a padded 8x5 scratch copy with the
                     * proper border pixels, predict into it, copy back. */
                    dst = copy_dst + 12;
                    linesize = 8;
                    if (!(mb_y + y)) {
                        copy_dst[3] = lo;
                        AV_WN32A(copy_dst + 4, lo * 0x01010101U);
                    } else {
                        AV_COPY32(copy_dst + 4, ptr + 4 * x - s->linesize);
                        if (!(mb_x + x)) {
                            copy_dst[3] = hi;
                        } else {
                            copy_dst[3] = ptr[4 * x - s->linesize - 1];
                        }
                    }
                    if (!(mb_x + x)) {
                        copy_dst[11] =
                        copy_dst[19] =
                        copy_dst[27] =
                        copy_dst[35] = hi;
                    } else {
                        copy_dst[11] = ptr[4 * x                   - 1];
                        copy_dst[19] = ptr[4 * x + s->linesize     - 1];
                        copy_dst[27] = ptr[4 * x + s->linesize * 2 - 1];
                        copy_dst[35] = ptr[4 * x + s->linesize * 3 - 1];
                    }
                }
                s->hpc.pred4x4[mode](dst, topright, linesize);
                if (copy) {
                    AV_COPY32(ptr + 4 * x,                   copy_dst + 12);
                    AV_COPY32(ptr + 4 * x + s->linesize,     copy_dst + 20);
                    AV_COPY32(ptr + 4 * x + s->linesize * 2, copy_dst + 28);
                    AV_COPY32(ptr + 4 * x + s->linesize * 3, copy_dst + 36);
                }

                // add this subblock's residual right after prediction
                nnz = td->non_zero_count_cache[y][x];
                if (nnz) {
                    if (nnz == 1)
                        s->vp8dsp.vp8_idct_dc_add(ptr + 4 * x,
                                                  td->block[y][x], s->linesize);
                    else
                        s->vp8dsp.vp8_idct_add(ptr + 4 * x,
                                               td->block[y][x], s->linesize);
                }
                topright += 4;
            }

            ptr += 4 * s->linesize;
            intra4x4 += 4;
        }
    }

    /* NOTE(review): the line starting the chroma-mode check call
     * (check_intra_pred8x8_mode_emuedge on the chroma mode, per the
     * continuation below) is missing from this extract. */
                                            mb_x, mb_y, is_vp7);
    s->hpc.pred8x8[mode](dst[1], s->uvlinesize);
    s->hpc.pred8x8[mode](dst[2], s->uvlinesize);

    if (mb_y && (s->deblock_filter || !mb_y) && td->thread_nr == 0)
        xchg_mb_border(s->top_border[mb_x + 1], dst[0], dst[1], dst[2],
                       s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
                       s->filter.simple, 0);
}
1708 
/* Subpel MC lookup, indexed by the 3-bit fractional phase of an MV
 * component; row [0] doubles as the index into the mc_func tables. */
static const uint8_t subpel_idx[3][8] = {
    { 0, 1, 2, 1, 2, 1, 2, 1 }, // nr. of left extra pixels,
                                // also function pointer index
    { 0, 3, 5, 3, 5, 3, 5, 3 }, // nr. of extra pixels required
    { 0, 2, 3, 2, 3, 2, 3, 2 }, // nr. of right extra pixels
};
1715 
1716 /**
1717  * luma MC function
1718  *
1719  * @param s VP8 decoding context
1720  * @param dst target buffer for block data at block position
1721  * @param ref reference picture buffer at origin (0, 0)
1722  * @param mv motion vector (relative to block position) to get pixel data from
1723  * @param x_off horizontal position of block from origin (0, 0)
1724  * @param y_off vertical position of block from origin (0, 0)
1725  * @param block_w width of block (16, 8 or 4)
1726  * @param block_h height of block (always same as block_w)
1727  * @param width width of src/dst plane data
1728  * @param height height of src/dst plane data
1729  * @param linesize size of a single line of plane data, including padding
1730  * @param mc_func motion compensation function pointers (bilinear or sixtap MC)
1731  */
/* NOTE(review): the signature line carrying the function name (vp8_mc_luma,
 * per the callers below) and leading parameters is missing from this
 * extract. */
static av_always_inline
                 ThreadFrame *ref, const VP56mv *mv,
                 int x_off, int y_off, int block_w, int block_h,
                 int width, int height, ptrdiff_t linesize,
                 vp8_mc_func mc_func[3][3])
{
    uint8_t *src = ref->f->data[0];

    if (AV_RN32A(mv)) {   // nonzero MV: x and y tested as one 32-bit word
        ptrdiff_t src_linesize = linesize;

        /* Integer part via >> 2, subpel filter phase from the low bits of
         * the doubled component. */
        int mx = (mv->x * 2) & 7, mx_idx = subpel_idx[0][mx];
        int my = (mv->y * 2) & 7, my_idx = subpel_idx[0][my];

        x_off += mv->x >> 2;
        y_off += mv->y >> 2;

        // edge emulation
        ff_thread_await_progress(ref, (3 + y_off + block_h + subpel_idx[2][my]) >> 4, 0);
        src += y_off * linesize + x_off;
        if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
            y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
            /* NOTE(review): the emulated_edge_mc call line is missing from
             * this extract. */
                                     src - my_idx * linesize - mx_idx,
                                     EDGE_EMU_LINESIZE, linesize,
                                     block_w + subpel_idx[1][mx],
                                     block_h + subpel_idx[1][my],
                                     x_off - mx_idx, y_off - my_idx,
                                     width, height);
            src = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
            src_linesize = EDGE_EMU_LINESIZE;
        }
        mc_func[my_idx][mx_idx](dst, linesize, src, src_linesize, block_h, mx, my);
    } else {
        // zero MV: plain copy path
        ff_thread_await_progress(ref, (3 + y_off + block_h) >> 4, 0);
        mc_func[0][0](dst, linesize, src + y_off * linesize + x_off,
                      linesize, block_h, 0, 0);
    }
}
1772 
1773 /**
1774  * chroma MC function
1775  *
1776  * @param s VP8 decoding context
1777  * @param dst1 target buffer for block data at block position (U plane)
1778  * @param dst2 target buffer for block data at block position (V plane)
1779  * @param ref reference picture buffer at origin (0, 0)
1780  * @param mv motion vector (relative to block position) to get pixel data from
1781  * @param x_off horizontal position of block from origin (0, 0)
1782  * @param y_off vertical position of block from origin (0, 0)
1783  * @param block_w width of block (16, 8 or 4)
1784  * @param block_h height of block (always same as block_w)
1785  * @param width width of src/dst plane data
1786  * @param height height of src/dst plane data
1787  * @param linesize size of a single line of plane data, including padding
1788  * @param mc_func motion compensation function pointers (bilinear or sixtap MC)
1789  */
/* NOTE(review): the signature line carrying the function name
 * (vp8_mc_chroma, per the callers below) and leading parameters is missing
 * from this extract. */
static av_always_inline
                   uint8_t *dst2, ThreadFrame *ref, const VP56mv *mv,
                   int x_off, int y_off, int block_w, int block_h,
                   int width, int height, ptrdiff_t linesize,
                   vp8_mc_func mc_func[3][3])
{
    uint8_t *src1 = ref->f->data[1], *src2 = ref->f->data[2];

    if (AV_RN32A(mv)) {   // nonzero MV: subpel chroma MC for both planes
        int mx = mv->x & 7, mx_idx = subpel_idx[0][mx];
        int my = mv->y & 7, my_idx = subpel_idx[0][my];

        x_off += mv->x >> 3;
        y_off += mv->y >> 3;

        // edge emulation
        src1 += y_off * linesize + x_off;
        src2 += y_off * linesize + x_off;
        ff_thread_await_progress(ref, (3 + y_off + block_h + subpel_idx[2][my]) >> 3, 0);
        if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
            y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
            /* NOTE(review): the emulated_edge_mc call line for the first
             * chroma plane is missing from this extract. */
                                     src1 - my_idx * linesize - mx_idx,
                                     EDGE_EMU_LINESIZE, linesize,
                                     block_w + subpel_idx[1][mx],
                                     block_h + subpel_idx[1][my],
                                     x_off - mx_idx, y_off - my_idx, width, height);
            src1 = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
            mc_func[my_idx][mx_idx](dst1, linesize, src1, EDGE_EMU_LINESIZE, block_h, mx, my);

            /* NOTE(review): the emulated_edge_mc call line for the second
             * chroma plane is missing from this extract. */
                                     src2 - my_idx * linesize - mx_idx,
                                     EDGE_EMU_LINESIZE, linesize,
                                     block_w + subpel_idx[1][mx],
                                     block_h + subpel_idx[1][my],
                                     x_off - mx_idx, y_off - my_idx, width, height);
            src2 = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
            mc_func[my_idx][mx_idx](dst2, linesize, src2, EDGE_EMU_LINESIZE, block_h, mx, my);
        } else {
            mc_func[my_idx][mx_idx](dst1, linesize, src1, linesize, block_h, mx, my);
            mc_func[my_idx][mx_idx](dst2, linesize, src2, linesize, block_h, mx, my);
        }
    } else {
        // zero MV: plain copy path for both planes
        ff_thread_await_progress(ref, (3 + y_off + block_h) >> 3, 0);
        mc_func[0][0](dst1, linesize, src1 + y_off * linesize + x_off, linesize, block_h, 0, 0);
        mc_func[0][0](dst2, linesize, src2 + y_off * linesize + x_off, linesize, block_h, 0, 0);
    }
}
1839 
/* Apply MC for one rectangular partition of a macroblock: luma at full
 * resolution, then chroma at half resolution with the shared UV vector.
 * NOTE(review): the signature line carrying the function name (vp8_mc_part,
 * per the callers in inter_predict) is missing from this extract. */
static av_always_inline
                 ThreadFrame *ref_frame, int x_off, int y_off,
                 int bx_off, int by_off, int block_w, int block_h,
                 int width, int height, VP56mv *mv)
{
    VP56mv uvmv = *mv;

    /* Y */
    vp8_mc_luma(s, td, dst[0] + by_off * s->linesize + bx_off,
                ref_frame, mv, x_off + bx_off, y_off + by_off,
                block_w, block_h, width, height, s->linesize,
                s->put_pixels_tab[block_w == 8]);

    /* U/V */
    if (s->profile == 3) {
        /* this block only applies VP8; it is safe to check
         * only the profile, as VP7 profile <= 1 */
        uvmv.x &= ~7;   // round chroma MV to full-pel
        uvmv.y &= ~7;
    }
    // all offsets/dimensions halve for 4:2:0 chroma
    x_off   >>= 1;
    y_off   >>= 1;
    bx_off  >>= 1;
    by_off  >>= 1;
    width   >>= 1;
    height  >>= 1;
    block_w >>= 1;
    block_h >>= 1;
    vp8_mc_chroma(s, td, dst[1] + by_off * s->uvlinesize + bx_off,
                  dst[2] + by_off * s->uvlinesize + bx_off, ref_frame,
                  &uvmv, x_off + bx_off, y_off + by_off,
                  block_w, block_h, width, height, s->uvlinesize,
                  s->put_pixels_tab[1 + (block_w == 4)]);
}
1875 
1876 /* Fetch pixels for estimated mv 4 macroblocks ahead.
1877  * Optimized for 64-byte cache lines. Inspired by ffh264 prefetch_motion. */
1878 static av_always_inline
1879 void prefetch_motion(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y,
1880  int mb_xy, int ref)
1881 {
1882  /* Don't prefetch refs that haven't been used very often this frame. */
1883  if (s->ref_count[ref - 1] > (mb_xy >> 5)) {
1884  int x_off = mb_x << 4, y_off = mb_y << 4;
1885  int mx = (mb->mv.x >> 2) + x_off + 8;
1886  int my = (mb->mv.y >> 2) + y_off;
1887  uint8_t **src = s->framep[ref]->tf.f->data;
1888  int off = mx + (my + (mb_x & 3) * 4) * s->linesize + 64;
1889  /* For threading, a ff_thread_await_progress here might be useful, but
1890  * it actually slows down the decoder. Since a bad prefetch doesn't
1891  * generate bad decoder output, we don't run it here. */
1892  s->vdsp.prefetch(src[0] + off, s->linesize, 4);
1893  off = (mx >> 1) + ((my >> 1) + (mb_x & 7)) * s->uvlinesize + 64;
1894  s->vdsp.prefetch(src[1] + off, src[2] - src[1], 2);
1895  }
1896 }
1897 
1898 /**
1899  * Apply motion vectors to prediction buffer, chapter 18.
1900  */
/* NOTE(review): the signature line carrying the function name
 * (inter_predict) and leading parameters is missing from this extract. */
static av_always_inline
                   VP8Macroblock *mb, int mb_x, int mb_y)
{
    int x_off = mb_x << 4, y_off = mb_y << 4;
    int width = 16 * s->mb_width, height = 16 * s->mb_height;
    ThreadFrame *ref = &s->framep[mb->ref_frame]->tf;
    VP56mv *bmv = mb->bmv;

    switch (mb->partitioning) {
    case VP8_SPLITMVMODE_NONE:
        // one MV for the whole 16x16 macroblock
        vp8_mc_part(s, td, dst, ref, x_off, y_off,
                    0, 0, 16, 16, width, height, &mb->mv);
        break;
    case VP8_SPLITMVMODE_4x4: {
        int x, y;
        VP56mv uvmv;

        /* Y */
        for (y = 0; y < 4; y++) {
            for (x = 0; x < 4; x++) {
                vp8_mc_luma(s, td, dst[0] + 4 * y * s->linesize + x * 4,
                            ref, &bmv[4 * y + x],
                            4 * x + x_off, 4 * y + y_off, 4, 4,
                            width, height, s->linesize,
                            s->put_pixels_tab[2]);
            }
        }

        /* U/V */
        x_off  >>= 1;
        y_off  >>= 1;
        width  >>= 1;
        height >>= 1;
        for (y = 0; y < 2; y++) {
            for (x = 0; x < 2; x++) {
                /* Each chroma 4x4 uses the rounded average of the four
                 * covering luma MVs. */
                uvmv.x = mb->bmv[2 * y * 4 + 2 * x    ].x +
                         mb->bmv[2 * y * 4 + 2 * x + 1].x +
                         mb->bmv[(2 * y + 1) * 4 + 2 * x    ].x +
                         mb->bmv[(2 * y + 1) * 4 + 2 * x + 1].x;
                uvmv.y = mb->bmv[2 * y * 4 + 2 * x    ].y +
                         mb->bmv[2 * y * 4 + 2 * x + 1].y +
                         mb->bmv[(2 * y + 1) * 4 + 2 * x    ].y +
                         mb->bmv[(2 * y + 1) * 4 + 2 * x + 1].y;
                uvmv.x = (uvmv.x + 2 + FF_SIGNBIT(uvmv.x)) >> 2;
                uvmv.y = (uvmv.y + 2 + FF_SIGNBIT(uvmv.y)) >> 2;
                if (s->profile == 3) {
                    // profile 3: full-pel chroma MVs (see vp8_mc_part)
                    uvmv.x &= ~7;
                    uvmv.y &= ~7;
                }
                vp8_mc_chroma(s, td, dst[1] + 4 * y * s->uvlinesize + x * 4,
                              dst[2] + 4 * y * s->uvlinesize + x * 4, ref,
                              &uvmv, 4 * x + x_off, 4 * y + y_off, 4, 4,
                              width, height, s->uvlinesize,
                              s->put_pixels_tab[2]);
            }
        }
        break;
    }
    case VP8_SPLITMVMODE_16x8:
        vp8_mc_part(s, td, dst, ref, x_off, y_off,
                    0, 0, 16, 8, width, height, &bmv[0]);
        vp8_mc_part(s, td, dst, ref, x_off, y_off,
                    0, 8, 16, 8, width, height, &bmv[1]);
        break;
    case VP8_SPLITMVMODE_8x16:
        vp8_mc_part(s, td, dst, ref, x_off, y_off,
                    0, 0, 8, 16, width, height, &bmv[0]);
        vp8_mc_part(s, td, dst, ref, x_off, y_off,
                    8, 0, 8, 16, width, height, &bmv[1]);
        break;
    case VP8_SPLITMVMODE_8x8:
        vp8_mc_part(s, td, dst, ref, x_off, y_off,
                    0, 0, 8, 8, width, height, &bmv[0]);
        vp8_mc_part(s, td, dst, ref, x_off, y_off,
                    8, 0, 8, 8, width, height, &bmv[1]);
        vp8_mc_part(s, td, dst, ref, x_off, y_off,
                    0, 8, 8, 8, width, height, &bmv[2]);
        vp8_mc_part(s, td, dst, ref, x_off, y_off,
                    8, 8, 8, 8, width, height, &bmv[3]);
        break;
    }
}
1984 
/* Add the decoded residuals to the predicted pixels of a non-I4x4
 * macroblock, choosing between full IDCT, DC-only add and batched DC add
 * per 4x4 block from the cached non-zero counts.
 * NOTE(review): the signature line carrying the function name (idct_mb)
 * and its parameters is missing from this extract. */
static av_always_inline
{
    int x, y, ch;

    if (mb->mode != MODE_I4x4) {   // I4x4 adds residuals during prediction
        uint8_t *y_dst = dst[0];
        for (y = 0; y < 4; y++) {
            /* Read the four per-block counts of this row as one word:
             * 0 -> skip, 1 -> DC-only add, >1 -> full IDCT add. */
            uint32_t nnz4 = AV_RL32(td->non_zero_count_cache[y]);
            if (nnz4) {
                if (nnz4 & ~0x01010101) {
                    // at least one block in the row needs a full IDCT
                    for (x = 0; x < 4; x++) {
                        if ((uint8_t) nnz4 == 1)
                            s->vp8dsp.vp8_idct_dc_add(y_dst + 4 * x,
                                                      td->block[y][x],
                                                      s->linesize);
                        else if ((uint8_t) nnz4 > 1)
                            s->vp8dsp.vp8_idct_add(y_dst + 4 * x,
                                                   td->block[y][x],
                                                   s->linesize);
                        nnz4 >>= 8;
                        if (!nnz4)
                            break;
                    }
                } else {
                    // every coded block in the row is DC-only: batched add
                    s->vp8dsp.vp8_idct_dc_add4y(y_dst, td->block[y], s->linesize);
                }
            }
            y_dst += 4 * s->linesize;
        }
    }

    for (ch = 0; ch < 2; ch++) {
        uint32_t nnz4 = AV_RL32(td->non_zero_count_cache[4 + ch]);
        if (nnz4) {
            uint8_t *ch_dst = dst[1 + ch];
            if (nnz4 & ~0x01010101) {
                for (y = 0; y < 2; y++) {
                    for (x = 0; x < 2; x++) {
                        if ((uint8_t) nnz4 == 1)
                            s->vp8dsp.vp8_idct_dc_add(ch_dst + 4 * x,
                                                      td->block[4 + ch][(y << 1) + x],
                                                      s->uvlinesize);
                        else if ((uint8_t) nnz4 > 1)
                            s->vp8dsp.vp8_idct_add(ch_dst + 4 * x,
                                                   td->block[4 + ch][(y << 1) + x],
                                                   s->uvlinesize);
                        nnz4 >>= 8;
                        if (!nnz4)
                            goto chroma_idct_end;
                    }
                    ch_dst += 4 * s->uvlinesize;
                }
            } else {
                s->vp8dsp.vp8_idct_dc_add4uv(ch_dst, td->block[4 + ch], s->uvlinesize);
            }
        }
chroma_idct_end:
        ;
    }
}
2046 
/* Compute the loop-filter strength for one macroblock from the segment
 * level, the per-ref/per-mode LF deltas and the sharpness setting.
 * NOTE(review): the signature line carrying the function name
 * (filter_level_for_mb) and leading parameters is missing from this
 * extract. */
static av_always_inline
                         VP8FilterStrength *f, int is_vp7)
{
    int interior_limit, filter_level;

    if (s->segmentation.enabled) {
        filter_level = s->segmentation.filter_level[mb->segment];
        if (!s->segmentation.absolute_vals)
            filter_level += s->filter.level;   // segment value is a delta
    } else
        filter_level = s->filter.level;

    if (s->lf_delta.enabled) {
        filter_level += s->lf_delta.ref[mb->ref_frame];
        filter_level += s->lf_delta.mode[mb->mode];
    }

    filter_level = av_clip_uintp2(filter_level, 6);   // clamp to 0..63

    interior_limit = filter_level;
    if (s->filter.sharpness) {
        interior_limit >>= (s->filter.sharpness + 3) >> 2;
        interior_limit = FFMIN(interior_limit, 9 - s->filter.sharpness);
    }
    interior_limit = FFMAX(interior_limit, 1);

    f->filter_level = filter_level;
    f->inner_limit = interior_limit;
    /* Inner (subblock) edges are filtered except for skipped whole-block
     * predictions; VP7 always filters them. */
    f->inner_filter = is_vp7 || !mb->skip || mb->mode == MODE_I4x4 ||
                      mb->mode == VP8_MVMODE_SPLIT;
}
2079 
/* Run the normal (full) in-loop deblocking filter over one macroblock:
 * left/top macroblock edges with the stronger limits, then the inner
 * column/row edges when inner filtering is enabled.
 * NOTE(review): the signature line carrying the function name (filter_mb)
 * and leading parameters is missing from this extract. */
static av_always_inline
                  int mb_x, int mb_y, int is_vp7)
{
    int mbedge_lim, bedge_lim_y, bedge_lim_uv, hev_thresh;
    int filter_level = f->filter_level;
    int inner_limit = f->inner_limit;
    int inner_filter = f->inner_filter;
    ptrdiff_t linesize = s->linesize;
    ptrdiff_t uvlinesize = s->uvlinesize;
    /* High-edge-variance threshold, indexed by [keyframe][filter_level]. */
    static const uint8_t hev_thresh_lut[2][64] = {
        { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
          2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
          3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
          3, 3, 3, 3 },
        { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
          1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
          2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
          2, 2, 2, 2 }
    };

    if (!filter_level)
        return;

    if (is_vp7) {
        bedge_lim_y = filter_level;
        bedge_lim_uv = filter_level * 2;
        mbedge_lim = filter_level + 2;
    } else {
        bedge_lim_y =
        bedge_lim_uv = filter_level * 2 + inner_limit;
        mbedge_lim = bedge_lim_y + 4;
    }

    hev_thresh = hev_thresh_lut[s->keyframe][filter_level];

    if (mb_x) {
        // left macroblock edge
        s->vp8dsp.vp8_h_loop_filter16y(dst[0], linesize,
                                       mbedge_lim, inner_limit, hev_thresh);
        s->vp8dsp.vp8_h_loop_filter8uv(dst[1], dst[2], uvlinesize,
                                       mbedge_lim, inner_limit, hev_thresh);
    }

/* Inner vertical (column) edges; invoked before the vertical pass for VP8
 * (!is_vp7) and after it for VP7 (is_vp7). */
#define H_LOOP_FILTER_16Y_INNER(cond) \
    if (cond && inner_filter) { \
        s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 4, linesize, \
                                             bedge_lim_y, inner_limit, \
                                             hev_thresh); \
        s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 8, linesize, \
                                             bedge_lim_y, inner_limit, \
                                             hev_thresh); \
        s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 12, linesize, \
                                             bedge_lim_y, inner_limit, \
                                             hev_thresh); \
        s->vp8dsp.vp8_h_loop_filter8uv_inner(dst[1] + 4, dst[2] + 4, \
                                             uvlinesize, bedge_lim_uv, \
                                             inner_limit, hev_thresh); \
    }

    H_LOOP_FILTER_16Y_INNER(!is_vp7)

    if (mb_y) {
        // top macroblock edge
        s->vp8dsp.vp8_v_loop_filter16y(dst[0], linesize,
                                       mbedge_lim, inner_limit, hev_thresh);
        s->vp8dsp.vp8_v_loop_filter8uv(dst[1], dst[2], uvlinesize,
                                       mbedge_lim, inner_limit, hev_thresh);
    }

    if (inner_filter) {
        // inner horizontal (row) edges
        s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0] + 4 * linesize,
                                             linesize, bedge_lim_y,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0] + 8 * linesize,
                                             linesize, bedge_lim_y,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0] + 12 * linesize,
                                             linesize, bedge_lim_y,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_v_loop_filter8uv_inner(dst[1] + 4 * uvlinesize,
                                             dst[2] + 4 * uvlinesize,
                                             uvlinesize, bedge_lim_uv,
                                             inner_limit, hev_thresh);
    }

    H_LOOP_FILTER_16Y_INNER(is_vp7)
}
2166 
/* Apply the "simple" in-loop deblocking filter (luma only) to one
 * macroblock: MB-edge filtering on left/top boundaries plus inner edges
 * at 4/8/12 pels.
 * NOTE(review): the extraction dropped original line 2168 carrying the
 * function name and the s/dst/f parameters — presumably
 * filter_mb_simple(VP8Context *s, uint8_t *dst, VP8FilterStrength *f, ...);
 * confirm against upstream vp8.c. */
2167 static av_always_inline
2169  int mb_x, int mb_y)
2170 {
2171  int mbedge_lim, bedge_lim;
2172  int filter_level = f->filter_level;
2173  int inner_limit = f->inner_limit;
2174  int inner_filter = f->inner_filter;
2175  ptrdiff_t linesize = s->linesize;
2176 
/* Zero filter level disables deblocking for this macroblock. */
2177  if (!filter_level)
2178  return;
2179 
2180  bedge_lim = 2 * filter_level + inner_limit;
2181  mbedge_lim = bedge_lim + 4;
2182 
/* Left MB edge, then inner horizontal edges. */
2183  if (mb_x)
2184  s->vp8dsp.vp8_h_loop_filter_simple(dst, linesize, mbedge_lim);
2185  if (inner_filter) {
2186  s->vp8dsp.vp8_h_loop_filter_simple(dst + 4, linesize, bedge_lim);
2187  s->vp8dsp.vp8_h_loop_filter_simple(dst + 8, linesize, bedge_lim);
2188  s->vp8dsp.vp8_h_loop_filter_simple(dst + 12, linesize, bedge_lim);
2189  }
2190 
/* Top MB edge, then inner vertical edges. */
2191  if (mb_y)
2192  s->vp8dsp.vp8_v_loop_filter_simple(dst, linesize, mbedge_lim);
2193  if (inner_filter) {
2194  s->vp8dsp.vp8_v_loop_filter_simple(dst + 4 * linesize, linesize, bedge_lim);
2195  s->vp8dsp.vp8_v_loop_filter_simple(dst + 8 * linesize, linesize, bedge_lim);
2196  s->vp8dsp.vp8_v_loop_filter_simple(dst + 12 * linesize, linesize, bedge_lim);
2197  }
2198 }
2199 
/* MV clamping margin in 1/4-pel units (16 pels == one macroblock). */
2200 #define MARGIN (16 << 2)
/* Decode mode/MV information for the whole frame up front (used with the
 * two-pass macroblock layout, s->mb_layout == 1).
 * NOTE(review): the extraction dropped original lines 2202 (rest of the
 * signature: avctx/curframe/prev_frame params) and 2211 (declaration of
 * the per-row VP8Macroblock *mb pointer) — confirm against upstream. */
2201 static av_always_inline
2203  VP8Frame *prev_frame, int is_vp7)
2204 {
2205  VP8Context *s = avctx->priv_data;
2206  int mb_x, mb_y;
2207 
2208  s->mv_min.y = -MARGIN;
2209  s->mv_max.y = ((s->mb_height - 1) << 6) + MARGIN;
2210  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
2212  ((s->mb_width + 1) * (mb_y + 1) + 1);
2213  int mb_xy = mb_y * s->mb_width;
2214 
/* Reset left-neighbour intra prediction modes at each row start. */
2215  AV_WN32A(s->intra4x4_pred_mode_left, DC_PRED * 0x01010101);
2216 
2217  s->mv_min.x = -MARGIN;
2218  s->mv_max.x = ((s->mb_width - 1) << 6) + MARGIN;
2219  for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
/* Top row: seed the above-neighbour 4x4 modes with DC_PRED. */
2220  if (mb_y == 0)
2221  AV_WN32A((mb - s->mb_width - 1)->intra4x4_pred_mode_top,
2222  DC_PRED * 0x01010101);
2223  decode_mb_mode(s, mb, mb_x, mb_y, curframe->seg_map->data + mb_xy,
2224  prev_frame && prev_frame->seg_map ?
2225  prev_frame->seg_map->data + mb_xy : NULL, 1, is_vp7);
/* Slide the clamping window one MB (64 quarter-pels) per step. */
2226  s->mv_min.x -= 64;
2227  s->mv_max.x -= 64;
2228  }
2229  s->mv_min.y -= 64;
2230  s->mv_max.y -= 64;
2231  }
2232 }
2233 
2234 static void vp7_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame,
2235  VP8Frame *prev_frame)
2236 {
2237  vp78_decode_mv_mb_modes(avctx, cur_frame, prev_frame, IS_VP7);
2238 }
2239 
2240 static void vp8_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame,
2241  VP8Frame *prev_frame)
2242 {
2243  vp78_decode_mv_mb_modes(avctx, cur_frame, prev_frame, IS_VP8);
2244 }
2245 
2246 #if HAVE_THREADS
/* Block until thread `otd` has reached macroblock (mb_x_check, mb_y_check).
 * Positions are packed as (mb_y << 16) | mb_x so a single integer compare
 * orders them. Expands to nothing meaningful without pthreads (see #else). */
2247 #define check_thread_pos(td, otd, mb_x_check, mb_y_check) \
2248  do { \
2249  int tmp = (mb_y_check << 16) | (mb_x_check & 0xFFFF); \
2250  if (otd->thread_mb_pos < tmp) { \
2251  pthread_mutex_lock(&otd->lock); \
2252  td->wait_mb_pos = tmp; \
2253  do { \
2254  if (otd->thread_mb_pos >= tmp) \
2255  break; \
2256  pthread_cond_wait(&otd->cond, &otd->lock); \
2257  } while (1); \
2258  td->wait_mb_pos = INT_MAX; \
2259  pthread_mutex_unlock(&otd->lock); \
2260  } \
2261  } while (0)
2262 
/* Publish this thread's progress and wake any waiter that has caught up.
 * NOTE: deliberately references the caller's locals `avctx`, `num_jobs`,
 * `next_td` and `prev_td` by name — only usable inside the row-decoding
 * functions below. */
2263 #define update_pos(td, mb_y, mb_x) \
2264  do { \
2265  int pos = (mb_y << 16) | (mb_x & 0xFFFF); \
2266  int sliced_threading = (avctx->active_thread_type == FF_THREAD_SLICE) && \
2267  (num_jobs > 1); \
2268  int is_null = !next_td || !prev_td; \
2269  int pos_check = (is_null) ? 1 \
2270  : (next_td != td && \
2271  pos >= next_td->wait_mb_pos) || \
2272  (prev_td != td && \
2273  pos >= prev_td->wait_mb_pos); \
2274  td->thread_mb_pos = pos; \
2275  if (sliced_threading && pos_check) { \
2276  pthread_mutex_lock(&td->lock); \
2277  pthread_cond_broadcast(&td->cond); \
2278  pthread_mutex_unlock(&td->lock); \
2279  } \
2280  } while (0)
2281 #else
/* Single-threaded builds: both synchronisation macros become no-ops. */
2282 #define check_thread_pos(td, otd, mb_x_check, mb_y_check) while(0)
2283 #define update_pos(td, mb_y, mb_x) while(0)
2284 #endif
2285 
/* Decode one macroblock row (entropy decode, intra/inter prediction, IDCT),
 * without running the loop filter. Returns 0 or AVERROR_INVALIDDATA when the
 * range coder has consumed its partition.
 * NOTE(review): the extraction dropped original line 2286 carrying the
 * function name and the avctx/tdata parameters — presumably
 * decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata, ...);
 * it also dropped line 2320 (second half of the segmentation condition). */
2287  int jobnr, int threadnr, int is_vp7)
2288 {
2289  VP8Context *s = avctx->priv_data;
2290  VP8ThreadData *prev_td, *next_td, *td = &s->thread_data[threadnr];
/* thread_mb_pos packs (mb_y << 16) | mb_x; extract the row. */
2291  int mb_y = td->thread_mb_pos >> 16;
2292  int mb_x, mb_xy = mb_y * s->mb_width;
2293  int num_jobs = s->num_jobs;
2294  VP8Frame *curframe = s->curframe, *prev_frame = s->prev_frame;
2295  VP56RangeCoder *c = &s->coeff_partition[mb_y & (s->num_coeff_partitions - 1)];
2296  VP8Macroblock *mb;
2297  uint8_t *dst[3] = {
2298  curframe->tf.f->data[0] + 16 * mb_y * s->linesize,
2299  curframe->tf.f->data[1] + 8 * mb_y * s->uvlinesize,
2300  curframe->tf.f->data[2] + 8 * mb_y * s->uvlinesize
2301  };
2302 
/* Bail out if this coefficient partition is already exhausted. */
2303  if (c->end <= c->buffer && c->bits >= 0)
2304  return AVERROR_INVALIDDATA;
2305 
/* Identify the jobs handling the rows above and below us (self at edges). */
2306  if (mb_y == 0)
2307  prev_td = td;
2308  else
2309  prev_td = &s->thread_data[(jobnr + num_jobs - 1) % num_jobs];
2310  if (mb_y == s->mb_height - 1)
2311  next_td = td;
2312  else
2313  next_td = &s->thread_data[(jobnr + 1) % num_jobs];
2314  if (s->mb_layout == 1)
2315  mb = s->macroblocks_base + ((s->mb_width + 1) * (mb_y + 1) + 1);
2316  else {
2317  // Make sure the previous frame has read its segmentation map,
2318  // if we re-use the same map.
2319  if (prev_frame && s->segmentation.enabled &&
2321  ff_thread_await_progress(&prev_frame->tf, mb_y, 0);
2322  mb = s->macroblocks + (s->mb_height - mb_y - 1) * 2;
2323  memset(mb - 1, 0, sizeof(*mb)); // zero left macroblock
2324  AV_WN32A(s->intra4x4_pred_mode_left, DC_PRED * 0x01010101);
2325  }
2326 
/* VP8 resets the left non-zero-coefficient context every row; VP7 only
 * at the top of the frame. */
2327  if (!is_vp7 || mb_y == 0)
2328  memset(td->left_nnz, 0, sizeof(td->left_nnz));
2329 
2330  s->mv_min.x = -MARGIN;
2331  s->mv_max.x = ((s->mb_width - 1) << 6) + MARGIN;
2332 
2333  for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
2334  if (c->end <= c->buffer && c->bits >= 0)
2335  return AVERROR_INVALIDDATA;
2336  // Wait for previous thread to read mb_x+2, and reach mb_y-1.
2337  if (prev_td != td) {
2338  if (threadnr != 0) {
2339  check_thread_pos(td, prev_td,
2340  mb_x + (is_vp7 ? 2 : 1),
2341  mb_y - (is_vp7 ? 2 : 1));
2342  } else {
2343  check_thread_pos(td, prev_td,
2344  mb_x + (is_vp7 ? 2 : 1) + s->mb_width + 3,
2345  mb_y - (is_vp7 ? 2 : 1));
2346  }
2347  }
2348 
2349  s->vdsp.prefetch(dst[0] + (mb_x & 3) * 4 * s->linesize + 64,
2350  s->linesize, 4);
2351  s->vdsp.prefetch(dst[1] + (mb_x & 7) * s->uvlinesize + 64,
2352  dst[2] - dst[1], 2);
2353 
/* In single-pass layout, mode/MV info is decoded per-MB here (the
 * two-pass layout decodes it all up front in vp78_decode_mv_mb_modes). */
2354  if (!s->mb_layout)
2355  decode_mb_mode(s, mb, mb_x, mb_y, curframe->seg_map->data + mb_xy,
2356  prev_frame && prev_frame->seg_map ?
2357  prev_frame->seg_map->data + mb_xy : NULL, 0, is_vp7);
2358 
2359  prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_PREVIOUS);
2360 
2361  if (!mb->skip)
2362  decode_mb_coeffs(s, td, c, mb, s->top_nnz[mb_x], td->left_nnz, is_vp7);
2363 
2364  if (mb->mode <= MODE_I4x4)
2365  intra_predict(s, td, dst, mb, mb_x, mb_y, is_vp7);
2366  else
2367  inter_predict(s, td, dst, mb, mb_x, mb_y);
2368 
2369  prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN);
2370 
2371  if (!mb->skip) {
2372  idct_mb(s, td, dst, mb);
2373  } else {
2374  AV_ZERO64(td->left_nnz);
2375  AV_WN64(s->top_nnz[mb_x], 0); // array of 9, so unaligned
2376 
2377  /* Reset DC block predictors if they would exist
2378  * if the mb had coefficients */
2379  if (mb->mode != MODE_I4x4 && mb->mode != VP8_MVMODE_SPLIT) {
2380  td->left_nnz[8] = 0;
2381  s->top_nnz[mb_x][8] = 0;
2382  }
2383  }
2384 
2385  if (s->deblock_filter)
2386  filter_level_for_mb(s, mb, &td->filter_strength[mb_x], is_vp7);
2387 
/* With sliced threading, the last job saves the row border for the
 * filter pass (num_jobs == 1 does this inside filter_mb_row instead). */
2388  if (s->deblock_filter && num_jobs != 1 && threadnr == num_jobs - 1) {
2389  if (s->filter.simple)
2390  backup_mb_border(s->top_border[mb_x + 1], dst[0],
2391  NULL, NULL, s->linesize, 0, 1);
2392  else
2393  backup_mb_border(s->top_border[mb_x + 1], dst[0],
2394  dst[1], dst[2], s->linesize, s->uvlinesize, 0);
2395  }
2396 
2397  prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN2);
2398 
/* Advance output pointers by one macroblock and shift the MV window. */
2399  dst[0] += 16;
2400  dst[1] += 8;
2401  dst[2] += 8;
2402  s->mv_min.x -= 64;
2403  s->mv_max.x -= 64;
2404 
2405  if (mb_x == s->mb_width + 1) {
2406  update_pos(td, mb_y, s->mb_width + 3);
2407  } else {
2408  update_pos(td, mb_y, mb_x);
2409  }
2410  }
2411  return 0;
2412 }
2413 
2414 static int vp7_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata,
2415  int jobnr, int threadnr)
2416 {
2417  return decode_mb_row_no_filter(avctx, tdata, jobnr, threadnr, 1);
2418 }
2419 
2420 static int vp8_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata,
2421  int jobnr, int threadnr)
2422 {
2423  return decode_mb_row_no_filter(avctx, tdata, jobnr, threadnr, 0);
2424 }
2425 
/* Run the in-loop deblocking filter over one macroblock row, synchronising
 * with the jobs processing the rows above and below via the
 * check_thread_pos/update_pos macros (which capture td/prev_td/next_td/
 * num_jobs/avctx by name — do not rename these locals). */
2426 static av_always_inline void filter_mb_row(AVCodecContext *avctx, void *tdata,
2427  int jobnr, int threadnr, int is_vp7)
2428 {
2429  VP8Context *s = avctx->priv_data;
2430  VP8ThreadData *td = &s->thread_data[threadnr];
/* thread_mb_pos packs (mb_y << 16) | mb_x; extract the row index. */
2431  int mb_x, mb_y = td->thread_mb_pos >> 16, num_jobs = s->num_jobs;
2432  AVFrame *curframe = s->curframe->tf.f;
2433  VP8Macroblock *mb;
2434  VP8ThreadData *prev_td, *next_td;
2435  uint8_t *dst[3] = {
2436  curframe->data[0] + 16 * mb_y * s->linesize,
2437  curframe->data[1] + 8 * mb_y * s->uvlinesize,
2438  curframe->data[2] + 8 * mb_y * s->uvlinesize
2439  };
2440 
/* Locate this row's macroblock array for either MB layout. */
2441  if (s->mb_layout == 1)
2442  mb = s->macroblocks_base + ((s->mb_width + 1) * (mb_y + 1) + 1);
2443  else
2444  mb = s->macroblocks + (s->mb_height - mb_y - 1) * 2;
2445 
/* Jobs handling the rows above/below us (self at frame edges). */
2446  if (mb_y == 0)
2447  prev_td = td;
2448  else
2449  prev_td = &s->thread_data[(jobnr + num_jobs - 1) % num_jobs];
2450  if (mb_y == s->mb_height - 1)
2451  next_td = td;
2452  else
2453  next_td = &s->thread_data[(jobnr + 1) % num_jobs];
2454 
2455  for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb++) {
2456  VP8FilterStrength *f = &td->filter_strength[mb_x];
/* Wait until the row above has been filtered past our position, and
 * the row below has been decoded far enough not to be clobbered. */
2457  if (prev_td != td)
2458  check_thread_pos(td, prev_td,
2459  (mb_x + 1) + (s->mb_width + 3), mb_y - 1);
2460  if (next_td != td)
2461  if (next_td != &s->thread_data[0])
2462  check_thread_pos(td, next_td, mb_x + 1, mb_y + 1);
2463 
/* Single-job mode: save the unfiltered border here (multi-job mode
 * already did it in decode_mb_row_no_filter). */
2464  if (num_jobs == 1) {
2465  if (s->filter.simple)
2466  backup_mb_border(s->top_border[mb_x + 1], dst[0],
2467  NULL, NULL, s->linesize, 0, 1);
2468  else
2469  backup_mb_border(s->top_border[mb_x + 1], dst[0],
2470  dst[1], dst[2], s->linesize, s->uvlinesize, 0);
2471  }
2472 
2473  if (s->filter.simple)
2474  filter_mb_simple(s, dst[0], f, mb_x, mb_y);
2475  else
2476  filter_mb(s, dst, f, mb_x, mb_y, is_vp7);
2477  dst[0] += 16;
2478  dst[1] += 8;
2479  dst[2] += 8;
2480 
2481  update_pos(td, mb_y, (s->mb_width + 3) + mb_x);
2482  }
2483 }
2484 
2485 static void vp7_filter_mb_row(AVCodecContext *avctx, void *tdata,
2486  int jobnr, int threadnr)
2487 {
2488  filter_mb_row(avctx, tdata, jobnr, threadnr, 1);
2489 }
2490 
2491 static void vp8_filter_mb_row(AVCodecContext *avctx, void *tdata,
2492  int jobnr, int threadnr)
2493 {
2494  filter_mb_row(avctx, tdata, jobnr, threadnr, 0);
2495 }
2496 
/* Sliced-threading driver: each job decodes (and optionally filters) every
 * num_jobs-th macroblock row, publishing progress through update_pos.
 * NOTE: update_pos captures the locals avctx/num_jobs/next_td/prev_td by
 * name — do not rename them. Returns 0 or the row decoder's error. */
2497 static av_always_inline
2498 int vp78_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr,
2499  int threadnr, int is_vp7)
2500 {
2501  VP8Context *s = avctx->priv_data;
2502  VP8ThreadData *td = &s->thread_data[jobnr];
/* NULL here makes update_pos take its unconditional-broadcast path. */
2503  VP8ThreadData *next_td = NULL, *prev_td = NULL;
2504  VP8Frame *curframe = s->curframe;
2505  int mb_y, num_jobs = s->num_jobs;
2506  int ret;
2507 
2508  td->thread_nr = threadnr;
2509  for (mb_y = jobnr; mb_y < s->mb_height; mb_y += num_jobs) {
2510  td->thread_mb_pos = mb_y << 16;
2511  ret = s->decode_mb_row_no_filter(avctx, tdata, jobnr, threadnr);
2512  if (ret < 0) {
/* On error, mark this job as "past the end" so waiters unblock. */
2513  update_pos(td, s->mb_height, INT_MAX & 0xFFFF);
2514  return ret;
2515  }
2516  if (s->deblock_filter)
2517  s->filter_mb_row(avctx, tdata, jobnr, threadnr);
2518  update_pos(td, mb_y, INT_MAX & 0xFFFF);
2519 
2520  s->mv_min.y -= 64;
2521  s->mv_max.y -= 64;
2522 
/* Frame threading: report row completion to consumers of this frame. */
2523  if (avctx->active_thread_type == FF_THREAD_FRAME)
2524  ff_thread_report_progress(&curframe->tf, mb_y, 0);
2525  }
2526 
2527  return 0;
2528 }
2529 
2530 static int vp7_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata,
2531  int jobnr, int threadnr)
2532 {
2533  return vp78_decode_mb_row_sliced(avctx, tdata, jobnr, threadnr, IS_VP7);
2534 }
2535 
2536 static int vp8_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata,
2537  int jobnr, int threadnr)
2538 {
2539  return vp78_decode_mb_row_sliced(avctx, tdata, jobnr, threadnr, IS_VP8);
2540 }
2541 
2542 
/* Shared VP7/VP8 frame decoding entry point: parses the frame header,
 * manages the reference-frame pool, then runs the threaded row decoders.
 * Returns the packet size on success or a negative AVERROR.
 * NOTE(review): the extraction dropped several original lines (2563,
 * 2609, 2615-2627, 2652, 2674, 2677) from the bodies below — the
 * altref/golden/previous next_framep selection and the execute2() calls
 * are incomplete here; confirm against upstream vp8.c. */
2543 static av_always_inline
2544 int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
2545  AVPacket *avpkt, int is_vp7)
2546 {
2547  VP8Context *s = avctx->priv_data;
2548  int ret, i, referenced, num_jobs;
2549  enum AVDiscard skip_thresh;
2550  VP8Frame *av_uninit(curframe), *prev_frame;
2551 
2552  if (is_vp7)
2553  ret = vp7_decode_frame_header(s, avpkt->data, avpkt->size);
2554  else
2555  ret = vp8_decode_frame_header(s, avpkt->data, avpkt->size);
2556 
2557  if (ret < 0)
2558  goto err;
2559 
2560  prev_frame = s->framep[VP56_FRAME_CURRENT];
2561 
/* A frame is "referenced" if any reference slot will be updated from it
 * (condition truncated in this dump — see NOTE above). */
2562  referenced = s->update_last || s->update_golden == VP56_FRAME_CURRENT ||
2564 
/* Map the frame's role onto the discard threshold it falls under. */
2565  skip_thresh = !referenced ? AVDISCARD_NONREF
2566  : !s->keyframe ? AVDISCARD_NONKEY
2567  : AVDISCARD_ALL;
2568 
2569  if (avctx->skip_frame >= skip_thresh) {
2570  s->invisible = 1;
2571  memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4);
2572  goto skip_decode;
2573  }
2574  s->deblock_filter = s->filter.level && avctx->skip_loop_filter < skip_thresh;
2575 
2576  // release no longer referenced frames
2577  for (i = 0; i < 5; i++)
2578  if (s->frames[i].tf.f->data[0] &&
2579  &s->frames[i] != prev_frame &&
2580  &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
2581  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
2582  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2])
2583  vp8_release_frame(s, &s->frames[i]);
2584 
2585  curframe = s->framep[VP56_FRAME_CURRENT] = vp8_find_free_buffer(s);
2586 
2587  if (!s->colorspace)
2588  avctx->colorspace = AVCOL_SPC_BT470BG;
2589  if (s->fullrange)
2590  avctx->color_range = AVCOL_RANGE_JPEG;
2591  else
2592  avctx->color_range = AVCOL_RANGE_MPEG;
2593 
2594  /* Given that arithmetic probabilities are updated every frame, it's quite
2595  * likely that the values we have on a random interframe are complete
2596  * junk if we didn't start decode on a keyframe. So just don't display
2597  * anything rather than junk. */
2598  if (!s->keyframe && (!s->framep[VP56_FRAME_PREVIOUS] ||
2599  !s->framep[VP56_FRAME_GOLDEN] ||
2600  !s->framep[VP56_FRAME_GOLDEN2])) {
2601  av_log(avctx, AV_LOG_WARNING,
2602  "Discarding interframe without a prior keyframe!\n");
2603  ret = AVERROR_INVALIDDATA;
2604  goto err;
2605  }
2606 
2607  curframe->tf.f->key_frame = s->keyframe;
2608  curframe->tf.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I
2610  if ((ret = vp8_alloc_frame(s, curframe, referenced)) < 0)
2611  goto err;
2612 
2613  // check if golden and altref are swapped
2614  if (s->update_altref != VP56_FRAME_NONE)
2616  else
2618 
2619  if (s->update_golden != VP56_FRAME_NONE)
2621  else
2623 
2624  if (s->update_last)
2625  s->next_framep[VP56_FRAME_PREVIOUS] = curframe;
2626  else
2628 
2629  s->next_framep[VP56_FRAME_CURRENT] = curframe;
2630 
2631  if (avctx->codec->update_thread_context)
2632  ff_thread_finish_setup(avctx);
2633 
2634  s->linesize = curframe->tf.f->linesize[0];
2635  s->uvlinesize = curframe->tf.f->linesize[1];
2636 
2637  memset(s->top_nnz, 0, s->mb_width * sizeof(*s->top_nnz));
2638  /* Zero macroblock structures for top/top-left prediction
2639  * from outside the frame. */
2640  if (!s->mb_layout)
2641  memset(s->macroblocks + s->mb_height * 2 - 1, 0,
2642  (s->mb_width + 1) * sizeof(*s->macroblocks));
2643  if (!s->mb_layout && s->keyframe)
2644  memset(s->intra4x4_pred_mode_top, DC_PRED, s->mb_width * 4);
2645 
2646  memset(s->ref_count, 0, sizeof(s->ref_count));
2647 
/* Two-pass layout: decode all mode/MV info before the row jobs start. */
2648  if (s->mb_layout == 1) {
2649  // Make sure the previous frame has read its segmentation map,
2650  // if we re-use the same map.
2651  if (prev_frame && s->segmentation.enabled &&
2653  ff_thread_await_progress(&prev_frame->tf, 1, 0);
2654  if (is_vp7)
2655  vp7_decode_mv_mb_modes(avctx, curframe, prev_frame);
2656  else
2657  vp8_decode_mv_mb_modes(avctx, curframe, prev_frame);
2658  }
2659 
2660  if (avctx->active_thread_type == FF_THREAD_FRAME)
2661  num_jobs = 1;
2662  else
2663  num_jobs = FFMIN(s->num_coeff_partitions, avctx->thread_count);
2664  s->num_jobs = num_jobs;
2665  s->curframe = curframe;
2666  s->prev_frame = prev_frame;
2667  s->mv_min.y = -MARGIN;
2668  s->mv_max.y = ((s->mb_height - 1) << 6) + MARGIN;
2669  for (i = 0; i < MAX_THREADS; i++) {
2670  s->thread_data[i].thread_mb_pos = 0;
2671  s->thread_data[i].wait_mb_pos = INT_MAX;
2672  }
/* Dispatch the sliced row jobs (execute2 calls truncated in this dump). */
2673  if (is_vp7)
2675  num_jobs);
2676  else
2678  num_jobs);
2679 
2680  ff_thread_report_progress(&curframe->tf, INT_MAX, 0);
2681  memcpy(&s->framep[0], &s->next_framep[0], sizeof(s->framep[0]) * 4);
2682 
2683 skip_decode:
2684  // if future frames don't use the updated probabilities,
2685  // reset them to the values we saved
2686  if (!s->update_probabilities)
2687  s->prob[0] = s->prob[1];
2688 
2689  if (!s->invisible) {
2690  if ((ret = av_frame_ref(data, curframe->tf.f)) < 0)
2691  return ret;
2692  *got_frame = 1;
2693  }
2694 
2695  return avpkt->size;
2696 err:
2697  memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4);
2698  return ret;
2699 }
2700 
2701 int ff_vp8_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
2702  AVPacket *avpkt)
2703 {
2704  return vp78_decode_frame(avctx, data, got_frame, avpkt, IS_VP8);
2705 }
2706 
#if CONFIG_VP7_DECODER
/* VP7 frame-decoding entry point; wraps the shared VP7/VP8 path. */
static int vp7_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                            AVPacket *avpkt)
{
    return vp78_decode_frame(avctx, data, got_frame, avpkt, IS_VP7);
}
#endif /* CONFIG_VP7_DECODER */
2714 
/* Free all decoder state: flush buffered frames, then release the frame
 * pool. Safe to call with an uninitialised context (NULL priv_data).
 * NOTE(review): the extraction dropped original line 2715 carrying the
 * signature — presumably av_cold int ff_vp8_decode_free(AVCodecContext *). */
2716 {
2717  VP8Context *s = avctx->priv_data;
2718  int i;
2719 
2720  if (!s)
2721  return 0;
2722 
2723  vp8_decode_flush_impl(avctx, 1);
2724  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
2725  av_frame_free(&s->frames[i].tf.f);
2726 
2727  return 0;
2728 }
2729 
/* Allocate an AVFrame for every slot in the frame pool. Returns 0 or
 * AVERROR(ENOMEM); on failure the caller cleans up via ff_vp8_decode_free.
 * NOTE(review): the extraction dropped original line 2730 carrying the
 * signature — presumably static int vp8_init_frames(VP8Context *s). */
2731 {
2732  int i;
2733  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++) {
2734  s->frames[i].tf.f = av_frame_alloc();
2735  if (!s->frames[i].tf.f)
2736  return AVERROR(ENOMEM);
2737  }
2738  return 0;
2739 }
2740 
/* Shared VP7/VP8 decoder initialisation: DSP setup, scan table, frame pool.
 * NOTE(review): the extraction dropped original lines 2756-2764, which
 * presumably assigned the per-codec function pointers (decode_mb_row /
 * filter_mb_row) and initialised intra prediction — confirm upstream. */
2741 static av_always_inline
2742 int vp78_decode_init(AVCodecContext *avctx, int is_vp7)
2743 {
2744  VP8Context *s = avctx->priv_data;
2745  int ret;
2746 
2747  s->avctx = avctx;
2748  s->vp7 = avctx->codec->id == AV_CODEC_ID_VP7;
2749  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
2750  avctx->internal->allocate_progress = 1;
2751 
2752  ff_videodsp_init(&s->vdsp, 8);
2753 
2754  ff_vp78dsp_init(&s->vp8dsp);
2755  if (CONFIG_VP7_DECODER && is_vp7) {
2757  ff_vp7dsp_init(&s->vp8dsp);
2760  } else if (CONFIG_VP8_DECODER && !is_vp7) {
2762  ff_vp8dsp_init(&s->vp8dsp);
2765  }
2766 
2767  /* does not change for VP8 */
2768  memcpy(s->prob[0].scan, ff_zigzag_scan, sizeof(s->prob[0].scan));
2769 
2770  if ((ret = vp8_init_frames(s)) < 0) {
2771  ff_vp8_decode_free(avctx);
2772  return ret;
2773  }
2774 
2775  return 0;
2776 }
2777 
#if CONFIG_VP7_DECODER
/* VP7 decoder init: defers to the shared VP7/VP8 initialiser. */
static int vp7_decode_init(AVCodecContext *avctx)
{
    return vp78_decode_init(avctx, IS_VP7);
}
#endif /* CONFIG_VP7_DECODER */
2784 
/* VP8 decoder init: defers to the shared VP7/VP8 initialiser.
 * NOTE(review): the extraction dropped original line 2785 carrying the
 * signature — presumably av_cold int ff_vp8_decode_init(AVCodecContext *). */
2786 {
2787  return vp78_decode_init(avctx, IS_VP8);
2788 }
2789 
2790 #if CONFIG_VP8_DECODER
2791 #if HAVE_THREADS
2792 static av_cold int vp8_decode_init_thread_copy(AVCodecContext *avctx)
2793 {
2794  VP8Context *s = avctx->priv_data;
2795  int ret;
2796 
2797  s->avctx = avctx;
2798 
2799  if ((ret = vp8_init_frames(s)) < 0) {
2800  ff_vp8_decode_free(avctx);
2801  return ret;
2802  }
2803 
2804  return 0;
2805 }
2806 
/* Translate a frame pointer from the source context's frames[] array into
 * the equivalent slot of this context's array (NULL stays NULL).
 * NOTE: expands the names s_src and s — only valid where both are in scope. */
2807 #define REBASE(pic) ((pic) ? (pic) - &s_src->frames[0] + &s->frames[0] : NULL)
2808 
2809 static int vp8_decode_update_thread_context(AVCodecContext *dst,
2810  const AVCodecContext *src)
2811 {
2812  VP8Context *s = dst->priv_data, *s_src = src->priv_data;
2813  int i;
2814 
2815  if (s->macroblocks_base &&
2816  (s_src->mb_width != s->mb_width || s_src->mb_height != s->mb_height)) {
2817  free_buffers(s);
2818  s->mb_width = s_src->mb_width;
2819  s->mb_height = s_src->mb_height;
2820  }
2821 
2822  s->prob[0] = s_src->prob[!s_src->update_probabilities];
2823  s->segmentation = s_src->segmentation;
2824  s->lf_delta = s_src->lf_delta;
2825  memcpy(s->sign_bias, s_src->sign_bias, sizeof(s->sign_bias));
2826 
2827  for (i = 0; i < FF_ARRAY_ELEMS(s_src->frames); i++) {
2828  if (s_src->frames[i].tf.f->data[0]) {
2829  int ret = vp8_ref_frame(s, &s->frames[i], &s_src->frames[i]);
2830  if (ret < 0)
2831  return ret;
2832  }
2833  }
2834 
2835  s->framep[0] = REBASE(s_src->next_framep[0]);
2836  s->framep[1] = REBASE(s_src->next_framep[1]);
2837  s->framep[2] = REBASE(s_src->next_framep[2]);
2838  s->framep[3] = REBASE(s_src->next_framep[3]);
2839 
2840  return 0;
2841 }
2842 #endif /* HAVE_THREADS */
2843 #endif /* CONFIG_VP8_DECODER */
2844 
2845 #if CONFIG_VP7_DECODER
/* VP7 decoder registration.
 * NOTE(review): the extraction dropped original line 2856 (presumably the
 * .flush callback) — confirm against upstream vp8.c. */
2846 AVCodec ff_vp7_decoder = {
2847  .name = "vp7",
2848  .long_name = NULL_IF_CONFIG_SMALL("On2 VP7"),
2849  .type = AVMEDIA_TYPE_VIDEO,
2850  .id = AV_CODEC_ID_VP7,
2851  .priv_data_size = sizeof(VP8Context),
2852  .init = vp7_decode_init,
2853  .close = ff_vp8_decode_free,
2854  .decode = vp7_decode_frame,
2855  .capabilities = AV_CODEC_CAP_DR1,
2857 };
2858 #endif /* CONFIG_VP7_DECODER */
2859 
2860 #if CONFIG_VP8_DECODER
/* VP8 decoder registration.
 * NOTE(review): the extraction dropped original lines 2867, 2869 and
 * 2871-2872 (presumably .init, .decode, .flush and the remaining
 * capability flags / hw configs) — confirm against upstream vp8.c. */
2861 AVCodec ff_vp8_decoder = {
2862  .name = "vp8",
2863  .long_name = NULL_IF_CONFIG_SMALL("On2 VP8"),
2864  .type = AVMEDIA_TYPE_VIDEO,
2865  .id = AV_CODEC_ID_VP8,
2866  .priv_data_size = sizeof(VP8Context),
2868  .close = ff_vp8_decode_free,
2870  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
2873  .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp8_decode_init_thread_copy),
2874  .update_thread_context = ONLY_IF_THREADS_ENABLED(vp8_decode_update_thread_context),
2875 };
2876 #endif /* CONFIG_VP8_DECODER */
uint8_t golden
Definition: vp8.h:235
uint8_t inner_limit
Definition: vp8.h:75
#define VERT_PRED8x8
Definition: h264pred.h:70
VP8Macroblock * macroblocks
Definition: vp8.h:178
static const uint8_t vp8_dc_qlookup[VP8_MAX_QUANT+1]
Definition: vp8data.h:711
static av_always_inline void intra_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb, int mb_x, int mb_y, int is_vp7)
Definition: vp8.c:1595
static const uint8_t vp8_submv_prob[5][3]
Definition: vp8data.h:153
static const uint16_t vp7_ydc_qlookup[]
Definition: vp8data.h:778
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
#define NULL
Definition: coverity.c:32
const struct AVCodec * codec
Definition: avcodec.h:1731
discard all frames except keyframes
Definition: avcodec.h:821
Definition: vp9.h:83
void(* prefetch)(uint8_t *buf, ptrdiff_t stride, int h)
Prefetch memory into cache (if supported by hardware).
Definition: videodsp.h:76
const char * s
Definition: avisynth_c.h:768
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static const uint8_t vp7_mv_default_prob[2][17]
Definition: vp8data.h:744
#define DC_128_PRED8x8
Definition: h264pred.h:76
static av_always_inline int pthread_mutex_destroy(pthread_mutex_t *mutex)
Definition: os2threads.h:106
static void copy(const float *p1, float *p2, const int length)
(only used in prediction) no split MVs
Definition: vp8.h:70
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:125
void ff_vp7dsp_init(VP8DSPContext *c)
static void update_lf_deltas(VP8Context *s)
Definition: vp8.c:236
This structure describes decoded (raw) audio or video data.
Definition: frame.h:187
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
static void flush(AVCodecContext *avctx)
static int vp8_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2420
static const uint8_t vp7_pred4x4_mode[]
Definition: vp8data.h:33
int8_t sign_bias[4]
one state [0, 1] per ref frame type
Definition: vp8.h:156
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:1924
static av_always_inline int inter_predict_dc(int16_t block[16], int16_t pred[2])
Definition: vp8.c:1320
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
static int init_thread_copy(AVCodecContext *avctx)
Definition: tta.c:392
#define VP7_MV_PRED_COUNT
Definition: vp8data.h:68
AVFrame * f
Definition: thread.h:36
static av_always_inline int vp8_rac_get_tree(VP56RangeCoder *c, const int8_t(*tree)[2], const uint8_t *probs)
Definition: vp56.h:380
uint8_t feature_value[4][4]
Definition: vp8.h:301
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:210
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
ptrdiff_t linesize
Definition: vp8.h:146
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 ...
Definition: pixfmt.h:457
uint8_t * intra4x4_pred_mode_top
Definition: vp8.h:180
uint8_t mbskip_enabled
Definition: vp8.h:151
static VP56Frame ref_to_update(VP8Context *s, int update, VP56Frame ref)
Determine which buffers golden and altref should be updated with after this frame.
Definition: vp8.c:352
void(* vp8_v_loop_filter16y)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:48
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2459
static int vp7_decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, uint8_t *token_prob, int16_t qmul[2], const uint8_t scan[16])
Definition: vp8.c:1342
uint8_t token[4][16][3][NUM_DCT_TOKENS-1]
Definition: vp8.h:238
uint8_t scan[16]
Definition: vp8.h:240
int size
Definition: avcodec.h:1648
const char * b
Definition: vf_curves.c:113
static void vp8_decode_flush(AVCodecContext *avctx)
Definition: vp8.c:120
#define MARGIN
Definition: vp8.c:2200
vp8_mc_func put_vp8_bilinear_pixels_tab[3][3][3]
Definition: vp8dsp.h:81
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1950
VP56mv bmv[16]
Definition: vp8.h:91
struct VP8Context::@144 segmentation
Base parameters for segmentation, i.e.
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:411
av_cold void ff_vp78dsp_init(VP8DSPContext *dsp)
Definition: vp8dsp.c:664
uint8_t inner_filter
Definition: vp8.h:76
static const int8_t vp8_pred8x8c_tree[3][2]
Definition: vp8data.h:180
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:222
uint8_t segmentid[3]
Definition: vp8.h:231
static const uint16_t vp7_y2dc_qlookup[]
Definition: vp8data.h:803
discard all
Definition: avcodec.h:822
void ff_thread_await_progress(ThreadFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures.
static void copy_chroma(AVFrame *dst, AVFrame *src, int width, int height)
Definition: vp8.c:427
#define src
Definition: vp8dsp.c:254
#define HOR_PRED8x8
Definition: h264pred.h:69
AVCodec.
Definition: avcodec.h:3671
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_RL16
Definition: bytestream.h:87
uint8_t sharpness
Definition: vp8.h:175
#define AV_WN32A(p, v)
Definition: intreadwrite.h:543
2 16x8 blocks (vertical)
Definition: vp8.h:66
#define AV_COPY32(d, s)
Definition: intreadwrite.h:591
static av_always_inline int pthread_cond_destroy(pthread_cond_t *cond)
Definition: os2threads.h:138
int update_probabilities
If this flag is not set, all the probability updates are discarded after this frame is decoded...
Definition: vp8.h:253
VP8Frame * framep[4]
Definition: vp8.h:139
static int vp8_decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, uint8_t *token_prob, int16_t qmul[2])
Definition: vp8.c:1354
static void vp7_filter_mb_row(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2485
#define VP7_MVC_SIZE
Definition: vp8.c:393
static int vp7_read_mv_component(VP56RangeCoder *c, const uint8_t *p)
Definition: vp8.c:814
vp8_mc_func put_vp8_epel_pixels_tab[3][3][3]
first dimension: 4-log2(width) second dimension: 0 if no vertical interpolation is needed; 1 4-tap ve...
Definition: vp8dsp.h:80
static av_always_inline const uint8_t * get_submv_prob(uint32_t left, uint32_t top, int is_vp7)
Definition: vp8.c:825
static const uint8_t vp8_pred8x8c_prob_inter[3]
Definition: vp8data.h:189
static av_always_inline int decode_block_coeffs(VP56RangeCoder *c, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, int zero_nhood, int16_t qmul[2], const uint8_t scan[16], int vp7)
Definition: vp8.c:1379
uint8_t(* top_nnz)[9]
Definition: vp8.h:220
int num_jobs
Definition: vp8.h:270
static const uint8_t vp8_mbsplits[5][16]
Definition: vp8data.h:127
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:3345
#define AV_RN32A(p)
Definition: intreadwrite.h:531
static int16_t block[64]
Definition: dct.c:115
uint8_t pred16x16[4]
Definition: vp8.h:236
static const int8_t vp8_pred16x16_tree_intra[4][2]
Definition: vp8data.h:47
uint8_t update_map
Definition: vp8.h:167
#define PLANE_PRED8x8
Definition: h264pred.h:71
uint16_t mb_height
Definition: vp8.h:145
int16_t y
Definition: vp56.h:68
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:63
void void avpriv_request_sample(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
int update_golden
VP56_FRAME_NONE if not updated, or which frame to copy if so.
Definition: vp8.h:246
static av_always_inline void filter_mb_row(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr, int is_vp7)
Definition: vp8.c:2426
uint8_t intra4x4_pred_mode_top[4]
Definition: vp8.h:89
uint8_t
static int vp7_update_dimensions(VP8Context *s, int width, int height)
Definition: vp8.c:204
#define av_cold
Definition: attributes.h:82
ptrdiff_t uvlinesize
Definition: vp8.h:147
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:150
#define mb
#define DC_PRED8x8
Definition: h264pred.h:68
int fade_present
Fade bit present in bitstream (VP7)
Definition: vp8.h:286
static av_always_inline void vp7_decode_mvs(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int layout)
Definition: vp8.c:943
static VP8Frame * vp8_find_free_buffer(VP8Context *s)
Definition: vp8.c:125
uint8_t ref_frame
Definition: vp8.h:84
static av_always_inline int check_intra_pred4x4_mode_emuedge(int mode, int mb_x, int mb_y, int *copy_buf, int vp7)
Definition: vp8.c:1559
Multithreading support functions.
Definition: vp9.h:82
#define TOP_DC_PRED8x8
Definition: h264pred.h:75
int ff_vp8_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: vp8.c:2701
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:388
uint8_t mvc[2][19]
Definition: vp8.h:239
VP56mv mv
Definition: vp8.h:90
int8_t base_quant[4]
Definition: vp8.h:168
static const uint8_t vp8_mv_update_prob[2][19]
Definition: vp8data.h:733
struct VP8Context::@146 qmat[4]
Macroblocks can have one of 4 different quants in a frame when segmentation is enabled.
static AVFrame * frame
void(* pred8x8[4+3+4])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:97
int update_last
update VP56_FRAME_PREVIOUS with the current one
Definition: vp8.h:245
#define height
uint8_t * data
Definition: avcodec.h:1647
int8_t yoffset
Definition: vp8data.h:62
int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
Definition: utils.c:3973
static void parse_segment_info(VP8Context *s)
Definition: vp8.c:215
ptrdiff_t size
Definition: opengl_enc.c:101
VP8Frame * prev_frame
Definition: vp8.h:142
int num_coeff_partitions
All coefficients are contained in separate arith coding contexts.
Definition: vp8.h:259
static const uint8_t vp8_token_default_probs[4][8][3][NUM_DCT_TOKENS-1]
Definition: vp8data.h:369
vp8_mc_func put_pixels_tab[3][3][3]
Definition: vp8.h:264
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when they are ready for the next thread to st...
void(* pred4x4[9+3+3])(uint8_t *src, const uint8_t *topright, ptrdiff_t stride)
Definition: h264pred.h:93
#define AV_COPY64(d, s)
Definition: intreadwrite.h:595
uint8_t feature_index_prob[4][3]
Definition: vp8.h:300
uint8_t intra4x4_pred_mode_mb[16]
Definition: vp8.h:88
#define av_log(a,...)
static av_always_inline int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt, int is_vp7)
Definition: vp8.c:2544
uint8_t intra4x4_pred_mode_left[4]
Definition: vp8.h:181
#define VERT_VP8_PRED
for VP8, VERT_PRED is the average of
Definition: h264pred.h:60
uint8_t colorspace
0 is the only value allowed (meaning bt601)
Definition: vp8.h:267
static const VP56mv * get_bmv_ptr(const VP8Macroblock *mb, int subblock)
Definition: vp8.c:937
enum AVCodecID id
Definition: avcodec.h:3685
static const uint8_t vp8_mbsplit_count[4]
Definition: vp8data.h:142
static double alpha(void *priv, double x, double y)
Definition: vf_geq.c:99
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
static const int8_t vp8_coeff_band_indexes[8][10]
Definition: vp8data.h:331
#define td
Definition: regdef.h:70
H264PredContext hpc
Definition: vp8.h:263
Definition: vp8.h:125
static const uint8_t vp8_pred4x4_mode[]
Definition: vp8data.h:40
static av_always_inline void prefetch_motion(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int mb_xy, int ref)
Definition: vp8.c:1879
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() frame-for multithreaded codecs.
uint8_t absolute_vals
Definition: vp8.h:166
uint16_t mb_width
Definition: vp8.h:144
void(* vp8_luma_dc_wht_dc)(int16_t block[4][4][16], int16_t dc[16])
Definition: vp8dsp.h:39
static const uint8_t vp8_dct_cat2_prob[]
Definition: vp8data.h:345
static const uint8_t vp8_mv_default_prob[2][19]
Definition: vp8data.h:755
#define FF_SIGNBIT(x)
Definition: internal.h:75
uint8_t last
Definition: vp8.h:234
static const int sizes[][2]
Definition: img2dec.c:50
#define AVERROR(e)
Definition: error.h:43
void(* vp8_h_loop_filter8uv)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:54
static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
Definition: vp8.c:645
static void fade(uint8_t *dst, ptrdiff_t dst_linesize, const uint8_t *src, ptrdiff_t src_linesize, int width, int height, int alpha, int beta)
Definition: vp8.c:438
uint8_t mode
Definition: vp8.h:83
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:163
static av_always_inline int check_tm_pred8x8_mode(int mode, int mb_x, int mb_y, int vp7)
Definition: vp8.c:1524
static int vp8_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2536
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:180
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:3170
const char * r
Definition: vf_curves.c:111
const uint8_t ff_zigzag_scan[16+1]
Definition: mathtables.c:109
VP8 compatible video decoder.
void(* vp8_v_loop_filter8uv)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:52
static const uint8_t vp8_mbfirstidx[4][16]
Definition: vp8data.h:135
AVCodecContext * avctx
Definition: vp8.h:138
#define EDGE_EMU_LINESIZE
Definition: vp8.h:120
uint16_t inter_dc_pred[2][2]
Interframe DC prediction (VP7) [0] VP56_FRAME_PREVIOUS [1] VP56_FRAME_GOLDEN.
Definition: vp8.h:293
VideoDSPContext vdsp
Definition: vp8.h:261
const char * name
Name of the codec implementation.
Definition: avcodec.h:3678
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_RL24
Definition: bytestream.h:87
VP8Macroblock * macroblocks_base
Definition: vp8.h:243
static av_always_inline void vp8_mc_part(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], ThreadFrame *ref_frame, int x_off, int y_off, int bx_off, int by_off, int block_w, int block_h, int width, int height, VP56mv *mv)
Definition: vp8.c:1841
static const uint8_t vp8_pred4x4_prob_inter[9]
Definition: vp8data.h:192
uint8_t edge_emu_buffer[21 *EDGE_EMU_LINESIZE]
Definition: vp8.h:121
int16_t block[6][4][16]
Definition: vp8.h:95
static av_always_inline int decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, uint8_t *token_prob, int16_t qmul[2], const uint8_t scan[16], int vp7)
Definition: vp8.c:1260
static const int vp7_mode_contexts[31][4]
Definition: vp8data.h:84
static void vp8_filter_mb_row(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2491
static void vp7_get_quants(VP8Context *s)
Definition: vp8.c:287
#define FFMAX(a, b)
Definition: common.h:94
uint8_t keyframe
Definition: vp8.h:149
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: avcodec.h:1057
struct VP8Context::@147 lf_delta
int x
Definition: vp8.h:131
const uint8_t * end
Definition: vp56.h:90
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:219
VP56Frame
Definition: vp56.h:40
int(* decode_mb_row_no_filter)(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.h:278
int16_t luma_qmul[2]
Definition: vp8.h:190
static const uint8_t vp8_pred16x16_prob_inter[4]
Definition: vp8data.h:164
Definition: hls.c:67
useful rectangle filling function
int ff_vp56_init_range_decoder(VP56RangeCoder *c, const uint8_t *buf, int buf_size)
Definition: vp56rac.c:40
#define MAX_THREADS
static av_always_inline void update(SilenceDetectContext *s, AVFrame *insamples, int is_silence, int64_t nb_samples_notify, AVRational time_base)
4x4 blocks of 4x4px each
Definition: vp8.h:69
uint8_t deblock_filter
Definition: vp8.h:150
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:3162
#define H_LOOP_FILTER_16Y_INNER(cond)
#define FFMIN(a, b)
Definition: common.h:96
uint8_t feature_present_prob[4]
Definition: vp8.h:299
static av_always_inline void vp8_mc_chroma(VP8Context *s, VP8ThreadData *td, uint8_t *dst1, uint8_t *dst2, ThreadFrame *ref, const VP56mv *mv, int x_off, int y_off, int block_w, int block_h, int width, int height, ptrdiff_t linesize, vp8_mc_func mc_func[3][3])
chroma MC function
Definition: vp8.c:1791
#define width
uint8_t fullrange
whether we can skip clamping in dsp functions
Definition: vp8.h:268
int16_t block_dc[16]
Definition: vp8.h:96
static av_unused int vp8_rac_get_sint(VP56RangeCoder *c, int bits)
Definition: vp56.h:336
int width
picture width / height.
Definition: avcodec.h:1909
uint8_t mbskip
Definition: vp8.h:232
int8_t ref[4]
filter strength adjustment for macroblocks that reference: [0] - intra / VP56_FRAME_CURRENT [1] - VP5...
Definition: vp8.h:216
static int vp7_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2414
void(* filter_mb_row)(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.h:279
void(* vp8_idct_dc_add4y)(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride)
Definition: vp8dsp.h:42
static av_cold int vp8_init_frames(VP8Context *s)
Definition: vp8.c:2730
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
static void free_buffers(VP8Context *s)
Definition: vp8.c:49
int32_t
AVFormatContext * ctx
Definition: movenc.c:48
#define check_thread_pos(td, otd, mb_x_check, mb_y_check)
Definition: vp8.c:2282
static av_always_inline int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
Definition: os2threads.h:98
static int vp8_read_mv_component(VP56RangeCoder *c, const uint8_t *p)
Definition: vp8.c:819
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:3163
void(* vp8_mc_func)(uint8_t *dst, ptrdiff_t dstStride, uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: vp8dsp.h:33
int16_t luma_dc_qmul[2]
luma dc-only block quant
Definition: vp8.h:191
int16_t chroma_qmul[2]
Definition: vp8.h:192
static const uint8_t vp8_pred4x4_prob_intra[10][10][9]
Definition: vp8data.h:196
uint8_t(* top_border)[16+8+8]
Definition: vp8.h:219
int n
Definition: avisynth_c.h:684
ThreadFrame tf
Definition: vp8.h:126
static av_always_inline int decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr, int is_vp7)
Definition: vp8.c:2286
static av_always_inline void filter_level_for_mb(VP8Context *s, VP8Macroblock *mb, VP8FilterStrength *f, int is_vp7)
Definition: vp8.c:2048
static const int8_t vp7_feature_index_tree[4][2]
Definition: vp8data.h:771
static const uint8_t vp7_feature_value_size[2][4]
Definition: vp8data.h:766
#define vp56_rac_get_prob
Definition: vp56.h:254
static void vp8_decode_flush_impl(AVCodecContext *avctx, int free_mem)
Definition: vp8.c:107
static av_always_inline void decode_mb_coeffs(VP8Context *s, VP8ThreadData *td, VP56RangeCoder *c, VP8Macroblock *mb, uint8_t t_nnz[9], uint8_t l_nnz[9], int is_vp7)
Definition: vp8.c:1394
#define LEFT_DC_PRED8x8
Definition: h264pred.h:74
static void vp8_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame, VP8Frame *prev_frame)
Definition: vp8.c:2240
#define cat(a, bpp, b)
Definition: vp9dsp_init.h:29
uint8_t segment
Definition: vp8.h:87
#define FF_ARRAY_ELEMS(a)
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:3151
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:475
int8_t xoffset
Definition: vp8data.h:63
static const float pred[4]
Definition: siprdata.h:259
static int vp7_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2530
#define IS_VP8
Definition: vp8dsp.h:104
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: avcodec.h:1061
static const int8_t mv[256][2]
Definition: 4xm.c:77
static void vp7_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame, VP8Frame *prev_frame)
Definition: vp8.c:2234
static av_always_inline int check_intra_pred8x8_mode_emuedge(int mode, int mb_x, int mb_y, int vp7)
Definition: vp8.c:1533
static av_always_inline int vp56_rac_get_prob_branchy(VP56RangeCoder *c, int prob)
Definition: vp56.h:271
void(* vp8_v_loop_filter8uv_inner)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:62
void(* vp8_h_loop_filter_simple)(uint8_t *dst, ptrdiff_t stride, int flim)
Definition: vp8dsp.h:70
#define src1
Definition: h264pred.c:139
static av_always_inline void inter_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb, int mb_x, int mb_y)
Apply motion vectors to prediction buffer, chapter 18.
Definition: vp8.c:1902
VP8Frame * curframe
Definition: vp8.h:141
uint8_t simple
Definition: vp8.h:173
void(* vp8_idct_add)(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
Definition: vp8dsp.h:40
VP8Frame frames[5]
Definition: vp8.h:265
Libavcodec external API header.
static const uint8_t vp8_pred8x8c_prob_intra[3]
Definition: vp8data.h:186
uint8_t level
Definition: vp8.h:174
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:218
static void vp8_release_frame(VP8Context *s, VP8Frame *f)
Definition: vp8.c:82
AVBufferRef * seg_map
Definition: vp8.h:127
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
static const uint16_t vp7_yac_qlookup[]
Definition: vp8data.h:790
main external API structure.
Definition: avcodec.h:1722
static int vp7_fade_frame(VP8Context *s, VP56RangeCoder *c)
Definition: vp8.c:452
uint8_t * data
The data buffer.
Definition: buffer.h:89
VP8Frame * next_framep[4]
Definition: vp8.h:140
int mb_layout
This describes the macroblock memory layout.
Definition: vp8.h:276
uint8_t left_nnz[9]
For coeff decode, we need to know whether the above block had non-zero coefficients.
Definition: vp8.h:111
static const uint8_t vp8_mbsplit_prob[3]
Definition: vp8data.h:145
VP56RangeCoder c
header context, includes mb modes and motion vectors
Definition: vp8.h:222
void * buf
Definition: avisynth_c.h:690
int y
Definition: vp8.h:132
int bits
Definition: vp56.h:87
void(* pred16x16[4+3+2])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:98
VP56RangeCoder coeff_partition[8]
Definition: vp8.h:260
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:83
static const int8_t vp8_pred16x16_tree_inter[4][2]
Definition: vp8data.h:54
int vp7
Definition: vp8.h:281
static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
Definition: vp8.c:260
int coded_height
Definition: avcodec.h:1924
static int vp8_update_dimensions(VP8Context *s, int width, int height)
Definition: vp8.c:209
int index
Definition: gxfenc.c:89
VP8FilterStrength * filter_strength
Definition: vp8.h:122
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:2452
VP8intmv mv_min
Definition: vp8.h:153
static av_always_inline void clamp_mv(VP8Context *s, VP56mv *dst, const VP56mv *src)
Definition: vp8.c:775
void(* vp8_idct_dc_add4uv)(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride)
Definition: vp8dsp.h:44
static av_always_inline int check_dc_pred8x8_mode(int mode, int mb_x, int mb_y)
Definition: vp8.c:1515
static void vp78_update_probability_tables(VP8Context *s)
Definition: vp8.c:377
#define MV_EDGE_CHECK(n)
static const int8_t vp8_pred4x4_tree[9][2]
Definition: vp8data.h:168
uint8_t enabled
whether each mb can have a different strength based on mode/ref
Definition: vp8.h:165
static av_always_inline void idct_mb(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb)
Definition: vp8.c:1986
static void vp78_update_pred16x16_pred8x8_mvc_probabilities(VP8Context *s, int mvc_size)
Definition: vp8.c:396
static av_always_inline int read_mv_component(VP56RangeCoder *c, const uint8_t *p, int vp7)
Motion vector coding, 17.1.
Definition: vp8.c:786
static const uint8_t subpel_idx[3][8]
Definition: vp8.c:1709
static void update_refs(VP8Context *s)
Definition: vp8.c:416
static av_always_inline int vp8_rac_get_coeff(VP56RangeCoder *c, const uint8_t *prob)
Definition: vp56.h:393
static const uint8_t vp8_coeff_band[16]
Definition: vp8data.h:325
#define u(width,...)
int allocate_progress
Whether to allocate progress for frame threading.
Definition: internal.h:126
static const uint16_t vp8_ac_qlookup[VP8_MAX_QUANT+1]
Definition: vp8data.h:722
static const uint8_t vp8_pred16x16_prob_intra[4]
Definition: vp8data.h:161
uint8_t score
Definition: vp8data.h:65
struct VP8Context::@148 prob[2]
These are all of the updatable probabilities for binary decisions.
mfxU16 profile
Definition: qsvenc.c:44
static av_always_inline void decode_intra4x4_modes(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb, int mb_x, int keyframe, int layout)
Definition: vp8.c:1134
static int vp8_rac_get_uint(VP56RangeCoder *c, int bits)
Definition: vp56.h:324
#define DC_127_PRED8x8
Definition: h264pred.h:85
void(* vp8_luma_dc_wht)(int16_t block[4][4][16], int16_t dc[16])
Definition: vp8dsp.h:38
Definition: vp56.h:66
av_cold int ff_vp8_decode_init(AVCodecContext *avctx)
Definition: vp8.c:2785
int update_altref
Definition: vp8.h:247
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:201
VP8intmv mv_max
Definition: vp8.h:154
uint8_t feature_enabled[4]
Macroblock features (VP7)
Definition: vp8.h:298
int8_t mode[VP8_MVMODE_SPLIT+1]
filter strength adjustment for the following macroblock modes: [0-3] - i16x16 (always zero) [4] - i4x...
Definition: vp8.h:207
2 8x16 blocks (horizontal)
Definition: vp8.h:67
av_cold int ff_vp8_decode_free(AVCodecContext *avctx)
Definition: vp8.c:2715
Definition: vp9.h:84
#define AV_ZERO128(d)
Definition: intreadwrite.h:627
uint8_t pred8x8c[3]
Definition: vp8.h:237
the normal 219*2^(n-8) "MPEG" YUV ranges
Definition: pixfmt.h:474
discard all non reference
Definition: avcodec.h:818
static av_always_inline void vp78_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *curframe, VP8Frame *prev_frame, int is_vp7)
Definition: vp8.c:2202
uint8_t partitioning
Definition: vp8.h:85
#define AV_ZERO64(d)
Definition: intreadwrite.h:623
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:62
static av_always_inline void decode_mb_mode(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, uint8_t *segment, uint8_t *ref, int layout, int is_vp7)
Definition: vp8.c:1169
void(* vp8_v_loop_filter_simple)(uint8_t *dst, ptrdiff_t stride, int flim)
Definition: vp8dsp.h:69
int16_t x
Definition: vp56.h:67
common internal api header.
static void vp8_get_quants(VP8Context *s)
Definition: vp8.c:306
if(ret< 0)
Definition: vf_mcdeint.c:282
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
#define LOCAL_ALIGNED(a, t, v,...)
Definition: internal.h:114
#define AV_COPY128(d, s)
Definition: intreadwrite.h:599
static int ref_frame(Vp3DecodeContext *s, ThreadFrame *dst, ThreadFrame *src)
Definition: vp3.c:1915
int wait_mb_pos
Definition: vp8.h:118
static int vp8_alloc_frame(VP8Context *s, VP8Frame *f, int ref)
Definition: vp8.c:69
uint8_t chroma_pred_mode
Definition: vp8.h:86
static double c[64]
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
static av_always_inline void backup_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, ptrdiff_t linesize, ptrdiff_t uvlinesize, int simple)
Definition: vp8.c:1469
static av_always_inline int pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr)
Definition: os2threads.h:127
#define DC_129_PRED8x8
Definition: h264pred.h:86
enum AVDiscard skip_loop_filter
Skip loop filtering for selected frames.
Definition: avcodec.h:3331
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(constuint8_t *) pi-0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(constint16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(constint32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(constint64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64,*(constint64_t *) pi *(1.0f/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64,*(constint64_t *) pi *(1.0/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(constfloat *) pi *(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, av_clip_int16(lrint(*(constdouble *) pi 
*(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(constdouble *) pi *(INT64_C(1)<< 63)))#defineFMT_PAIR_FUNC(out, in) staticconv_func_type *constfmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64),};staticvoidcpy1(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, len);}staticvoidcpy2(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 2 *len);}staticvoidcpy4(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 4 *len);}staticvoidcpy8(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 8 *len);}AudioConvert *swri_audio_convert_alloc(enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, constint *ch_map, intflags){AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) returnNULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) returnNULL;if(channels==1){in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);}ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map){switch(av_get_bytes_per_sample(in_fmt)){case1:ctx->simd_f=cpy1;break;case2:ctx->simd_f=cpy2;break;case4:ctx->simd_f=cpy4;break;case8:ctx->simd_f=cpy8;break;}}if(HAVE_YASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);returnctx;}voidswri_audio_convert_free(AudioConvert **ctx){av_freep(ctx);}intswri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, 
intlen){intch;intoff=0;constintos=(out->planar?1:out->ch_count)*out->bps;unsignedmisaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask){intplanes=in->planar?in->ch_count:1;unsignedm=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;}if(ctx->out_simd_align_mask){intplanes=out->planar?out->ch_count:1;unsignedm=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;}if(ctx->simd_f &&!ctx->ch_map &&!misaligned){off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){if(out->planar==in->planar){intplanes=out->planar?out->ch_count:1;for(ch=0;ch< planes;ch++){ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
static av_always_inline int vp8_rac_get(VP56RangeCoder *c)
Definition: vp56.h:308
int invisible
Definition: vp8.h:244
static av_always_inline int decode_splitmvs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb, int layout, int is_vp7)
Split motion vector prediction, 16.4.
Definition: vp8.c:842
static const SiprModeParam modes[MODE_COUNT]
Definition: sipr.c:69
int ref_count[3]
Definition: vp8.h:157
void * priv_data
Definition: avcodec.h:1764
int(* update_thread_context)(AVCodecContext *dst, const AVCodecContext *src)
Copy necessary context variables from a previous thread context to the current one.
Definition: avcodec.h:3726
static av_always_inline int check_tm_pred4x4_mode(int mode, int mb_x, int mb_y, int vp7)
Definition: vp8.c:1549
void(* vp8_h_loop_filter16y)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:50
#define MODE_I4x4
Definition: vp8.h:57
static int vp7_calculate_mb_offset(int mb_x, int mb_y, int mb_width, int xoffset, int yoffset, int boundary, int *edge_x, int *edge_y)
The vp7 reference decoder uses a padding macroblock column (added to right edge of the frame) to guar...
Definition: vp8.c:924
#define XCHG(a, b, xchg)
int(* execute2)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count)
The codec may call this to execute several independent things.
Definition: avcodec.h:3211
#define update_pos(td, mb_y, mb_x)
Definition: vp8.c:2283
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1772
#define HOR_VP8_PRED
unaveraged version of HOR_PRED, see
Definition: h264pred.h:63
VP8DSPContext vp8dsp
Definition: vp8.h:262
static av_always_inline int update_dimensions(VP8Context *s, int width, int height, int is_vp7)
Definition: vp8.c:150
int thread_nr
Definition: vp8.h:112
static av_always_inline void xchg_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, ptrdiff_t linesize, ptrdiff_t uvlinesize, int mb_x, int mb_y, int mb_width, int simple, int xchg)
Definition: vp8.c:1481
#define AV_ZERO32(d)
Definition: intreadwrite.h:619
static av_always_inline int vp78_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr, int is_vp7)
Definition: vp8.c:2498
static const double coeff[2][5]
Definition: vf_owdenoise.c:72
void(* vp8_idct_dc_add)(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
Definition: vp8dsp.h:41
uint64_t layout
AVDiscard
Definition: avcodec.h:813
static av_unused int vp8_rac_get_nn(VP56RangeCoder *c)
Definition: vp56.h:358
void(* vp8_v_loop_filter16y_inner)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:58
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, 
intapply_map){AudioConvert *ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> dc
#define av_uninit(x)
Definition: attributes.h:148
const uint8_t * buffer
Definition: vp56.h:89
static av_always_inline void vp8_mc_luma(VP8Context *s, VP8ThreadData *td, uint8_t *dst, ThreadFrame *ref, const VP56mv *mv, int x_off, int y_off, int block_w, int block_h, int width, int height, ptrdiff_t linesize, vp8_mc_func mc_func[3][3])
luma MC function
Definition: vp8.c:1733
static const uint8_t vp8_token_update_probs[4][8][3][NUM_DCT_TOKENS-1]
Definition: vp8data.h:540
static av_always_inline void filter_mb(VP8Context *s, uint8_t *dst[3], VP8FilterStrength *f, int mb_x, int mb_y, int is_vp7)
Definition: vp8.c:2081
#define av_freep(p)
#define IS_VP7
Definition: vp8dsp.h:103
static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
Definition: ffmpeg.c:2242
#define av_always_inline
Definition: attributes.h:39
int8_t filter_level[4]
base loop filter level
Definition: vp8.h:169
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
static const int vp8_mode_contexts[6][4]
Definition: vp8data.h:118
static const uint8_t vp8_dct_cat1_prob[]
Definition: vp8data.h:342
#define FFSWAP(type, a, b)
Definition: common.h:99
uint8_t intra
Definition: vp8.h:233
static av_always_inline void vp8_decode_mvs(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int layout)
Definition: vp8.c:1034
uint8_t non_zero_count_cache[6][4]
This is the index plus one of the last non-zero coeff for each of the blocks in the current macrobloc...
Definition: vp8.h:104
uint8_t skip
Definition: vp8.h:80
void ff_vp8dsp_init(VP8DSPContext *c)
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:87
static void vp78_reset_probability_tables(VP8Context *s)
Definition: vp8.c:368
This structure stores compressed data.
Definition: avcodec.h:1624
#define VP8_MVC_SIZE
Definition: vp8.c:394
static int vp7_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
Definition: vp8.c:491
uint8_t profile
Definition: vp8.h:152
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:1389
const uint8_t *const ff_vp8_dct_cat_prob[]
Definition: vp8data.h:362
mode
Use these values in ebur128_init (or'ed).
Definition: ebur128.h:83
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:994
VP8ThreadData * thread_data
Definition: vp8.h:137
struct VP8Context::@145 filter
Predicted.
Definition: avutil.h:275
int thread_mb_pos
Definition: vp8.h:117
2x2 blocks of 8x8px each
Definition: vp8.h:68
static av_always_inline void filter_mb_simple(VP8Context *s, uint8_t *dst, VP8FilterStrength *f, int mb_x, int mb_y)
Definition: vp8.c:2168
static const VP7MVPred vp7_mv_pred[VP7_MV_PRED_COUNT]
Definition: vp8data.h:69
static const uint16_t vp7_y2ac_qlookup[]
Definition: vp8data.h:816
static const uint8_t vp7_submv_prob[3]
Definition: vp8data.h:149
static av_always_inline int vp78_decode_init(AVCodecContext *avctx, int is_vp7)
Definition: vp8.c:2742
#define AV_WN64(p, v)
Definition: intreadwrite.h:385
uint8_t filter_level
Definition: vp8.h:74