vp9.c
1 /*
2  * VP9 compatible video decoder
3  *
4  * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
5  * Copyright (C) 2013 Clément Bœsch <u pkh me>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include "avcodec.h"
25 #include "get_bits.h"
26 #include "internal.h"
27 #include "profiles.h"
28 #include "thread.h"
29 #include "videodsp.h"
30 #include "vp56.h"
31 #include "vp9.h"
32 #include "vp9data.h"
33 #include "vp9dec.h"
34 #include "libavutil/avassert.h"
35 #include "libavutil/pixdesc.h"
36 
37 #define VP9_SYNCCODE 0x498342
38 
39 static void vp9_frame_unref(AVCodecContext *avctx, VP9Frame *f)
40 {
41  ff_thread_release_buffer(avctx, &f->tf);
42  av_buffer_unref(&f->extradata);
43  av_buffer_unref(&f->hwaccel_priv_buf);
44  f->segmentation_map = NULL;
45  f->hwaccel_picture_private = NULL;
46 }
47 
48 static int vp9_frame_alloc(AVCodecContext *avctx, VP9Frame *f)
49 {
50  VP9Context *s = avctx->priv_data;
51  int ret, sz;
52 
53  ret = ff_thread_get_buffer(avctx, &f->tf, AV_GET_BUFFER_FLAG_REF);
54  if (ret < 0)
55  return ret;
56 
57  sz = 64 * s->sb_cols * s->sb_rows;
58  f->extradata = av_buffer_allocz(sz * (1 + sizeof(VP9mvrefPair)));
59  if (!f->extradata) {
60  goto fail;
61  }
62 
63  f->segmentation_map = f->extradata->data;
64  f->mv = (VP9mvrefPair *) (f->extradata->data + sz);
65 
66  if (avctx->hwaccel) {
67  const AVHWAccel *hwaccel = avctx->hwaccel;
68  av_assert0(!f->hwaccel_picture_private);
69  if (hwaccel->frame_priv_data_size) {
70  f->hwaccel_priv_buf = av_buffer_allocz(hwaccel->frame_priv_data_size);
71  if (!f->hwaccel_priv_buf)
72  goto fail;
73  f->hwaccel_picture_private = f->hwaccel_priv_buf->data;
74  }
75  }
76 
77  return 0;
78 
79 fail:
80  vp9_frame_unref(avctx, f);
81  return AVERROR(ENOMEM);
82 }
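/* Illustrative note, not part of the original source: the single extradata
 * buffer allocated in vp9_frame_alloc() above is split into two regions,
 * a per-8x8-block segmentation map followed by the motion vector pairs:
 *
 *   int sz = 64 * s->sb_cols * s->sb_rows;               // 64 8x8 blocks per 64x64 superblock
 *   f->segmentation_map = f->extradata->data;            // first sz bytes
 *   f->mv = (VP9mvrefPair *) (f->extradata->data + sz);  // sz entries follow
 *
 * For a 1920x1080 frame (30x17 superblocks) that is 64 * 30 * 17 = 32640
 * map bytes plus 32640 VP9mvrefPair entries in one allocation.
 */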
83 
84 static int vp9_frame_ref(AVCodecContext *avctx, VP9Frame *dst, VP9Frame *src)
85 {
86  int ret;
87 
88  ret = ff_thread_ref_frame(&dst->tf, &src->tf);
89  if (ret < 0)
90  return ret;
91 
92  dst->extradata = av_buffer_ref(src->extradata);
93  if (!dst->extradata)
94  goto fail;
95 
96  dst->segmentation_map = src->segmentation_map;
97  dst->mv = src->mv;
98  dst->uses_2pass = src->uses_2pass;
99 
100  if (src->hwaccel_picture_private) {
101  dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
102  if (!dst->hwaccel_priv_buf)
103  goto fail;
104  dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
105  }
106 
107  return 0;
108 
109 fail:
110  vp9_frame_unref(avctx, dst);
111  return AVERROR(ENOMEM);
112 }
113 
114 static int update_size(AVCodecContext *avctx, int w, int h)
115 {
116 #define HWACCEL_MAX (CONFIG_VP9_DXVA2_HWACCEL + CONFIG_VP9_D3D11VA_HWACCEL * 2 + CONFIG_VP9_VAAPI_HWACCEL)
117  enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;
118  VP9Context *s = avctx->priv_data;
119  uint8_t *p;
120  int bytesperpixel = s->bytesperpixel, ret, cols, rows;
121 
122  av_assert0(w > 0 && h > 0);
123 
124  if (!(s->pix_fmt == s->gf_fmt && w == s->w && h == s->h)) {
125  if ((ret = ff_set_dimensions(avctx, w, h)) < 0)
126  return ret;
127 
128  switch (s->pix_fmt) {
129  case AV_PIX_FMT_YUV420P:
130 #if CONFIG_VP9_DXVA2_HWACCEL
131  *fmtp++ = AV_PIX_FMT_DXVA2_VLD;
132 #endif
133 #if CONFIG_VP9_D3D11VA_HWACCEL
134  *fmtp++ = AV_PIX_FMT_D3D11VA_VLD;
135  *fmtp++ = AV_PIX_FMT_D3D11;
136 #endif
137 #if CONFIG_VP9_VAAPI_HWACCEL
138  *fmtp++ = AV_PIX_FMT_VAAPI;
139 #endif
140  break;
141  case AV_PIX_FMT_YUV420P10:
142  case AV_PIX_FMT_YUV420P12:
143 #if CONFIG_VP9_VAAPI_HWACCEL
144  *fmtp++ = AV_PIX_FMT_VAAPI;
145 #endif
146  break;
147  }
148 
149  *fmtp++ = s->pix_fmt;
150  *fmtp = AV_PIX_FMT_NONE;
151 
152  ret = ff_thread_get_format(avctx, pix_fmts);
153  if (ret < 0)
154  return ret;
155 
156  avctx->pix_fmt = ret;
157  s->gf_fmt = s->pix_fmt;
158  s->w = w;
159  s->h = h;
160  }
161 
162  cols = (w + 7) >> 3;
163  rows = (h + 7) >> 3;
164 
165  if (s->intra_pred_data[0] && cols == s->cols && rows == s->rows && s->pix_fmt == s->last_fmt)
166  return 0;
167 
168  s->last_fmt = s->pix_fmt;
169  s->sb_cols = (w + 63) >> 6;
170  s->sb_rows = (h + 63) >> 6;
171  s->cols = (w + 7) >> 3;
172  s->rows = (h + 7) >> 3;
173 
174 #define assign(var, type, n) var = (type) p; p += s->sb_cols * (n) * sizeof(*var)
175  av_freep(&s->intra_pred_data[0]);
176  // FIXME we slightly over-allocate here for subsampled chroma, but a little
177  // bit of padding shouldn't affect performance...
178  p = av_malloc(s->sb_cols * (128 + 192 * bytesperpixel +
179  sizeof(*s->lflvl) + 16 * sizeof(*s->above_mv_ctx)));
180  if (!p)
181  return AVERROR(ENOMEM);
182  assign(s->intra_pred_data[0], uint8_t *, 64 * bytesperpixel);
183  assign(s->intra_pred_data[1], uint8_t *, 64 * bytesperpixel);
184  assign(s->intra_pred_data[2], uint8_t *, 64 * bytesperpixel);
185  assign(s->above_y_nnz_ctx, uint8_t *, 16);
186  assign(s->above_mode_ctx, uint8_t *, 16);
187  assign(s->above_mv_ctx, VP56mv(*)[2], 16);
188  assign(s->above_uv_nnz_ctx[0], uint8_t *, 16);
189  assign(s->above_uv_nnz_ctx[1], uint8_t *, 16);
190  assign(s->above_partition_ctx, uint8_t *, 8);
191  assign(s->above_skip_ctx, uint8_t *, 8);
192  assign(s->above_txfm_ctx, uint8_t *, 8);
193  assign(s->above_segpred_ctx, uint8_t *, 8);
194  assign(s->above_intra_ctx, uint8_t *, 8);
195  assign(s->above_comp_ctx, uint8_t *, 8);
196  assign(s->above_ref_ctx, uint8_t *, 8);
197  assign(s->above_filter_ctx, uint8_t *, 8);
198  assign(s->lflvl, VP9Filter *, 1);
199 #undef assign
200 
201  // these will be re-allocated a little later
202  av_freep(&s->b_base);
203  av_freep(&s->block_base);
204 
205  if (s->s.h.bpp != s->last_bpp) {
206  ff_vp9dsp_init(&s->dsp, s->s.h.bpp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
207  ff_videodsp_init(&s->vdsp, s->s.h.bpp);
208  s->last_bpp = s->s.h.bpp;
209  }
210 
211  return 0;
212 }
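/* Illustrative note, not part of the original source: the block grid sizes
 * computed in update_size() are plain ceiling divisions of the frame size.
 * For a 1280x720 frame:
 *
 *   cols    = (1280 + 7)  >> 3 = 160;   // 8x8 block columns
 *   rows    = ( 720 + 7)  >> 3 = 90;    // 8x8 block rows
 *   sb_cols = (1280 + 63) >> 6 = 20;    // 64x64 superblock columns
 *   sb_rows = ( 720 + 63) >> 6 = 12;    // 64x64 superblock rows
 *
 * The "above" context arrays carved out with the assign() macro are sized
 * from sb_cols, so each one spans a full row of superblocks.
 */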
213 
214 static int update_block_buffers(AVCodecContext *avctx)
215 {
216  VP9Context *s = avctx->priv_data;
217  int chroma_blocks, chroma_eobs, bytesperpixel = s->bytesperpixel;
218 
219  if (s->b_base && s->block_base && s->block_alloc_using_2pass == s->s.frames[CUR_FRAME].uses_2pass)
220  return 0;
221 
222  av_free(s->b_base);
223  av_free(s->block_base);
224  chroma_blocks = 64 * 64 >> (s->ss_h + s->ss_v);
225  chroma_eobs = 16 * 16 >> (s->ss_h + s->ss_v);
226  if (s->s.frames[CUR_FRAME].uses_2pass) {
227  int sbs = s->sb_cols * s->sb_rows;
228 
229  s->b_base = av_malloc_array(s->cols * s->rows, sizeof(VP9Block));
230  s->block_base = av_mallocz(((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
231  16 * 16 + 2 * chroma_eobs) * sbs);
232  if (!s->b_base || !s->block_base)
233  return AVERROR(ENOMEM);
234  s->uvblock_base[0] = s->block_base + sbs * 64 * 64 * bytesperpixel;
235  s->uvblock_base[1] = s->uvblock_base[0] + sbs * chroma_blocks * bytesperpixel;
236  s->eob_base = (uint8_t *) (s->uvblock_base[1] + sbs * chroma_blocks * bytesperpixel);
237  s->uveob_base[0] = s->eob_base + 16 * 16 * sbs;
238  s->uveob_base[1] = s->uveob_base[0] + chroma_eobs * sbs;
239  } else {
240  s->b_base = av_malloc(sizeof(VP9Block));
241  s->block_base = av_mallocz((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
242  16 * 16 + 2 * chroma_eobs);
243  if (!s->b_base || !s->block_base)
244  return AVERROR(ENOMEM);
245  s->uvblock_base[0] = s->block_base + 64 * 64 * bytesperpixel;
246  s->uvblock_base[1] = s->uvblock_base[0] + chroma_blocks * bytesperpixel;
247  s->eob_base = (uint8_t *) (s->uvblock_base[1] + chroma_blocks * bytesperpixel);
248  s->uveob_base[0] = s->eob_base + 16 * 16;
249  s->uveob_base[1] = s->uveob_base[0] + chroma_eobs;
250  }
251  s->block_alloc_using_2pass = s->s.frames[CUR_FRAME].uses_2pass;
252 
253  return 0;
254 }
255 
256 // The sign bit is at the end, not the start, of a bit sequence
257 static av_always_inline int get_sbits_inv(GetBitContext *gb, int n)
258 {
259  int v = get_bits(gb, n);
260  return get_bits1(gb) ? -v : v;
261 }
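/* Illustrative note, not part of the original source: unlike get_sbits(),
 * get_sbits_inv() reads the magnitude first and the sign bit last, e.g.
 * with n = 6:
 *
 *   bits 000101 followed by sign bit 1  ->  -5
 *   bits 000101 followed by sign bit 0  ->  +5
 */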
262 
263 static av_always_inline int inv_recenter_nonneg(int v, int m)
264 {
265  if (v > 2 * m)
266  return v;
267  if (v & 1)
268  return m - ((v + 1) >> 1);
269  return m + (v >> 1);
270 }
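/* Illustrative note, not part of the original source: inv_recenter_nonneg()
 * undoes the re-centering used by the probability update code. Small coded
 * values v alternate around the reference m, e.g. with m = 10:
 *
 *   v = 0 -> 10,  v = 1 -> 9,  v = 2 -> 11,  v = 3 -> 8,  v = 4 -> 12, ...
 *
 * and once v exceeds 2 * m the coded value is taken literally (v = 25 -> 25).
 */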
271 
272 // differential forward probability updates
273 static int update_prob(VP56RangeCoder *c, int p)
274 {
275  static const int inv_map_table[255] = {
276  7, 20, 33, 46, 59, 72, 85, 98, 111, 124, 137, 150, 163, 176,
277  189, 202, 215, 228, 241, 254, 1, 2, 3, 4, 5, 6, 8, 9,
278  10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24,
279  25, 26, 27, 28, 29, 30, 31, 32, 34, 35, 36, 37, 38, 39,
280  40, 41, 42, 43, 44, 45, 47, 48, 49, 50, 51, 52, 53, 54,
281  55, 56, 57, 58, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
282  70, 71, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
283  86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 99, 100,
284  101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115,
285  116, 117, 118, 119, 120, 121, 122, 123, 125, 126, 127, 128, 129, 130,
286  131, 132, 133, 134, 135, 136, 138, 139, 140, 141, 142, 143, 144, 145,
287  146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
288  161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
289  177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 191,
290  192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205, 206,
291  207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221,
292  222, 223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236,
293  237, 238, 239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
294  252, 253, 253,
295  };
296  int d;
297 
298  /* This code is trying to do a differential probability update. For a
299  * current probability A in the range [1, 255], the difference to a new
300  * probability of any value can be expressed differentially as 1-A, 255-A
301  * where some part of this (absolute range) exists both in positive as
302  * well as the negative part, whereas another part only exists in one
303  * half. We're trying to code this shared part differentially, i.e.
304  * times two where the value of the lowest bit specifies the sign, and
305  * the single part is then coded on top of this. This absolute difference
306  * then again has a value of [0, 254], but a bigger value in this range
307  * indicates that we're further away from the original value A, so we
308  * can code this as a VLC code, since higher values are increasingly
309  * unlikely. The first 20 values in inv_map_table[] allow 'cheap, rough'
310  * updates vs. the 'fine, exact' updates further down the range, which
311  * adds one extra dimension to this differential update model. */
312 
313  if (!vp8_rac_get(c)) {
314  d = vp8_rac_get_uint(c, 4) + 0;
315  } else if (!vp8_rac_get(c)) {
316  d = vp8_rac_get_uint(c, 4) + 16;
317  } else if (!vp8_rac_get(c)) {
318  d = vp8_rac_get_uint(c, 5) + 32;
319  } else {
320  d = vp8_rac_get_uint(c, 7);
321  if (d >= 65)
322  d = (d << 1) - 65 + vp8_rac_get(c);
323  d += 64;
324  av_assert2(d < FF_ARRAY_ELEMS(inv_map_table));
325  }
326 
327  return p <= 128 ? 1 + inv_recenter_nonneg(inv_map_table[d], p - 1) :
328  255 - inv_recenter_nonneg(inv_map_table[d], 255 - p);
329 }
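/* Illustrative note, not part of the original source: the VLC above splits
 * the absolute delta d into ranges of increasing cost, roughly:
 *
 *   prefix "0"   + 4 bits      ->  d in [ 0,  15]
 *   prefix "10"  + 4 bits      ->  d in [16,  31]
 *   prefix "110" + 5 bits      ->  d in [32,  63]
 *   prefix "111" + 7 (+1) bits ->  d in [64, 254]  (values >= 65 carry one
 *                                                    extra precision bit)
 *
 * d is then mapped through inv_map_table[] and re-centered around the old
 * probability p, so the cheapest codes correspond to the smallest changes.
 */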
330 
331 static int read_colorspace_details(AVCodecContext *avctx)
332 {
333  static const enum AVColorSpace colorspaces[8] = {
334  AVCOL_SPC_UNSPECIFIED, AVCOL_SPC_BT470BG, AVCOL_SPC_BT709, AVCOL_SPC_SMPTE170M,
335  AVCOL_SPC_SMPTE240M, AVCOL_SPC_BT2020_NCL, AVCOL_SPC_RESERVED, AVCOL_SPC_RGB,
336  };
337  VP9Context *s = avctx->priv_data;
338  int bits = avctx->profile <= 1 ? 0 : 1 + get_bits1(&s->gb); // 0:8, 1:10, 2:12
339 
340  s->bpp_index = bits;
341  s->s.h.bpp = 8 + bits * 2;
342  s->bytesperpixel = (7 + s->s.h.bpp) >> 3;
343  avctx->colorspace = colorspaces[get_bits(&s->gb, 3)];
344  if (avctx->colorspace == AVCOL_SPC_RGB) { // RGB = profile 1
345  static const enum AVPixelFormat pix_fmt_rgb[3] = {
346  AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12,
347  };
348  s->ss_h = s->ss_v = 0;
349  avctx->color_range = AVCOL_RANGE_JPEG;
350  s->pix_fmt = pix_fmt_rgb[bits];
351  if (avctx->profile & 1) {
352  if (get_bits1(&s->gb)) {
353  av_log(avctx, AV_LOG_ERROR, "Reserved bit set in RGB\n");
354  return AVERROR_INVALIDDATA;
355  }
356  } else {
357  av_log(avctx, AV_LOG_ERROR, "RGB not supported in profile %d\n",
358  avctx->profile);
359  return AVERROR_INVALIDDATA;
360  }
361  } else {
362  static const enum AVPixelFormat pix_fmt_for_ss[3][2 /* v */][2 /* h */] = {
363  { { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P },
364  { AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV420P } },
365  { { AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV422P10 },
366  { AV_PIX_FMT_YUV440P10, AV_PIX_FMT_YUV420P10 } },
367  { { AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12 },
368  { AV_PIX_FMT_YUV440P12, AV_PIX_FMT_YUV420P12 } }
369  };
370  avctx->color_range = get_bits1(&s->gb) ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
371  if (avctx->profile & 1) {
372  s->ss_h = get_bits1(&s->gb);
373  s->ss_v = get_bits1(&s->gb);
374  s->pix_fmt = pix_fmt_for_ss[bits][s->ss_v][s->ss_h];
375  if (s->pix_fmt == AV_PIX_FMT_YUV420P) {
376  av_log(avctx, AV_LOG_ERROR, "YUV 4:2:0 not supported in profile %d\n",
377  avctx->profile);
378  return AVERROR_INVALIDDATA;
379  } else if (get_bits1(&s->gb)) {
380  av_log(avctx, AV_LOG_ERROR, "Profile %d color details reserved bit set\n",
381  avctx->profile);
382  return AVERROR_INVALIDDATA;
383  }
384  } else {
385  s->ss_h = s->ss_v = 1;
386  s->pix_fmt = pix_fmt_for_ss[bits][1][1];
387  }
388  }
389 
390  return 0;
391 }
392 
393 static int decode_frame_header(AVCodecContext *avctx,
394  const uint8_t *data, int size, int *ref)
395 {
396  VP9Context *s = avctx->priv_data;
397  int c, i, j, k, l, m, n, w, h, max, size2, ret, sharp;
398  int last_invisible;
399  const uint8_t *data2;
400 
401  /* general header */
402  if ((ret = init_get_bits8(&s->gb, data, size)) < 0) {
403  av_log(avctx, AV_LOG_ERROR, "Failed to initialize bitstream reader\n");
404  return ret;
405  }
406  if (get_bits(&s->gb, 2) != 0x2) { // frame marker
407  av_log(avctx, AV_LOG_ERROR, "Invalid frame marker\n");
408  return AVERROR_INVALIDDATA;
409  }
410  avctx->profile = get_bits1(&s->gb);
411  avctx->profile |= get_bits1(&s->gb) << 1;
412  if (avctx->profile == 3) avctx->profile += get_bits1(&s->gb);
413  if (avctx->profile > 3) {
414  av_log(avctx, AV_LOG_ERROR, "Profile %d is not yet supported\n", avctx->profile);
415  return AVERROR_INVALIDDATA;
416  }
417  s->s.h.profile = avctx->profile;
418  if (get_bits1(&s->gb)) {
419  *ref = get_bits(&s->gb, 3);
420  return 0;
421  }
422 
423  s->last_keyframe = s->s.h.keyframe;
424  s->s.h.keyframe = !get_bits1(&s->gb);
425 
426  last_invisible = s->s.h.invisible;
427  s->s.h.invisible = !get_bits1(&s->gb);
428  s->s.h.errorres = get_bits1(&s->gb);
429  s->s.h.use_last_frame_mvs = !s->s.h.errorres && !last_invisible;
430 
431  if (s->s.h.keyframe) {
432  if (get_bits_long(&s->gb, 24) != VP9_SYNCCODE) { // synccode
433  av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
434  return AVERROR_INVALIDDATA;
435  }
436  if ((ret = read_colorspace_details(avctx)) < 0)
437  return ret;
438  // for profile 1, here follows the subsampling bits
439  s->s.h.refreshrefmask = 0xff;
440  w = get_bits(&s->gb, 16) + 1;
441  h = get_bits(&s->gb, 16) + 1;
442  if (get_bits1(&s->gb)) // display size
443  skip_bits(&s->gb, 32);
444  } else {
445  s->s.h.intraonly = s->s.h.invisible ? get_bits1(&s->gb) : 0;
446  s->s.h.resetctx = s->s.h.errorres ? 0 : get_bits(&s->gb, 2);
447  if (s->s.h.intraonly) {
448  if (get_bits_long(&s->gb, 24) != VP9_SYNCCODE) { // synccode
449  av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
450  return AVERROR_INVALIDDATA;
451  }
452  if (avctx->profile >= 1) {
453  if ((ret = read_colorspace_details(avctx)) < 0)
454  return ret;
455  } else {
456  s->ss_h = s->ss_v = 1;
457  s->s.h.bpp = 8;
458  s->bpp_index = 0;
459  s->bytesperpixel = 1;
460  s->pix_fmt = AV_PIX_FMT_YUV420P;
461  avctx->colorspace = AVCOL_SPC_BT470BG;
462  avctx->color_range = AVCOL_RANGE_JPEG;
463  }
464  s->s.h.refreshrefmask = get_bits(&s->gb, 8);
465  w = get_bits(&s->gb, 16) + 1;
466  h = get_bits(&s->gb, 16) + 1;
467  if (get_bits1(&s->gb)) // display size
468  skip_bits(&s->gb, 32);
469  } else {
470  s->s.h.refreshrefmask = get_bits(&s->gb, 8);
471  s->s.h.refidx[0] = get_bits(&s->gb, 3);
472  s->s.h.signbias[0] = get_bits1(&s->gb) && !s->s.h.errorres;
473  s->s.h.refidx[1] = get_bits(&s->gb, 3);
474  s->s.h.signbias[1] = get_bits1(&s->gb) && !s->s.h.errorres;
475  s->s.h.refidx[2] = get_bits(&s->gb, 3);
476  s->s.h.signbias[2] = get_bits1(&s->gb) && !s->s.h.errorres;
477  if (!s->s.refs[s->s.h.refidx[0]].f->buf[0] ||
478  !s->s.refs[s->s.h.refidx[1]].f->buf[0] ||
479  !s->s.refs[s->s.h.refidx[2]].f->buf[0]) {
480  av_log(avctx, AV_LOG_ERROR, "Not all references are available\n");
481  return AVERROR_INVALIDDATA;
482  }
483  if (get_bits1(&s->gb)) {
484  w = s->s.refs[s->s.h.refidx[0]].f->width;
485  h = s->s.refs[s->s.h.refidx[0]].f->height;
486  } else if (get_bits1(&s->gb)) {
487  w = s->s.refs[s->s.h.refidx[1]].f->width;
488  h = s->s.refs[s->s.h.refidx[1]].f->height;
489  } else if (get_bits1(&s->gb)) {
490  w = s->s.refs[s->s.h.refidx[2]].f->width;
491  h = s->s.refs[s->s.h.refidx[2]].f->height;
492  } else {
493  w = get_bits(&s->gb, 16) + 1;
494  h = get_bits(&s->gb, 16) + 1;
495  }
496  // Note that in this code, "CUR_FRAME" is actually before we
497  // have formally allocated a frame, and thus actually represents
498  // the _last_ frame
499  s->s.h.use_last_frame_mvs &= s->s.frames[CUR_FRAME].tf.f->width == w &&
500  s->s.frames[CUR_FRAME].tf.f->height == h;
501  if (get_bits1(&s->gb)) // display size
502  skip_bits(&s->gb, 32);
503  s->s.h.highprecisionmvs = get_bits1(&s->gb);
504  s->s.h.filtermode = get_bits1(&s->gb) ? FILTER_SWITCHABLE :
505  get_bits(&s->gb, 2);
506  s->s.h.allowcompinter = s->s.h.signbias[0] != s->s.h.signbias[1] ||
507  s->s.h.signbias[0] != s->s.h.signbias[2];
508  if (s->s.h.allowcompinter) {
509  if (s->s.h.signbias[0] == s->s.h.signbias[1]) {
510  s->s.h.fixcompref = 2;
511  s->s.h.varcompref[0] = 0;
512  s->s.h.varcompref[1] = 1;
513  } else if (s->s.h.signbias[0] == s->s.h.signbias[2]) {
514  s->s.h.fixcompref = 1;
515  s->s.h.varcompref[0] = 0;
516  s->s.h.varcompref[1] = 2;
517  } else {
518  s->s.h.fixcompref = 0;
519  s->s.h.varcompref[0] = 1;
520  s->s.h.varcompref[1] = 2;
521  }
522  }
523  }
524  }
525  s->s.h.refreshctx = s->s.h.errorres ? 0 : get_bits1(&s->gb);
526  s->s.h.parallelmode = s->s.h.errorres ? 1 : get_bits1(&s->gb);
527  s->s.h.framectxid = c = get_bits(&s->gb, 2);
528  if (s->s.h.keyframe || s->s.h.intraonly)
529  s->s.h.framectxid = 0; // BUG: libvpx ignores this field in keyframes
530 
531  /* loopfilter header data */
532  if (s->s.h.keyframe || s->s.h.errorres || s->s.h.intraonly) {
533  // reset loopfilter defaults
534  s->s.h.lf_delta.ref[0] = 1;
535  s->s.h.lf_delta.ref[1] = 0;
536  s->s.h.lf_delta.ref[2] = -1;
537  s->s.h.lf_delta.ref[3] = -1;
538  s->s.h.lf_delta.mode[0] = 0;
539  s->s.h.lf_delta.mode[1] = 0;
540  memset(s->s.h.segmentation.feat, 0, sizeof(s->s.h.segmentation.feat));
541  }
542  s->s.h.filter.level = get_bits(&s->gb, 6);
543  sharp = get_bits(&s->gb, 3);
544  // if sharpness changed, reinit lim/mblim LUTs. if it didn't change, keep
545  // the old cache values since they are still valid
546  if (s->s.h.filter.sharpness != sharp)
547  memset(s->filter_lut.lim_lut, 0, sizeof(s->filter_lut.lim_lut));
548  s->s.h.filter.sharpness = sharp;
549  if ((s->s.h.lf_delta.enabled = get_bits1(&s->gb))) {
550  if ((s->s.h.lf_delta.updated = get_bits1(&s->gb))) {
551  for (i = 0; i < 4; i++)
552  if (get_bits1(&s->gb))
553  s->s.h.lf_delta.ref[i] = get_sbits_inv(&s->gb, 6);
554  for (i = 0; i < 2; i++)
555  if (get_bits1(&s->gb))
556  s->s.h.lf_delta.mode[i] = get_sbits_inv(&s->gb, 6);
557  }
558  }
559 
560  /* quantization header data */
561  s->s.h.yac_qi = get_bits(&s->gb, 8);
562  s->s.h.ydc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
563  s->s.h.uvdc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
564  s->s.h.uvac_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
565  s->s.h.lossless = s->s.h.yac_qi == 0 && s->s.h.ydc_qdelta == 0 &&
566  s->s.h.uvdc_qdelta == 0 && s->s.h.uvac_qdelta == 0;
567  if (s->s.h.lossless)
568  avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
569 
570  /* segmentation header info */
571  if ((s->s.h.segmentation.enabled = get_bits1(&s->gb))) {
572  if ((s->s.h.segmentation.update_map = get_bits1(&s->gb))) {
573  for (i = 0; i < 7; i++)
574  s->s.h.segmentation.prob[i] = get_bits1(&s->gb) ?
575  get_bits(&s->gb, 8) : 255;
576  if ((s->s.h.segmentation.temporal = get_bits1(&s->gb)))
577  for (i = 0; i < 3; i++)
578  s->s.h.segmentation.pred_prob[i] = get_bits1(&s->gb) ?
579  get_bits(&s->gb, 8) : 255;
580  }
581 
582  if (get_bits1(&s->gb)) {
583  s->s.h.segmentation.absolute_vals = get_bits1(&s->gb);
584  for (i = 0; i < 8; i++) {
585  if ((s->s.h.segmentation.feat[i].q_enabled = get_bits1(&s->gb)))
586  s->s.h.segmentation.feat[i].q_val = get_sbits_inv(&s->gb, 8);
587  if ((s->s.h.segmentation.feat[i].lf_enabled = get_bits1(&s->gb)))
588  s->s.h.segmentation.feat[i].lf_val = get_sbits_inv(&s->gb, 6);
589  if ((s->s.h.segmentation.feat[i].ref_enabled = get_bits1(&s->gb)))
590  s->s.h.segmentation.feat[i].ref_val = get_bits(&s->gb, 2);
591  s->s.h.segmentation.feat[i].skip_enabled = get_bits1(&s->gb);
592  }
593  }
594  }
595 
596  // set qmul[] based on Y/UV, AC/DC and segmentation Q idx deltas
597  for (i = 0; i < (s->s.h.segmentation.enabled ? 8 : 1); i++) {
598  int qyac, qydc, quvac, quvdc, lflvl, sh;
599 
600  if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].q_enabled) {
601  if (s->s.h.segmentation.absolute_vals)
602  qyac = av_clip_uintp2(s->s.h.segmentation.feat[i].q_val, 8);
603  else
604  qyac = av_clip_uintp2(s->s.h.yac_qi + s->s.h.segmentation.feat[i].q_val, 8);
605  } else {
606  qyac = s->s.h.yac_qi;
607  }
608  qydc = av_clip_uintp2(qyac + s->s.h.ydc_qdelta, 8);
609  quvdc = av_clip_uintp2(qyac + s->s.h.uvdc_qdelta, 8);
610  quvac = av_clip_uintp2(qyac + s->s.h.uvac_qdelta, 8);
611  qyac = av_clip_uintp2(qyac, 8);
612 
613  s->s.h.segmentation.feat[i].qmul[0][0] = ff_vp9_dc_qlookup[s->bpp_index][qydc];
614  s->s.h.segmentation.feat[i].qmul[0][1] = ff_vp9_ac_qlookup[s->bpp_index][qyac];
615  s->s.h.segmentation.feat[i].qmul[1][0] = ff_vp9_dc_qlookup[s->bpp_index][quvdc];
616  s->s.h.segmentation.feat[i].qmul[1][1] = ff_vp9_ac_qlookup[s->bpp_index][quvac];
617 
618  sh = s->s.h.filter.level >= 32;
619  if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].lf_enabled) {
620  if (s->s.h.segmentation.absolute_vals)
621  lflvl = av_clip_uintp2(s->s.h.segmentation.feat[i].lf_val, 6);
622  else
623  lflvl = av_clip_uintp2(s->s.h.filter.level + s->s.h.segmentation.feat[i].lf_val, 6);
624  } else {
625  lflvl = s->s.h.filter.level;
626  }
627  if (s->s.h.lf_delta.enabled) {
628  s->s.h.segmentation.feat[i].lflvl[0][0] =
629  s->s.h.segmentation.feat[i].lflvl[0][1] =
630  av_clip_uintp2(lflvl + (s->s.h.lf_delta.ref[0] * (1 << sh)), 6);
631  for (j = 1; j < 4; j++) {
632  s->s.h.segmentation.feat[i].lflvl[j][0] =
633  av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
634  s->s.h.lf_delta.mode[0]) * (1 << sh)), 6);
635  s->s.h.segmentation.feat[i].lflvl[j][1] =
636  av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
637  s->s.h.lf_delta.mode[1]) * (1 << sh)), 6);
638  }
639  } else {
640  memset(s->s.h.segmentation.feat[i].lflvl, lflvl,
641  sizeof(s->s.h.segmentation.feat[i].lflvl));
642  }
643  }
644 
645  /* tiling info */
646  if ((ret = update_size(avctx, w, h)) < 0) {
647  av_log(avctx, AV_LOG_ERROR, "Failed to initialize decoder for %dx%d @ %d\n",
648  w, h, s->pix_fmt);
649  return ret;
650  }
651  for (s->s.h.tiling.log2_tile_cols = 0;
652  s->sb_cols > (64 << s->s.h.tiling.log2_tile_cols);
653  s->s.h.tiling.log2_tile_cols++) ;
654  for (max = 0; (s->sb_cols >> max) >= 4; max++) ;
655  max = FFMAX(0, max - 1);
656  while (max > s->s.h.tiling.log2_tile_cols) {
657  if (get_bits1(&s->gb))
658  s->s.h.tiling.log2_tile_cols++;
659  else
660  break;
661  }
662  s->s.h.tiling.log2_tile_rows = decode012(&s->gb);
663  s->s.h.tiling.tile_rows = 1 << s->s.h.tiling.log2_tile_rows;
664  if (s->s.h.tiling.tile_cols != (1 << s->s.h.tiling.log2_tile_cols)) {
665  s->s.h.tiling.tile_cols = 1 << s->s.h.tiling.log2_tile_cols;
666  s->c_b = av_fast_realloc(s->c_b, &s->c_b_size,
667  sizeof(VP56RangeCoder) * s->s.h.tiling.tile_cols);
668  if (!s->c_b) {
669  av_log(avctx, AV_LOG_ERROR, "Ran out of memory during range coder init\n");
670  return AVERROR(ENOMEM);
671  }
672  }
673 
674  /* check reference frames */
675  if (!s->s.h.keyframe && !s->s.h.intraonly) {
676  for (i = 0; i < 3; i++) {
677  AVFrame *ref = s->s.refs[s->s.h.refidx[i]].f;
678  int refw = ref->width, refh = ref->height;
679 
680  if (ref->format != avctx->pix_fmt) {
681  av_log(avctx, AV_LOG_ERROR,
682  "Ref pixfmt (%s) did not match current frame (%s)",
683  av_get_pix_fmt_name(ref->format),
684  av_get_pix_fmt_name(avctx->pix_fmt));
685  return AVERROR_INVALIDDATA;
686  } else if (refw == w && refh == h) {
687  s->mvscale[i][0] = s->mvscale[i][1] = 0;
688  } else {
689  if (w * 2 < refw || h * 2 < refh || w > 16 * refw || h > 16 * refh) {
690  av_log(avctx, AV_LOG_ERROR,
691  "Invalid ref frame dimensions %dx%d for frame size %dx%d\n",
692  refw, refh, w, h);
693  return AVERROR_INVALIDDATA;
694  }
695  s->mvscale[i][0] = (refw << 14) / w;
696  s->mvscale[i][1] = (refh << 14) / h;
697  s->mvstep[i][0] = 16 * s->mvscale[i][0] >> 14;
698  s->mvstep[i][1] = 16 * s->mvscale[i][1] >> 14;
699  }
700  }
701  }
702 
703  if (s->s.h.keyframe || s->s.h.errorres || (s->s.h.intraonly && s->s.h.resetctx == 3)) {
704  s->prob_ctx[0].p = s->prob_ctx[1].p = s->prob_ctx[2].p =
705  s->prob_ctx[3].p = ff_vp9_default_probs;
706  memcpy(s->prob_ctx[0].coef, ff_vp9_default_coef_probs,
707  sizeof(ff_vp9_default_coef_probs));
708  memcpy(s->prob_ctx[1].coef, ff_vp9_default_coef_probs,
709  sizeof(ff_vp9_default_coef_probs));
710  memcpy(s->prob_ctx[2].coef, ff_vp9_default_coef_probs,
711  sizeof(ff_vp9_default_coef_probs));
712  memcpy(s->prob_ctx[3].coef, ff_vp9_default_coef_probs,
713  sizeof(ff_vp9_default_coef_probs));
714  } else if (s->s.h.intraonly && s->s.h.resetctx == 2) {
715  s->prob_ctx[c].p = ff_vp9_default_probs;
716  memcpy(s->prob_ctx[c].coef, ff_vp9_default_coef_probs,
717  sizeof(ff_vp9_default_coef_probs));
718  }
719 
720  // next 16 bits is size of the rest of the header (arith-coded)
721  s->s.h.compressed_header_size = size2 = get_bits(&s->gb, 16);
722  s->s.h.uncompressed_header_size = (get_bits_count(&s->gb) + 7) / 8;
723 
724  data2 = align_get_bits(&s->gb);
725  if (size2 > size - (data2 - data)) {
726  av_log(avctx, AV_LOG_ERROR, "Invalid compressed header size\n");
727  return AVERROR_INVALIDDATA;
728  }
729  ret = ff_vp56_init_range_decoder(&s->c, data2, size2);
730  if (ret < 0)
731  return ret;
732 
733  if (vp56_rac_get_prob_branchy(&s->c, 128)) { // marker bit
734  av_log(avctx, AV_LOG_ERROR, "Marker bit was set\n");
735  return AVERROR_INVALIDDATA;
736  }
737 
738  if (s->s.h.keyframe || s->s.h.intraonly) {
739  memset(s->counts.coef, 0, sizeof(s->counts.coef));
740  memset(s->counts.eob, 0, sizeof(s->counts.eob));
741  } else {
742  memset(&s->counts, 0, sizeof(s->counts));
743  }
744  /* FIXME is it faster to not copy here, but do it down in the fw updates
745  * as explicit copies if the fw update is missing (and skip the copy upon
746  * fw update)? */
747  s->prob.p = s->prob_ctx[c].p;
748 
749  // txfm updates
750  if (s->s.h.lossless) {
751  s->s.h.txfmmode = TX_4X4;
752  } else {
753  s->s.h.txfmmode = vp8_rac_get_uint(&s->c, 2);
754  if (s->s.h.txfmmode == 3)
755  s->s.h.txfmmode += vp8_rac_get(&s->c);
756 
757  if (s->s.h.txfmmode == TX_SWITCHABLE) {
758  for (i = 0; i < 2; i++)
759  if (vp56_rac_get_prob_branchy(&s->c, 252))
760  s->prob.p.tx8p[i] = update_prob(&s->c, s->prob.p.tx8p[i]);
761  for (i = 0; i < 2; i++)
762  for (j = 0; j < 2; j++)
763  if (vp56_rac_get_prob_branchy(&s->c, 252))
764  s->prob.p.tx16p[i][j] =
765  update_prob(&s->c, s->prob.p.tx16p[i][j]);
766  for (i = 0; i < 2; i++)
767  for (j = 0; j < 3; j++)
768  if (vp56_rac_get_prob_branchy(&s->c, 252))
769  s->prob.p.tx32p[i][j] =
770  update_prob(&s->c, s->prob.p.tx32p[i][j]);
771  }
772  }
773 
774  // coef updates
775  for (i = 0; i < 4; i++) {
776  uint8_t (*ref)[2][6][6][3] = s->prob_ctx[c].coef[i];
777  if (vp8_rac_get(&s->c)) {
778  for (j = 0; j < 2; j++)
779  for (k = 0; k < 2; k++)
780  for (l = 0; l < 6; l++)
781  for (m = 0; m < 6; m++) {
782  uint8_t *p = s->prob.coef[i][j][k][l][m];
783  uint8_t *r = ref[j][k][l][m];
784  if (m >= 3 && l == 0) // dc only has 3 pt
785  break;
786  for (n = 0; n < 3; n++) {
787  if (vp56_rac_get_prob_branchy(&s->c, 252))
788  p[n] = update_prob(&s->c, r[n]);
789  else
790  p[n] = r[n];
791  }
792  p[3] = 0;
793  }
794  } else {
795  for (j = 0; j < 2; j++)
796  for (k = 0; k < 2; k++)
797  for (l = 0; l < 6; l++)
798  for (m = 0; m < 6; m++) {
799  uint8_t *p = s->prob.coef[i][j][k][l][m];
800  uint8_t *r = ref[j][k][l][m];
801  if (m > 3 && l == 0) // dc only has 3 pt
802  break;
803  memcpy(p, r, 3);
804  p[3] = 0;
805  }
806  }
807  if (s->s.h.txfmmode == i)
808  break;
809  }
810 
811  // mode updates
812  for (i = 0; i < 3; i++)
813  if (vp56_rac_get_prob_branchy(&s->c, 252))
814  s->prob.p.skip[i] = update_prob(&s->c, s->prob.p.skip[i]);
815  if (!s->s.h.keyframe && !s->s.h.intraonly) {
816  for (i = 0; i < 7; i++)
817  for (j = 0; j < 3; j++)
818  if (vp56_rac_get_prob_branchy(&s->c, 252))
819  s->prob.p.mv_mode[i][j] =
820  update_prob(&s->c, s->prob.p.mv_mode[i][j]);
821 
822  if (s->s.h.filtermode == FILTER_SWITCHABLE)
823  for (i = 0; i < 4; i++)
824  for (j = 0; j < 2; j++)
825  if (vp56_rac_get_prob_branchy(&s->c, 252))
826  s->prob.p.filter[i][j] =
827  update_prob(&s->c, s->prob.p.filter[i][j]);
828 
829  for (i = 0; i < 4; i++)
830  if (vp56_rac_get_prob_branchy(&s->c, 252))
831  s->prob.p.intra[i] = update_prob(&s->c, s->prob.p.intra[i]);
832 
833  if (s->s.h.allowcompinter) {
834  s->s.h.comppredmode = vp8_rac_get(&s->c);
835  if (s->s.h.comppredmode)
836  s->s.h.comppredmode += vp8_rac_get(&s->c);
837  if (s->s.h.comppredmode == PRED_SWITCHABLE)
838  for (i = 0; i < 5; i++)
839  if (vp56_rac_get_prob_branchy(&s->c, 252))
840  s->prob.p.comp[i] =
841  update_prob(&s->c, s->prob.p.comp[i]);
842  } else {
843  s->s.h.comppredmode = PRED_SINGLEREF;
844  }
845 
846  if (s->s.h.comppredmode != PRED_COMPREF) {
847  for (i = 0; i < 5; i++) {
848  if (vp56_rac_get_prob_branchy(&s->c, 252))
849  s->prob.p.single_ref[i][0] =
850  update_prob(&s->c, s->prob.p.single_ref[i][0]);
851  if (vp56_rac_get_prob_branchy(&s->c, 252))
852  s->prob.p.single_ref[i][1] =
853  update_prob(&s->c, s->prob.p.single_ref[i][1]);
854  }
855  }
856 
857  if (s->s.h.comppredmode != PRED_SINGLEREF) {
858  for (i = 0; i < 5; i++)
859  if (vp56_rac_get_prob_branchy(&s->c, 252))
860  s->prob.p.comp_ref[i] =
861  update_prob(&s->c, s->prob.p.comp_ref[i]);
862  }
863 
864  for (i = 0; i < 4; i++)
865  for (j = 0; j < 9; j++)
866  if (vp56_rac_get_prob_branchy(&s->c, 252))
867  s->prob.p.y_mode[i][j] =
868  update_prob(&s->c, s->prob.p.y_mode[i][j]);
869 
870  for (i = 0; i < 4; i++)
871  for (j = 0; j < 4; j++)
872  for (k = 0; k < 3; k++)
873  if (vp56_rac_get_prob_branchy(&s->c, 252))
874  s->prob.p.partition[3 - i][j][k] =
875  update_prob(&s->c,
876  s->prob.p.partition[3 - i][j][k]);
877 
878  // mv fields don't use the update_prob subexp model for some reason
879  for (i = 0; i < 3; i++)
880  if (vp56_rac_get_prob_branchy(&s->c, 252))
881  s->prob.p.mv_joint[i] = (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
882 
883  for (i = 0; i < 2; i++) {
884  if (vp56_rac_get_prob_branchy(&s->c, 252))
885  s->prob.p.mv_comp[i].sign =
886  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
887 
888  for (j = 0; j < 10; j++)
889  if (vp56_rac_get_prob_branchy(&s->c, 252))
890  s->prob.p.mv_comp[i].classes[j] =
891  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
892 
893  if (vp56_rac_get_prob_branchy(&s->c, 252))
894  s->prob.p.mv_comp[i].class0 =
895  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
896 
897  for (j = 0; j < 10; j++)
898  if (vp56_rac_get_prob_branchy(&s->c, 252))
899  s->prob.p.mv_comp[i].bits[j] =
900  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
901  }
902 
903  for (i = 0; i < 2; i++) {
904  for (j = 0; j < 2; j++)
905  for (k = 0; k < 3; k++)
906  if (vp56_rac_get_prob_branchy(&s->c, 252))
907  s->prob.p.mv_comp[i].class0_fp[j][k] =
908  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
909 
910  for (j = 0; j < 3; j++)
911  if (vp56_rac_get_prob_branchy(&s->c, 252))
912  s->prob.p.mv_comp[i].fp[j] =
913  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
914  }
915 
916  if (s->s.h.highprecisionmvs) {
917  for (i = 0; i < 2; i++) {
918  if (vp56_rac_get_prob_branchy(&s->c, 252))
919  s->prob.p.mv_comp[i].class0_hp =
920  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
921 
922  if (vp56_rac_get_prob_branchy(&s->c, 252))
923  s->prob.p.mv_comp[i].hp =
924  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
925  }
926  }
927  }
928 
929  return (data2 - data) + size2;
930 }
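/* Illustrative note, not part of the original source: the return value of
 * decode_frame_header() is the total number of header bytes consumed, i.e.
 * the byte-aligned size of the uncompressed header plus the size of the
 * arithmetic-coded header behind it. The caller then skips both before the
 * tile data:
 *
 *   ret = decode_frame_header(avctx, data, size, &ref);
 *   ...
 *   data += ret;   // per-tile sizes and tile payloads start here
 *   size -= ret;
 */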
931 
932 static void decode_sb(AVCodecContext *avctx, int row, int col, VP9Filter *lflvl,
933  ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
934 {
935  VP9Context *s = avctx->priv_data;
936  int c = ((s->above_partition_ctx[col] >> (3 - bl)) & 1) |
937  (((s->left_partition_ctx[row & 0x7] >> (3 - bl)) & 1) << 1);
938  const uint8_t *p = s->s.h.keyframe || s->s.h.intraonly ? ff_vp9_default_kf_partition_probs[bl][c] :
939  s->prob.p.partition[bl][c];
940  enum BlockPartition bp;
941  ptrdiff_t hbs = 4 >> bl;
942  AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
943  ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
944  int bytesperpixel = s->bytesperpixel;
945 
946  if (bl == BL_8X8) {
947  bp = vp8_rac_get_tree(&s->c, ff_vp9_partition_tree, p);
948  ff_vp9_decode_block(avctx, row, col, lflvl, yoff, uvoff, bl, bp);
949  } else if (col + hbs < s->cols) { // FIXME why not <=?
950  if (row + hbs < s->rows) { // FIXME why not <=?
951  bp = vp8_rac_get_tree(&s->c, ff_vp9_partition_tree, p);
952  switch (bp) {
953  case PARTITION_NONE:
954  ff_vp9_decode_block(avctx, row, col, lflvl, yoff, uvoff, bl, bp);
955  break;
956  case PARTITION_H:
957  ff_vp9_decode_block(avctx, row, col, lflvl, yoff, uvoff, bl, bp);
958  yoff += hbs * 8 * y_stride;
959  uvoff += hbs * 8 * uv_stride >> s->ss_v;
960  ff_vp9_decode_block(avctx, row + hbs, col, lflvl, yoff, uvoff, bl, bp);
961  break;
962  case PARTITION_V:
963  ff_vp9_decode_block(avctx, row, col, lflvl, yoff, uvoff, bl, bp);
964  yoff += hbs * 8 * bytesperpixel;
965  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
966  ff_vp9_decode_block(avctx, row, col + hbs, lflvl, yoff, uvoff, bl, bp);
967  break;
968  case PARTITION_SPLIT:
969  decode_sb(avctx, row, col, lflvl, yoff, uvoff, bl + 1);
970  decode_sb(avctx, row, col + hbs, lflvl,
971  yoff + 8 * hbs * bytesperpixel,
972  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
973  yoff += hbs * 8 * y_stride;
974  uvoff += hbs * 8 * uv_stride >> s->ss_v;
975  decode_sb(avctx, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
976  decode_sb(avctx, row + hbs, col + hbs, lflvl,
977  yoff + 8 * hbs * bytesperpixel,
978  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
979  break;
980  default:
981  av_assert0(0);
982  }
983  } else if (vp56_rac_get_prob_branchy(&s->c, p[1])) {
984  bp = PARTITION_SPLIT;
985  decode_sb(avctx, row, col, lflvl, yoff, uvoff, bl + 1);
986  decode_sb(avctx, row, col + hbs, lflvl,
987  yoff + 8 * hbs * bytesperpixel,
988  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
989  } else {
990  bp = PARTITION_H;
991  ff_vp9_decode_block(avctx, row, col, lflvl, yoff, uvoff, bl, bp);
992  }
993  } else if (row + hbs < s->rows) { // FIXME why not <=?
994  if (vp56_rac_get_prob_branchy(&s->c, p[2])) {
995  bp = PARTITION_SPLIT;
996  decode_sb(avctx, row, col, lflvl, yoff, uvoff, bl + 1);
997  yoff += hbs * 8 * y_stride;
998  uvoff += hbs * 8 * uv_stride >> s->ss_v;
999  decode_sb(avctx, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1000  } else {
1001  bp = PARTITION_V;
1002  ff_vp9_decode_block(avctx, row, col, lflvl, yoff, uvoff, bl, bp);
1003  }
1004  } else {
1005  bp = PARTITION_SPLIT;
1006  decode_sb(avctx, row, col, lflvl, yoff, uvoff, bl + 1);
1007  }
1008  s->counts.partition[bl][c][bp]++;
1009 }
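/* Illustrative note, not part of the original source: decode_sb() recurses
 * from BL_64X64 down to BL_8X8. hbs = 4 >> bl is half the block size in 8x8
 * units (4, 2, 1), which is why the offsets advance by hbs * 8 pixels
 * (times bytesperpixel horizontally, times the line stride vertically) for
 * the H, V and SPLIT partitions. Blocks that straddle the right or bottom
 * frame edge read fewer partition bits, since some partitions cannot apply
 * there.
 */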
1010 
1011 static void decode_sb_mem(AVCodecContext *avctx, int row, int col, VP9Filter *lflvl,
1012  ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
1013 {
1014  VP9Context *s = avctx->priv_data;
1015  VP9Block *b = s->b;
1016  ptrdiff_t hbs = 4 >> bl;
1017  AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
1018  ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
1019  int bytesperpixel = s->bytesperpixel;
1020 
1021  if (bl == BL_8X8) {
1022  av_assert2(b->bl == BL_8X8);
1023  ff_vp9_decode_block(avctx, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
1024  } else if (s->b->bl == bl) {
1025  ff_vp9_decode_block(avctx, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
1026  if (b->bp == PARTITION_H && row + hbs < s->rows) {
1027  yoff += hbs * 8 * y_stride;
1028  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1029  ff_vp9_decode_block(avctx, row + hbs, col, lflvl, yoff, uvoff, b->bl, b->bp);
1030  } else if (b->bp == PARTITION_V && col + hbs < s->cols) {
1031  yoff += hbs * 8 * bytesperpixel;
1032  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1033  ff_vp9_decode_block(avctx, row, col + hbs, lflvl, yoff, uvoff, b->bl, b->bp);
1034  }
1035  } else {
1036  decode_sb_mem(avctx, row, col, lflvl, yoff, uvoff, bl + 1);
1037  if (col + hbs < s->cols) { // FIXME why not <=?
1038  if (row + hbs < s->rows) {
1039  decode_sb_mem(avctx, row, col + hbs, lflvl, yoff + 8 * hbs * bytesperpixel,
1040  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1041  yoff += hbs * 8 * y_stride;
1042  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1043  decode_sb_mem(avctx, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1044  decode_sb_mem(avctx, row + hbs, col + hbs, lflvl,
1045  yoff + 8 * hbs * bytesperpixel,
1046  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1047  } else {
1048  yoff += hbs * 8 * bytesperpixel;
1049  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1050  decode_sb_mem(avctx, row, col + hbs, lflvl, yoff, uvoff, bl + 1);
1051  }
1052  } else if (row + hbs < s->rows) {
1053  yoff += hbs * 8 * y_stride;
1054  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1055  decode_sb_mem(avctx, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1056  }
1057  }
1058 }
1059 
1060 static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
1061 {
1062  int sb_start = ( idx * n) >> log2_n;
1063  int sb_end = ((idx + 1) * n) >> log2_n;
1064  *start = FFMIN(sb_start, n) << 3;
1065  *end = FFMIN(sb_end, n) << 3;
1066 }
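/* Illustrative note, not part of the original source: set_tile_offset()
 * splits n superblock rows/columns evenly into 1 << log2_n tiles and scales
 * the result back to 8x8-block units. For example with n = 20 sb_cols and
 * log2_n = 2 (four tile columns), tile index 1 covers:
 *
 *   sb_start = (1 * 20) >> 2 = 5;
 *   sb_end   = (2 * 20) >> 2 = 10;
 *   *start   = 5  << 3 = 40;   // first 8x8 block column of the tile
 *   *end     = 10 << 3 = 80;   // one past the last block column
 */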
1067 
1068 static void free_buffers(VP9Context *s)
1069 {
1070  av_freep(&s->intra_pred_data[0]);
1071  av_freep(&s->b_base);
1072  av_freep(&s->block_base);
1073 }
1074 
1075 static av_cold int vp9_decode_free(AVCodecContext *avctx)
1076 {
1077  VP9Context *s = avctx->priv_data;
1078  int i;
1079 
1080  for (i = 0; i < 3; i++) {
1081  if (s->s.frames[i].tf.f->buf[0])
1082  vp9_frame_unref(avctx, &s->s.frames[i]);
1083  av_frame_free(&s->s.frames[i].tf.f);
1084  }
1085  for (i = 0; i < 8; i++) {
1086  if (s->s.refs[i].f->buf[0])
1087  ff_thread_release_buffer(avctx, &s->s.refs[i]);
1088  av_frame_free(&s->s.refs[i].f);
1089  if (s->next_refs[i].f->buf[0])
1090  ff_thread_release_buffer(avctx, &s->next_refs[i]);
1091  av_frame_free(&s->next_refs[i].f);
1092  }
1093  free_buffers(s);
1094  av_freep(&s->c_b);
1095  s->c_b_size = 0;
1096 
1097  return 0;
1098 }
1099 
1100 
1101 static int vp9_decode_frame(AVCodecContext *avctx, void *frame,
1102  int *got_frame, AVPacket *pkt)
1103 {
1104  const uint8_t *data = pkt->data;
1105  int size = pkt->size;
1106  VP9Context *s = avctx->priv_data;
1107  int ret, tile_row, tile_col, i, ref, row, col;
1108  int retain_segmap_ref = s->s.frames[REF_FRAME_SEGMAP].segmentation_map &&
1109  (!s->s.h.segmentation.enabled || !s->s.h.segmentation.update_map);
1110  ptrdiff_t yoff, uvoff, ls_y, ls_uv;
1111  AVFrame *f;
1112  int bytesperpixel;
1113 
1114  if ((ret = decode_frame_header(avctx, data, size, &ref)) < 0) {
1115  return ret;
1116  } else if (ret == 0) {
1117  if (!s->s.refs[ref].f->buf[0]) {
1118  av_log(avctx, AV_LOG_ERROR, "Requested reference %d not available\n", ref);
1119  return AVERROR_INVALIDDATA;
1120  }
1121  if ((ret = av_frame_ref(frame, s->s.refs[ref].f)) < 0)
1122  return ret;
1123  ((AVFrame *)frame)->pts = pkt->pts;
1124 #if FF_API_PKT_PTS
1125 FF_DISABLE_DEPRECATION_WARNINGS
1126  ((AVFrame *)frame)->pkt_pts = pkt->pts;
1127 FF_ENABLE_DEPRECATION_WARNINGS
1128 #endif
1129  ((AVFrame *)frame)->pkt_dts = pkt->dts;
1130  for (i = 0; i < 8; i++) {
1131  if (s->next_refs[i].f->buf[0])
1132  ff_thread_release_buffer(avctx, &s->next_refs[i]);
1133  if (s->s.refs[i].f->buf[0] &&
1134  (ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i])) < 0)
1135  return ret;
1136  }
1137  *got_frame = 1;
1138  return pkt->size;
1139  }
1140  data += ret;
1141  size -= ret;
1142 
1143  if (!retain_segmap_ref || s->s.h.keyframe || s->s.h.intraonly) {
1144  if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0])
1145  vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_SEGMAP]);
1146  if (!s->s.h.keyframe && !s->s.h.intraonly && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] &&
1147  (ret = vp9_frame_ref(avctx, &s->s.frames[REF_FRAME_SEGMAP], &s->s.frames[CUR_FRAME])) < 0)
1148  return ret;
1149  }
1150  if (s->s.frames[REF_FRAME_MVPAIR].tf.f->buf[0])
1151  vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_MVPAIR]);
1152  if (!s->s.h.intraonly && !s->s.h.keyframe && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] &&
1153  (ret = vp9_frame_ref(avctx, &s->s.frames[REF_FRAME_MVPAIR], &s->s.frames[CUR_FRAME])) < 0)
1154  return ret;
1155  if (s->s.frames[CUR_FRAME].tf.f->buf[0])
1156  vp9_frame_unref(avctx, &s->s.frames[CUR_FRAME]);
1157  if ((ret = vp9_frame_alloc(avctx, &s->s.frames[CUR_FRAME])) < 0)
1158  return ret;
1159  f = s->s.frames[CUR_FRAME].tf.f;
1160  f->key_frame = s->s.h.keyframe;
1161  f->pict_type = (s->s.h.keyframe || s->s.h.intraonly) ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
1162  ls_y = f->linesize[0];
1163  ls_uv = f->linesize[1];
1164 
1165  if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0] &&
1166  (s->s.frames[REF_FRAME_MVPAIR].tf.f->width != s->s.frames[CUR_FRAME].tf.f->width ||
1167  s->s.frames[REF_FRAME_MVPAIR].tf.f->height != s->s.frames[CUR_FRAME].tf.f->height)) {
1168  vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_SEGMAP]);
1169  }
1170 
1171  // ref frame setup
1172  for (i = 0; i < 8; i++) {
1173  if (s->next_refs[i].f->buf[0])
1174  ff_thread_release_buffer(avctx, &s->next_refs[i]);
1175  if (s->s.h.refreshrefmask & (1 << i)) {
1176  ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.frames[CUR_FRAME].tf);
1177  } else if (s->s.refs[i].f->buf[0]) {
1178  ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i]);
1179  }
1180  if (ret < 0)
1181  return ret;
1182  }
1183 
1184  if (avctx->hwaccel) {
1185  ret = avctx->hwaccel->start_frame(avctx, NULL, 0);
1186  if (ret < 0)
1187  return ret;
1188  ret = avctx->hwaccel->decode_slice(avctx, pkt->data, pkt->size);
1189  if (ret < 0)
1190  return ret;
1191  ret = avctx->hwaccel->end_frame(avctx);
1192  if (ret < 0)
1193  return ret;
1194  goto finish;
1195  }
1196 
1197  // main tile decode loop
1198  bytesperpixel = s->bytesperpixel;
1199  memset(s->above_partition_ctx, 0, s->cols);
1200  memset(s->above_skip_ctx, 0, s->cols);
1201  if (s->s.h.keyframe || s->s.h.intraonly) {
1202  memset(s->above_mode_ctx, DC_PRED, s->cols * 2);
1203  } else {
1204  memset(s->above_mode_ctx, NEARESTMV, s->cols);
1205  }
1206  memset(s->above_y_nnz_ctx, 0, s->sb_cols * 16);
1207  memset(s->above_uv_nnz_ctx[0], 0, s->sb_cols * 16 >> s->ss_h);
1208  memset(s->above_uv_nnz_ctx[1], 0, s->sb_cols * 16 >> s->ss_h);
1209  memset(s->above_segpred_ctx, 0, s->cols);
1210  s->pass = s->s.frames[CUR_FRAME].uses_2pass =
1211  avctx->active_thread_type == FF_THREAD_FRAME && s->s.h.refreshctx && !s->s.h.parallelmode;
1212  if ((ret = update_block_buffers(avctx)) < 0) {
1213  av_log(avctx, AV_LOG_ERROR,
1214  "Failed to allocate block buffers\n");
1215  return ret;
1216  }
1217  if (s->s.h.refreshctx && s->s.h.parallelmode) {
1218  int j, k, l, m;
1219 
1220  for (i = 0; i < 4; i++) {
1221  for (j = 0; j < 2; j++)
1222  for (k = 0; k < 2; k++)
1223  for (l = 0; l < 6; l++)
1224  for (m = 0; m < 6; m++)
1225  memcpy(s->prob_ctx[s->s.h.framectxid].coef[i][j][k][l][m],
1226  s->prob.coef[i][j][k][l][m], 3);
1227  if (s->s.h.txfmmode == i)
1228  break;
1229  }
1230  s->prob_ctx[s->s.h.framectxid].p = s->prob.p;
1231  ff_thread_finish_setup(avctx);
1232  } else if (!s->s.h.refreshctx) {
1233  ff_thread_finish_setup(avctx);
1234  }
1235 
1236  do {
1237  yoff = uvoff = 0;
1238  s->b = s->b_base;
1239  s->block = s->block_base;
1240  s->uvblock[0] = s->uvblock_base[0];
1241  s->uvblock[1] = s->uvblock_base[1];
1242  s->eob = s->eob_base;
1243  s->uveob[0] = s->uveob_base[0];
1244  s->uveob[1] = s->uveob_base[1];
1245 
1246  for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
1247  set_tile_offset(&s->tile_row_start, &s->tile_row_end,
1248  tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);
1249  if (s->pass != 2) {
1250  for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
1251  int64_t tile_size;
1252 
1253  if (tile_col == s->s.h.tiling.tile_cols - 1 &&
1254  tile_row == s->s.h.tiling.tile_rows - 1) {
1255  tile_size = size;
1256  } else {
1257  tile_size = AV_RB32(data);
1258  data += 4;
1259  size -= 4;
1260  }
1261  if (tile_size > size) {
1262  ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
1263  return AVERROR_INVALIDDATA;
1264  }
1265  ret = ff_vp56_init_range_decoder(&s->c_b[tile_col], data, tile_size);
1266  if (ret < 0)
1267  return ret;
1268  if (vp56_rac_get_prob_branchy(&s->c_b[tile_col], 128)) { // marker bit
1269  ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
1270  return AVERROR_INVALIDDATA;
1271  }
1272  data += tile_size;
1273  size -= tile_size;
1274  }
1275  }
1276 
1277  for (row = s->tile_row_start; row < s->tile_row_end;
1278  row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
1279  VP9Filter *lflvl_ptr = s->lflvl;
1280  ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
1281 
1282  for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
1283  set_tile_offset(&s->tile_col_start, &s->tile_col_end,
1284  tile_col, s->s.h.tiling.log2_tile_cols, s->sb_cols);
1285 
1286  if (s->pass != 2) {
1287  memset(s->left_partition_ctx, 0, 8);
1288  memset(s->left_skip_ctx, 0, 8);
1289  if (s->s.h.keyframe || s->s.h.intraonly) {
1290  memset(s->left_mode_ctx, DC_PRED, 16);
1291  } else {
1292  memset(s->left_mode_ctx, NEARESTMV, 8);
1293  }
1294  memset(s->left_y_nnz_ctx, 0, 16);
1295  memset(s->left_uv_nnz_ctx, 0, 32);
1296  memset(s->left_segpred_ctx, 0, 8);
1297 
1298  memcpy(&s->c, &s->c_b[tile_col], sizeof(s->c));
1299  }
1300 
1301  for (col = s->tile_col_start;
1302  col < s->tile_col_end;
1303  col += 8, yoff2 += 64 * bytesperpixel,
1304  uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1305  // FIXME integrate with lf code (i.e. zero after each
1306  // use, similar to invtxfm coefficients, or similar)
1307  if (s->pass != 1) {
1308  memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
1309  }
1310 
1311  if (s->pass == 2) {
1312  decode_sb_mem(avctx, row, col, lflvl_ptr,
1313  yoff2, uvoff2, BL_64X64);
1314  } else {
1315  decode_sb(avctx, row, col, lflvl_ptr,
1316  yoff2, uvoff2, BL_64X64);
1317  }
1318  }
1319  if (s->pass != 2)
1320  memcpy(&s->c_b[tile_col], &s->c, sizeof(s->c));
1321  }
1322 
1323  if (s->pass == 1)
1324  continue;
1325 
1326  // backup pre-loopfilter reconstruction data for intra
1327  // prediction of next row of sb64s
1328  if (row + 8 < s->rows) {
1329  memcpy(s->intra_pred_data[0],
1330  f->data[0] + yoff + 63 * ls_y,
1331  8 * s->cols * bytesperpixel);
1332  memcpy(s->intra_pred_data[1],
1333  f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
1334  8 * s->cols * bytesperpixel >> s->ss_h);
1335  memcpy(s->intra_pred_data[2],
1336  f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
1337  8 * s->cols * bytesperpixel >> s->ss_h);
1338  }
1339 
1340  // loopfilter one row
1341  if (s->s.h.filter.level) {
1342  yoff2 = yoff;
1343  uvoff2 = uvoff;
1344  lflvl_ptr = s->lflvl;
1345  for (col = 0; col < s->cols;
1346  col += 8, yoff2 += 64 * bytesperpixel,
1347  uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1348  ff_vp9_loopfilter_sb(avctx, lflvl_ptr, row, col,
1349  yoff2, uvoff2);
1350  }
1351  }
1352 
1353  // FIXME maybe we can make this more fine-grained by running the
1354  // loopfilter per-block instead of after each sbrow
1355  // In fact that would also make intra pred left preparation easier?
1356  ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, row >> 3, 0);
1357  }
1358  }
1359 
1360  if (s->pass < 2 && s->s.h.refreshctx && !s->s.h.parallelmode) {
1361  ff_vp9_adapt_probs(s);
1362  ff_thread_finish_setup(avctx);
1363  }
1364  } while (s->pass++ == 1);
1365  ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
1366 
1367 finish:
1368  // ref frame setup
1369  for (i = 0; i < 8; i++) {
1370  if (s->s.refs[i].f->buf[0])
1371  ff_thread_release_buffer(avctx, &s->s.refs[i]);
1372  if (s->next_refs[i].f->buf[0] &&
1373  (ret = ff_thread_ref_frame(&s->s.refs[i], &s->next_refs[i])) < 0)
1374  return ret;
1375  }
1376 
1377  if (!s->s.h.invisible) {
1378  if ((ret = av_frame_ref(frame, s->s.frames[CUR_FRAME].tf.f)) < 0)
1379  return ret;
1380  *got_frame = 1;
1381  }
1382 
1383  return pkt->size;
1384 }
1385 
1386 static void vp9_decode_flush(AVCodecContext *avctx)
1387 {
1388  VP9Context *s = avctx->priv_data;
1389  int i;
1390 
1391  for (i = 0; i < 3; i++)
1392  vp9_frame_unref(avctx, &s->s.frames[i]);
1393  for (i = 0; i < 8; i++)
1394  ff_thread_release_buffer(avctx, &s->s.refs[i]);
1395 }
1396 
1397 static int init_frames(AVCodecContext *avctx)
1398 {
1399  VP9Context *s = avctx->priv_data;
1400  int i;
1401 
1402  for (i = 0; i < 3; i++) {
1403  s->s.frames[i].tf.f = av_frame_alloc();
1404  if (!s->s.frames[i].tf.f) {
1405  vp9_decode_free(avctx);
1406  av_log(avctx, AV_LOG_ERROR, "Failed to allocate frame buffer %d\n", i);
1407  return AVERROR(ENOMEM);
1408  }
1409  }
1410  for (i = 0; i < 8; i++) {
1411  s->s.refs[i].f = av_frame_alloc();
1412  s->next_refs[i].f = av_frame_alloc();
1413  if (!s->s.refs[i].f || !s->next_refs[i].f) {
1414  vp9_decode_free(avctx);
1415  av_log(avctx, AV_LOG_ERROR, "Failed to allocate frame buffer %d\n", i);
1416  return AVERROR(ENOMEM);
1417  }
1418  }
1419 
1420  return 0;
1421 }
1422 
1423 static av_cold int vp9_decode_init(AVCodecContext *avctx)
1424 {
1425  VP9Context *s = avctx->priv_data;
1426 
1427  avctx->internal->allocate_progress = 1;
1428  s->last_bpp = 0;
1429  s->s.h.filter.sharpness = -1;
1430 
1431  return init_frames(avctx);
1432 }
1433 
1434 #if HAVE_THREADS
1435 static av_cold int vp9_decode_init_thread_copy(AVCodecContext *avctx)
1436 {
1437  return init_frames(avctx);
1438 }
1439 
1440 static int vp9_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
1441 {
1442  int i, ret;
1443  VP9Context *s = dst->priv_data, *ssrc = src->priv_data;
1444 
1445  for (i = 0; i < 3; i++) {
1446  if (s->s.frames[i].tf.f->buf[0])
1447  vp9_frame_unref(dst, &s->s.frames[i]);
1448  if (ssrc->s.frames[i].tf.f->buf[0]) {
1449  if ((ret = vp9_frame_ref(dst, &s->s.frames[i], &ssrc->s.frames[i])) < 0)
1450  return ret;
1451  }
1452  }
1453  for (i = 0; i < 8; i++) {
1454  if (s->s.refs[i].f->buf[0])
1455  ff_thread_release_buffer(dst, &s->s.refs[i]);
1456  if (ssrc->next_refs[i].f->buf[0]) {
1457  if ((ret = ff_thread_ref_frame(&s->s.refs[i], &ssrc->next_refs[i])) < 0)
1458  return ret;
1459  }
1460  }
1461 
1462  s->s.h.invisible = ssrc->s.h.invisible;
1463  s->s.h.keyframe = ssrc->s.h.keyframe;
1464  s->s.h.intraonly = ssrc->s.h.intraonly;
1465  s->ss_v = ssrc->ss_v;
1466  s->ss_h = ssrc->ss_h;
1467  s->s.h.segmentation.enabled = ssrc->s.h.segmentation.enabled;
1468  s->s.h.segmentation.update_map = ssrc->s.h.segmentation.update_map;
1469  s->s.h.segmentation.absolute_vals = ssrc->s.h.segmentation.absolute_vals;
1470  s->bytesperpixel = ssrc->bytesperpixel;
1471  s->gf_fmt = ssrc->gf_fmt;
1472  s->w = ssrc->w;
1473  s->h = ssrc->h;
1474  s->s.h.bpp = ssrc->s.h.bpp;
1475  s->bpp_index = ssrc->bpp_index;
1476  s->pix_fmt = ssrc->pix_fmt;
1477  memcpy(&s->prob_ctx, &ssrc->prob_ctx, sizeof(s->prob_ctx));
1478  memcpy(&s->s.h.lf_delta, &ssrc->s.h.lf_delta, sizeof(s->s.h.lf_delta));
1479  memcpy(&s->s.h.segmentation.feat, &ssrc->s.h.segmentation.feat,
1480  sizeof(s->s.h.segmentation.feat));
1481 
1482  return 0;
1483 }
1484 #endif
1485 
1486 AVCodec ff_vp9_decoder = {
1487  .name = "vp9",
1488  .long_name = NULL_IF_CONFIG_SMALL("Google VP9"),
1489  .type = AVMEDIA_TYPE_VIDEO,
1490  .id = AV_CODEC_ID_VP9,
1491  .priv_data_size = sizeof(VP9Context),
1492  .init = vp9_decode_init,
1493  .close = vp9_decode_free,
1494  .decode = vp9_decode_frame,
1495  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
1496  .flush = vp9_decode_flush,
1497  .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp9_decode_init_thread_copy),
1498  .update_thread_context = ONLY_IF_THREADS_ENABLED(vp9_decode_update_thread_context),
1499  .profiles = NULL_IF_CONFIG_SMALL(ff_vp9_profiles),
1500 };
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
Definition: pixfmt.h:468
ThreadFrame tf
Definition: vp9shared.h:60
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
struct VP9BitstreamHeader::@154 lf_delta
#define NULL
Definition: coverity.c:32
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: avcodec.h:3912
const char * s
Definition: avisynth_c.h:768
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
uint8_t * segmentation_map
Definition: vp9shared.h:62
#define AV_PIX_FMT_YUV440P10
Definition: pixfmt.h:365
uint8_t parallelmode
Definition: vp9shared.h:108
const uint8_t ff_vp9_default_kf_partition_probs[4][4][3]
Definition: vp9data.c:41
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:125
static void vp9_decode_flush(AVCodecContext *avctx)
Definition: vp9.c:1386
This structure describes decoded (raw) audio or video data.
Definition: frame.h:194
static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
Definition: vp9.c:1060
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
uint8_t allowcompinter
Definition: vp9shared.h:106
static void flush(AVCodecContext *avctx)
uint8_t left_segpred_ctx[8]
Definition: vp9dec.h:164
VP5 and VP6 compatible video decoder (common features)
uint8_t update_map
Definition: vp9shared.h:133
static av_always_inline int get_sbits_inv(GetBitContext *gb, int n)
Definition: vp9.c:257
uint8_t * above_skip_ctx
Definition: vp9dec.h:174
uint8_t * eob_base
Definition: vp9dec.h:191
uint8_t mvstep[3][2]
Definition: vp9dec.h:196
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:67
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:261
static int init_thread_copy(AVCodecContext *avctx)
Definition: tta.c:392
AVFrame * f
Definition: thread.h:36
static av_always_inline int vp8_rac_get_tree(VP56RangeCoder *c, const int8_t(*tree)[2], const uint8_t *probs)
Definition: vp56.h:380
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:211
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:386
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 ...
Definition: pixfmt.h:472
VP9BitstreamHeader h
Definition: vp9shared.h:160
VideoDSPContext vdsp
Definition: vp9dec.h:91
static av_cold int vp9_decode_init(AVCodecContext *avctx)
Definition: vp9.c:1423
ProbContext p
Definition: vp9dec.h:120
uint8_t last_keyframe
Definition: vp9dec.h:104
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2482
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:180
uint8_t ss_v
Definition: vp9dec.h:102
int size
Definition: avcodec.h:1671
const char * b
Definition: vf_curves.c:113
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:379
uint8_t prob[7]
Definition: vp9shared.h:134
uint8_t tx32p[2][3]
Definition: vp9dec.h:53
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
Definition: pixfmt.h:473
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1973
uint8_t left_uv_nnz_ctx[2][16]
Definition: vp9dec.h:160
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:367
uint8_t framectxid
Definition: vp9shared.h:109
const int16_t ff_vp9_dc_qlookup[3][256]
Definition: vp9data.c:231
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:222
VP9Filter * lflvl
Definition: vp9dec.h:185
static AVPacket pkt
unsigned cols
Definition: vp9dec.h:111
#define src
Definition: vp8dsp.c:254
unsigned tile_col_end
Definition: vp9dec.h:118
int profile
profile
Definition: avcodec.h:3248
AVCodec.
Definition: avcodec.h:3721
uint8_t comp_ref[5]
Definition: vp9dec.h:52
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB)
Definition: pixfmt.h:467
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:42
const int16_t ff_vp9_ac_qlookup[3][256]
Definition: vp9data.c:334
functionally identical to above
Definition: pixfmt.h:474
unsigned log2_tile_rows
Definition: vp9shared.h:151
uint8_t * intra_pred_data[3]
Definition: vp9dec.h:184
int uncompressed_header_size
Definition: vp9shared.h:155
enum FilterMode filtermode
Definition: vp9shared.h:105
struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:3065
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
uint8_t coef[4][2][2][6][6][3]
Definition: vp9dec.h:121
#define VP9_SYNCCODE
Definition: vp9.c:37
uint8_t bits
Definition: crc.c:296
uint8_t
const int8_t ff_vp9_partition_tree[3][2]
Definition: vp9data.c:35
#define av_cold
Definition: attributes.h:82
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:150
uint8_t absolute_vals
Definition: vp9shared.h:132
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
uint8_t varcompref[2]
Definition: vp9shared.h:114
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:466
void ff_vp9_adapt_probs(VP9Context *s)
Definition: vp9prob.c:46
static void free_buffers(VP9Context *s)
Definition: vp9.c:1068
VP9Frame frames[3]
Definition: vp9shared.h:166
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
Multithreading support functions.
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:392
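A hedged sketch of the reference-counted AVFrame lifecycle built from the entries above (av_frame_alloc(), av_frame_ref(), av_frame_free()); it assumes the caller already holds a decoded frame in src, and the function name is illustrative.

/* Sketch: av_frame_ref() makes dst share the buffers of src without copying
 * pixel data; each frame is then released independently with av_frame_free(). */
#include <errno.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>

static int share_frame_example(const AVFrame *src)
{
    AVFrame *dst = av_frame_alloc();          /* fields set to default values */
    int ret;

    if (!dst)
        return AVERROR(ENOMEM);

    ret = av_frame_ref(dst, src);             /* new reference, no data copy */
    if (ret < 0) {
        av_frame_free(&dst);
        return ret;
    }

    /* ... read dst->data / dst->linesize ... */

    av_frame_free(&dst);                      /* drops only this reference */
    return 0;
}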
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:3564
uint8_t * uveob_base[2]
Definition: vp9dec.h:191
AV_RB32
Definition: bytestream.h:87
static AVFrame * frame
static int update_size(AVCodecContext *avctx, int w, int h)
Definition: vp9.c:114
AVBufferRef * hwaccel_priv_buf
Definition: vp9shared.h:66
static void finish(void)
Definition: movenc.c:344
uint8_t * data
Definition: avcodec.h:1670
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:199
AVBufferRef * extradata
Definition: vp9shared.h:61
int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
Definition: utils.c:2060
uint8_t skip[3]
Definition: vp9dec.h:56
bitstream reader API header.
struct VP9BitstreamHeader::@155 segmentation
uint8_t * above_uv_nnz_ctx[2]
Definition: vp9dec.h:173
VP9DSPContext dsp
Definition: vp9dec.h:90
uint8_t lim_lut[64]
Definition: vp9dec.h:115
ptrdiff_t size
Definition: opengl_enc.c:101
enum CompPredMode comppredmode
Definition: vp9shared.h:149
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:368
unsigned log2_tile_cols
Definition: vp9shared.h:151
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when they are ready for the next thread to start decoding the next frame.
uint16_t mvscale[3][2]
Definition: vp9dec.h:195
struct VP9BitstreamHeader::@155::@157 feat[MAX_SEGMENT]
uint8_t refidx[3]
Definition: vp9shared.h:111
uint8_t * above_txfm_ctx
Definition: vp9dec.h:175
av_cold void ff_vp9dsp_init(VP9DSPContext *dsp, int bpp, int bitexact)
Definition: vp9dsp.c:84
int h
Definition: vp9dec.h:109
#define av_log(a,...)
int16_t * block
Definition: vp9dec.h:190
static void decode_sb_mem(AVCodecContext *avctx, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
Definition: vp9.c:1011
uint8_t bytesperpixel
Definition: vp9dec.h:103
void ff_vp9_loopfilter_sb(AVCodecContext *avctx, VP9Filter *lflvl, int row, int col, ptrdiff_t yoff, ptrdiff_t uvoff)
Definition: vp9lpf.c:178
uint8_t mask[2][2][8][4]
Definition: vp9dec.h:74
static av_cold int vp9_decode_free(AVCodecContext *avctx)
Definition: vp9.c:1075
int16_t * uvblock[2]
Definition: vp9dec.h:190
Definition: vp9.h:28
uint8_t partition[4][4][3]
Definition: vp9dec.h:68
uint8_t hp
Definition: vp9dec.h:66
struct VP9Context::@149 prob
int width
Definition: frame.h:252
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
uint8_t sign
Definition: vp9dec.h:59
static int decode_frame_header(AVCodecContext *avctx, const uint8_t *data, int size, int *ref)
Definition: vp9.c:393
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() for frame-multithreaded codecs.
uint8_t left_partition_ctx[8]
Definition: vp9dec.h:161
unsigned tile_cols
Definition: vp9shared.h:152
#define AVERROR(e)
Definition: error.h:43
GetBitContext gb
Definition: vp9dec.h:92
uint8_t fp[3]
Definition: vp9dec.h:64
uint8_t signbias[3]
Definition: vp9shared.h:112
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g. extended_data.
Definition: frame.c:163
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:179
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:3193
uint8_t refreshctx
Definition: vp9shared.h:107
const char * r
Definition: vf_curves.c:111
unsigned tile_row_start
Definition: vp9dec.h:118
uint8_t bpp_index
Definition: vp9dec.h:103
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1840
void * hwaccel_picture_private
Definition: vp9shared.h:67
simple assert() macros that are a bit more flexible than ISO C assert().
const char * name
Name of the codec implementation.
Definition: avcodec.h:3728
uint8_t intra[4]
Definition: vp9dec.h:49
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:366
int16_t * uvblock_base[2]
Definition: vp9dec.h:190
#define FFMAX(a, b)
Definition: common.h:94
#define fail()
Definition: checkasm.h:95
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: avcodec.h:1063
uint8_t * above_filter_ctx
Definition: vp9dec.h:180
#define REF_FRAME_MVPAIR
Definition: vp9shared.h:164
struct VP9BitstreamHeader::@156 tiling
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:66
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:218
uint8_t comp[5]
Definition: vp9dec.h:50
unsigned c_b_size
Definition: vp9dec.h:95
int ff_vp56_init_range_decoder(VP56RangeCoder *c, const uint8_t *buf, int buf_size)
Definition: vp56rac.c:40
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:277
int uses_2pass
Definition: vp9shared.h:64
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:927
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:3185
#define FFMIN(a, b)
Definition: common.h:96
uint8_t left_y_nnz_ctx[16]
Definition: vp9dec.h:157
enum TxfmMode txfmmode
Definition: vp9shared.h:148
uint8_t keyframe
Definition: vp9shared.h:98
unsigned tile_rows
Definition: vp9shared.h:152
uint8_t left_mode_ctx[16]
Definition: vp9dec.h:158
unsigned eob[4][2][2][6][6][2]
Definition: vp9dec.h:153
ITU-R BT2020 non-constant luminance system.
Definition: pixfmt.h:477
uint8_t tx16p[2][2]
Definition: vp9dec.h:54
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
uint8_t class0_hp
Definition: vp9dec.h:65
VP9SharedContext s
Definition: vp9dec.h:88
struct VP9Context::@147 filter_lut
void * av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
Reallocate the given buffer if it is not large enough, otherwise do nothing.
Definition: mem.c:450
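A hedged sketch of the grow-only reallocation pattern that av_fast_realloc() implements; the wrapping struct and the names ensure_capacity, buf and buf_size are illustrative, not part of the decoder.

/* Sketch: buf/buf_size persist across calls, and the buffer only grows when
 * needed exceeds the current size. On failure the old buffer is left
 * untouched (it must still be freed by the caller). */
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <libavutil/error.h>
#include <libavutil/mem.h>

struct scratch {
    uint8_t     *buf;
    unsigned int buf_size;   /* updated by av_fast_realloc() */
};

static int ensure_capacity(struct scratch *s, size_t needed)
{
    void *p = av_fast_realloc(s->buf, &s->buf_size, needed);
    if (!p)
        return AVERROR(ENOMEM);
    s->buf = p;
    return 0;
}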
struct VP9BitstreamHeader::@153 filter
uint8_t * above_partition_ctx
Definition: vp9dec.h:169
int n
Definition: avisynth_c.h:684
uint8_t mv_mode[7][3]
Definition: vp9dec.h:48
uint8_t fixcompref
Definition: vp9shared.h:113
uint8_t * above_segpred_ctx
Definition: vp9dec.h:176
#define FF_ARRAY_ELEMS(a)
unsigned tile_col_start
Definition: vp9dec.h:118
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:489
unsigned rows
Definition: vp9dec.h:111
unsigned sb_cols
Definition: vp9dec.h:111
int format
format of the frame, -1 if unknown or unset. Values correspond to enum AVPixelFormat for video frames, enum AVSampleFormat for audio.
Definition: frame.h:267
struct ProbContext::@146 mv_comp[2]
static av_always_inline int vp56_rac_get_prob_branchy(VP56RangeCoder *c, int prob)
Definition: vp56.h:271
static int init_frames(AVCodecContext *avctx)
Definition: vp9.c:1397
VP56mv(* above_mv_ctx)[2]
Definition: vp9dec.h:181
Libavcodec external API header.
int16_t * block_base
Definition: vp9dec.h:190
BlockLevel
Definition: vp9shared.h:70
uint8_t filter[4][2]
Definition: vp9dec.h:47
uint8_t class0_fp[2][3]
Definition: vp9dec.h:63
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:225
int pass
Definition: vp9dec.h:97
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:456
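A hedged sketch of the bitstream-reader helpers listed here (init_get_bits8(), get_bits1(), get_bits_long(), skip_bits(), get_bits_count()). get_bits.h is an internal libavcodec header, so this is written in the style of code inside the library; the field layout parsed is illustrative, not the VP9 syntax.

/* Sketch: parse a few fields from a byte buffer with the get_bits helpers. */
#include "get_bits.h"
#include "libavutil/error.h"
#include "libavutil/log.h"

static int parse_example(const uint8_t *buf, int buf_size)
{
    GetBitContext gb;
    unsigned flag, value;

    if (init_get_bits8(&gb, buf, buf_size) < 0)
        return AVERROR_INVALIDDATA;

    flag  = get_bits1(&gb);             /* one bit */
    value = get_bits_long(&gb, 24);     /* 0-32 bits in one call */
    skip_bits(&gb, 3);                  /* discard reserved bits */

    av_log(NULL, AV_LOG_DEBUG, "flag=%u value=%u, %d bits read\n",
           flag, value, get_bits_count(&gb));
    return 0;
}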
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
uint8_t left_skip_ctx[8]
Definition: vp9dec.h:162
main external API structure.
Definition: avcodec.h:1745
uint8_t * data
The data buffer.
Definition: buffer.h:89
#define REF_FRAME_SEGMAP
Definition: vp9shared.h:165
int8_t mode[2]
Definition: vp9shared.h:122
#define CUR_FRAME
Definition: vp9shared.h:163
static int update_block_buffers(AVCodecContext *avctx)
Definition: vp9.c:214
VP56RangeCoder * c_b
Definition: vp9dec.h:94
static int vp9_frame_ref(AVCodecContext *avctx, VP9Frame *dst, VP9Frame *src)
Definition: vp9.c:84
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:83
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:313
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:363
unsigned partition[4][4][4]
Definition: vp9dec.h:151
uint8_t * above_y_nnz_ctx
Definition: vp9dec.h:172
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:306
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:2475
uint8_t tx8p[2]
Definition: vp9dec.h:55
uint8_t ss_h
Definition: vp9dec.h:102
uint8_t y_mode[4][9]
Definition: vp9dec.h:45
#define AV_PIX_FMT_YUV440P12
Definition: pixfmt.h:369
enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Wrapper around get_format() for frame-multithreaded codecs.
uint8_t last_bpp
Definition: vp9dec.h:103
static int read_colorspace_details(AVCodecContext *avctx)
Definition: vp9.c:331
uint8_t * above_intra_ctx
Definition: vp9dec.h:177
int allocate_progress
Whether to allocate progress for frame threading.
Definition: internal.h:142
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:346
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:148
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:266
enum BlockPartition bp
Definition: vp9dec.h:84
static int vp8_rac_get_uint(VP56RangeCoder *c, int bits)
Definition: vp56.h:324
VP9Block * b
Definition: vp9dec.h:96
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:380
uint8_t * uveob[2]
Definition: vp9dec.h:191
uint8_t * above_mode_ctx
Definition: vp9dec.h:170
uint8_t single_ref[5][2]
Definition: vp9dec.h:51
Definition: vp56.h:66
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:364
static void vp9_frame_unref(AVCodecContext *avctx, VP9Frame *f)
Definition: vp9.c:39
static int vp9_decode_frame(AVCodecContext *avctx, void *frame, int *got_frame, AVPacket *pkt)
Definition: vp9.c:1101
uint8_t bits[10]
Definition: vp9dec.h:62
ThreadFrame next_refs[8]
Definition: vp9dec.h:112
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:370
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:208
Definition: vp9.h:48
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:327
the normal 219*2^(n-8) "MPEG" YUV ranges
Definition: pixfmt.h:488
static av_always_inline int inv_recenter_nonneg(int v, int m)
Definition: vp9.c:263
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:62
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:83
common internal api header.
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
#define assign(var, type, n)
int w
Definition: vp9dec.h:109
static double c[64]
enum AVPixelFormat pix_fmt, last_fmt, gf_fmt
Definition: vp9dec.h:110
AVCodec ff_vp9_decoder
Definition: vp9.c:1486
static void decode_sb(AVCodecContext *avctx, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
Definition: vp9.c:932
unsigned sb_rows
Definition: vp9dec.h:111
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
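A hedged sketch of AVBufferRef reference counting using av_buffer_allocz(), av_buffer_ref() and av_buffer_unref(); the buffer size and the function name buffer_ref_example are illustrative.

/* Sketch: the underlying data is freed only when the last reference is
 * unreferenced. */
#include <errno.h>
#include <libavutil/buffer.h>
#include <libavutil/error.h>

static int buffer_ref_example(void)
{
    AVBufferRef *a = av_buffer_allocz(4096);  /* zero-initialized, refcount 1 */
    AVBufferRef *b;

    if (!a)
        return AVERROR(ENOMEM);

    b = av_buffer_ref(a);                     /* refcount 2, same data */
    if (!b) {
        av_buffer_unref(&a);
        return AVERROR(ENOMEM);
    }

    av_buffer_unref(&a);                      /* data still owned via b */
    av_buffer_unref(&b);                      /* last reference gone, data freed */
    return 0;
}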
unsigned properties
Definition: avcodec.h:3563
static av_always_inline int vp8_rac_get(VP56RangeCoder *c)
Definition: vp56.h:308
Core video DSP helper functions.
uint8_t mv_joint[3]
Definition: vp9dec.h:57
enum BlockLevel bl
Definition: vp9dec.h:83
void * priv_data
Definition: avcodec.h:1787
#define HWACCEL_MAX
uint8_t class0
Definition: vp9dec.h:61
#define av_free(p)
static int vp9_frame_alloc(AVCodecContext *avctx, VP9Frame *f)
Definition: vp9.c:48
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:84
unsigned tile_row_end
Definition: vp9dec.h:118
int frame_priv_data_size
Size of per-frame hardware accelerator private data.
Definition: avcodec.h:3946
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1795
static int decode012(GetBitContext *gb)
Definition: get_bits.h:569
int key_frame
1 -> keyframe, 0 -> not
Definition: frame.h:272
VP9mvrefPair * mv
Definition: vp9shared.h:63
struct VP9Context::@148 prob_ctx[4]
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:464
struct VP9Context::@150 counts
uint8_t invisible
Definition: vp9shared.h:99
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: avcodec.h:1669
uint8_t use_last_frame_mvs
Definition: vp9shared.h:110
int height
Definition: frame.h:252
ThreadFrame refs[8]
Definition: vp9shared.h:162
uint8_t pred_prob[3]
Definition: vp9shared.h:135
#define av_freep(p)
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:100
VP9Block * b_base
Definition: vp9dec.h:96
void INT64 start
Definition: avisynth_c.h:690
#define av_always_inline
Definition: attributes.h:39
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView pointer.
Definition: pixfmt.h:243
static int update_prob(VP56RangeCoder *c, int p)
Definition: vp9.c:273
#define av_malloc_array(a, b)
const ProbContext ff_vp9_default_probs
Definition: vp9data.c:1435
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2253
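A tiny hedged example of av_get_pix_fmt_name(); it guards against the NULL return for unknown formats, and the choice of AV_PIX_FMT_YUV420P is arbitrary.

/* Example: map an AVPixelFormat value to its short name. */
#include <stdio.h>
#include <libavutil/pixdesc.h>

int main(void)
{
    const char *name = av_get_pix_fmt_name(AV_PIX_FMT_YUV420P);
    printf("pixel format: %s\n", name ? name : "unknown");
    return 0;
}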
const AVProfile ff_vp9_profiles[]
Definition: profiles.c:135
uint8_t * above_ref_ctx
Definition: vp9dec.h:179
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:3926
BlockPartition
Definition: vp9shared.h:34
uint8_t classes[10]
Definition: vp9dec.h:60
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
Definition: avcodec.h:3937
const uint8_t ff_vp9_default_coef_probs[4][2][2][6][6][3]
Definition: vp9data.c:1540
uint8_t highprecisionmvs
Definition: vp9shared.h:104
AVPixelFormat
Pixel format.
Definition: pixfmt.h:60
This structure stores compressed data.
Definition: avcodec.h:1647
void ff_vp9_decode_block(AVCodecContext *avctx, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl, enum BlockPartition bp)
Definition: vp9block.c:1267
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:1395
uint8_t * above_comp_ctx
Definition: vp9dec.h:178
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:1000
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
Definition: avcodec.h:1663
int block_alloc_using_2pass
Definition: vp9dec.h:189
Predicted.
Definition: avutil.h:275
int compressed_header_size
Definition: vp9shared.h:156
uint8_t refreshrefmask
Definition: vp9shared.h:103
VP56RangeCoder c
Definition: vp9dec.h:93