FFmpeg
vp9.c
Go to the documentation of this file.
1 /*
2  * VP9 compatible video decoder
3  *
4  * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
5  * Copyright (C) 2013 Clément Bœsch <u pkh me>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include "config_components.h"
25 
26 #include "avcodec.h"
27 #include "codec_internal.h"
28 #include "decode.h"
29 #include "get_bits.h"
30 #include "hwaccel_internal.h"
31 #include "hwconfig.h"
32 #include "profiles.h"
33 #include "progressframe.h"
34 #include "libavutil/refstruct.h"
35 #include "thread.h"
36 #include "pthread_internal.h"
37 
38 #include "videodsp.h"
39 #include "vp89_rac.h"
40 #include "vp9.h"
41 #include "vp9data.h"
42 #include "vp9dec.h"
43 #include "vpx_rac.h"
44 #include "libavutil/attributes.h"
45 #include "libavutil/avassert.h"
46 #include "libavutil/mem.h"
47 #include "libavutil/pixdesc.h"
49 
50 #define VP9_SYNCCODE 0x498342
51 
52 #if HAVE_THREADS
/* Table of mutex/cond member offsets inside VP9Context so the generic
 * pthread helper code can initialize/destroy them from a single list. */
DEFINE_OFFSET_ARRAY(VP9Context, vp9_context, pthread_init_cnt,
                    (offsetof(VP9Context, progress_mutex)),
                    (offsetof(VP9Context, progress_cond)));
56 
57 static int vp9_alloc_entries(AVCodecContext *avctx, int n) {
58  VP9Context *s = avctx->priv_data;
59 
60  if (avctx->active_thread_type & FF_THREAD_SLICE) {
61  if (s->entries)
62  av_freep(&s->entries);
63 
64  s->entries = av_malloc_array(n, sizeof(atomic_int));
65  if (!s->entries)
66  return AVERROR(ENOMEM);
67  }
68  return 0;
69 }
70 
/* Publish decoding progress: add n to the counter s->entries[field] and wake
 * a thread sleeping in vp9_await_tile_progress().  The mutex is held around
 * both the update and the signal so a concurrent waiter cannot miss the
 * wakeup between its re-check of the counter and its cond_wait. */
static void vp9_report_tile_progress(VP9Context *s, int field, int n) {
    pthread_mutex_lock(&s->progress_mutex);
    /* release pairs with the acquire load on the lock-free fast path of
     * vp9_await_tile_progress() */
    atomic_fetch_add_explicit(&s->entries[field], n, memory_order_release);
    pthread_cond_signal(&s->progress_cond);
    pthread_mutex_unlock(&s->progress_mutex);
}
77 
78 static void vp9_await_tile_progress(VP9Context *s, int field, int n) {
79  if (atomic_load_explicit(&s->entries[field], memory_order_acquire) >= n)
80  return;
81 
82  pthread_mutex_lock(&s->progress_mutex);
83  while (atomic_load_explicit(&s->entries[field], memory_order_relaxed) != n)
84  pthread_cond_wait(&s->progress_cond, &s->progress_mutex);
85  pthread_mutex_unlock(&s->progress_mutex);
86 }
87 #else
88 static int vp9_alloc_entries(AVCodecContext *avctx, int n) { return 0; }
89 #endif
90 
92 {
93  av_freep(&td->b_base);
94  av_freep(&td->block_base);
96 }
97 
/* Drop every reference held by a VP9Frame and clear the aliases into the
 * shared extradata buffer. */
static void vp9_frame_unref(VP9Frame *f)
{
    /* NOTE(review): one source line was lost in extraction here -- presumably
     * the progress-frame unref for f->tf; confirm against upstream vp9.c. */
    av_refstruct_unref(&f->header_ref);
    av_refstruct_unref(&f->extradata);
    av_refstruct_unref(&f->hwaccel_picture_private);
    /* segmentation_map points into extradata, so only the alias is cleared */
    f->segmentation_map = NULL;
}
106 
108 {
109  VP9Context *s = avctx->priv_data;
110  int ret, sz;
111 
113  if (ret < 0)
114  return ret;
115 
116  sz = 64 * s->sb_cols * s->sb_rows;
117  if (sz != s->frame_extradata_pool_size) {
118  av_refstruct_pool_uninit(&s->frame_extradata_pool);
119  s->frame_extradata_pool = av_refstruct_pool_alloc(sz * (1 + sizeof(VP9mvrefPair)),
121  if (!s->frame_extradata_pool) {
122  s->frame_extradata_pool_size = 0;
123  ret = AVERROR(ENOMEM);
124  goto fail;
125  }
126  s->frame_extradata_pool_size = sz;
127  }
128  f->extradata = av_refstruct_pool_get(s->frame_extradata_pool);
129  if (!f->extradata) {
130  ret = AVERROR(ENOMEM);
131  goto fail;
132  }
133 
134  f->segmentation_map = f->extradata;
135  f->mv = (VP9mvrefPair *) ((char*)f->extradata + sz);
136 
137  ret = ff_hwaccel_frame_priv_alloc(avctx, &f->hwaccel_picture_private);
138  if (ret < 0)
139  goto fail;
140 
141  return 0;
142 
143 fail:
145  return ret;
146 }
147 
/* NOTE(review): the signature line was lost in extraction -- this body
 * replaces every reference held by dst with the corresponding reference from
 * src (refcounted members via *_replace, plain fields by direct copy). */
{
    av_refstruct_replace(&dst->header_ref, src->header_ref);
    dst->frame_header = src->frame_header;

    ff_progress_frame_replace(&dst->tf, &src->tf);

    av_refstruct_replace(&dst->extradata, src->extradata);

    /* these alias into extradata / carry no ownership, so copy directly */
    dst->segmentation_map = src->segmentation_map;
    dst->mv = src->mv;
    dst->uses_2pass = src->uses_2pass;

    av_refstruct_replace(&dst->hwaccel_picture_private,
                         src->hwaccel_picture_private);
}
164 
/* (Re)configure the decoder for new coded dimensions w x h: negotiate the
 * output pixel format (offering hwaccel formats when compiled in), update
 * cached dimensions, and reallocate the per-column "above" context buffers.
 * Returns <0 on error; otherwise nonzero when format/size/bpp changed and 0
 * when nothing changed.
 *
 * NOTE(review): a few case-label lines inside the switch below were lost in
 * extraction (marked) -- confirm against upstream vp9.c. */
static int update_size(AVCodecContext *avctx, int w, int h)
{
#define HWACCEL_MAX (CONFIG_VP9_DXVA2_HWACCEL + \
                     CONFIG_VP9_D3D11VA_HWACCEL * 2 + \
                     CONFIG_VP9_D3D12VA_HWACCEL + \
                     CONFIG_VP9_NVDEC_HWACCEL + \
                     CONFIG_VP9_VAAPI_HWACCEL + \
                     CONFIG_VP9_VDPAU_HWACCEL + \
                     CONFIG_VP9_VIDEOTOOLBOX_HWACCEL + \
                     CONFIG_VP9_VULKAN_HWACCEL)
    enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;
    VP9Context *s = avctx->priv_data;
    uint8_t *p;
    int bytesperpixel = s->bytesperpixel, ret, cols, rows;
    int lflvl_len, i;
    int changed = 0;

    av_assert0(w > 0 && h > 0);

    /* renegotiate format only when pixfmt or dimensions actually changed */
    if (!(s->pix_fmt == s->gf_fmt && w == s->w && h == s->h)) {
        changed = 1;
        if ((ret = ff_set_dimensions(avctx, w, h)) < 0)
            return ret;

        /* build the hwaccel candidate list for the sw pixel format */
        switch (s->pix_fmt) {
        case AV_PIX_FMT_YUV420P:
        /* NOTE(review): one line lost in extraction here -- presumably an
         * additional fall-through case label (e.g. 10-bit 4:2:0). */
#if CONFIG_VP9_DXVA2_HWACCEL
            *fmtp++ = AV_PIX_FMT_DXVA2_VLD;
#endif
#if CONFIG_VP9_D3D11VA_HWACCEL
            *fmtp++ = AV_PIX_FMT_D3D11VA_VLD;
            *fmtp++ = AV_PIX_FMT_D3D11;
#endif
#if CONFIG_VP9_D3D12VA_HWACCEL
            *fmtp++ = AV_PIX_FMT_D3D12;
#endif
#if CONFIG_VP9_NVDEC_HWACCEL
            *fmtp++ = AV_PIX_FMT_CUDA;
#endif
#if CONFIG_VP9_VAAPI_HWACCEL
            *fmtp++ = AV_PIX_FMT_VAAPI;
#endif
#if CONFIG_VP9_VDPAU_HWACCEL
            *fmtp++ = AV_PIX_FMT_VDPAU;
#endif
#if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
            *fmtp++ = AV_PIX_FMT_VIDEOTOOLBOX;
#endif
#if CONFIG_VP9_VULKAN_HWACCEL
            *fmtp++ = AV_PIX_FMT_VULKAN;
#endif
            break;
        /* NOTE(review): one line lost in extraction here -- presumably the
         * case label this second hwaccel list belongs to (e.g. 12-bit
         * 4:2:0). */
#if CONFIG_VP9_NVDEC_HWACCEL
            *fmtp++ = AV_PIX_FMT_CUDA;
#endif
#if CONFIG_VP9_VAAPI_HWACCEL
            *fmtp++ = AV_PIX_FMT_VAAPI;
#endif
#if CONFIG_VP9_VDPAU_HWACCEL
            *fmtp++ = AV_PIX_FMT_VDPAU;
#endif
#if CONFIG_VP9_VULKAN_HWACCEL
            *fmtp++ = AV_PIX_FMT_VULKAN;
#endif
            break;
        case AV_PIX_FMT_YUV444P:
        /* NOTE(review): two lines lost in extraction here -- presumably the
         * 10/12-bit 4:4:4 fall-through case labels. */
#if CONFIG_VP9_VAAPI_HWACCEL
            *fmtp++ = AV_PIX_FMT_VAAPI;
#endif
#if CONFIG_VP9_VULKAN_HWACCEL
            *fmtp++ = AV_PIX_FMT_VULKAN;
#endif
            break;
        case AV_PIX_FMT_GBRP:
        case AV_PIX_FMT_GBRP10:
        case AV_PIX_FMT_GBRP12:
#if CONFIG_VP9_VAAPI_HWACCEL
            *fmtp++ = AV_PIX_FMT_VAAPI;
#endif
#if CONFIG_VP9_VULKAN_HWACCEL
            *fmtp++ = AV_PIX_FMT_VULKAN;
#endif
            break;
        }

        /* software format is always the last real candidate */
        *fmtp++ = s->pix_fmt;
        *fmtp = AV_PIX_FMT_NONE;

        ret = ff_get_format(avctx, pix_fmts);
        if (ret < 0) {
            /* negotiation failed: restore the previous dimensions */
            ff_set_dimensions(avctx, s->w, s->h);
            return ret;
        }

        avctx->pix_fmt = ret;
        s->gf_fmt = s->pix_fmt;
        s->w = w;
        s->h = h;
    }

    cols = (w + 7) >> 3;  /* 8x8-block grid */
    rows = (h + 7) >> 3;

    /* context buffers still match: nothing more to do */
    if (s->intra_pred_data[0] && cols == s->cols && rows == s->rows && s->pix_fmt == s->last_fmt)
        return changed;

    s->last_fmt = s->pix_fmt;
    s->sb_cols = (w + 63) >> 6;  /* 64x64 superblock grid */
    s->sb_rows = (h + 63) >> 6;
    s->cols = (w + 7) >> 3;
    s->rows = (h + 7) >> 3;
    lflvl_len = avctx->active_thread_type == FF_THREAD_SLICE ? s->sb_rows : 1;

/* carve `p` (one allocation) into the per-column "above" context arrays */
#define assign(var, type, n) var = (type) p; p += s->sb_cols * (n) * sizeof(*var)
    av_freep(&s->intra_pred_data[0]);
    // FIXME we slightly over-allocate here for subsampled chroma, but a little
    // bit of padding shouldn't affect performance...
    p = av_malloc(s->sb_cols * (128 + 192 * bytesperpixel +
                                lflvl_len * sizeof(*s->lflvl) + 16 * sizeof(*s->above_mv_ctx)));
    if (!p)
        return AVERROR(ENOMEM);
    assign(s->intra_pred_data[0], uint8_t *, 64 * bytesperpixel);
    assign(s->intra_pred_data[1], uint8_t *, 64 * bytesperpixel);
    assign(s->intra_pred_data[2], uint8_t *, 64 * bytesperpixel);
    assign(s->above_y_nnz_ctx, uint8_t *, 16);
    assign(s->above_mode_ctx, uint8_t *, 16);
    assign(s->above_mv_ctx, VP9mv(*)[2], 16);
    assign(s->above_uv_nnz_ctx[0], uint8_t *, 16);
    assign(s->above_uv_nnz_ctx[1], uint8_t *, 16);
    assign(s->above_partition_ctx, uint8_t *, 8);
    assign(s->above_skip_ctx, uint8_t *, 8);
    assign(s->above_txfm_ctx, uint8_t *, 8);
    assign(s->above_segpred_ctx, uint8_t *, 8);
    assign(s->above_intra_ctx, uint8_t *, 8);
    assign(s->above_comp_ctx, uint8_t *, 8);
    assign(s->above_ref_ctx, uint8_t *, 8);
    assign(s->above_filter_ctx, uint8_t *, 8);
    assign(s->lflvl, VP9Filter *, lflvl_len);
#undef assign

    /* tile buffers depend on the above sizes; drop them for reallocation */
    if (s->td) {
        for (i = 0; i < s->active_tile_cols; i++)
            vp9_tile_data_free(&s->td[i]);
    }

    /* reinit the DSP functions when the bit depth changed */
    if (s->s.h.bpp != s->last_bpp) {
        ff_vp9dsp_init(&s->dsp, s->s.h.bpp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
        ff_videodsp_init(&s->vdsp, s->s.h.bpp);
        s->last_bpp = s->s.h.bpp;
        changed = 1;
    }

    return changed;
}
323 
/* NOTE(review): the signature line of this function (per-tile block buffer
 * (re)allocation) and two interior guard lines were lost in extraction
 * (marked below) -- confirm against upstream vp9.c. */
{
    int i;
    VP9Context *s = avctx->priv_data;
    int chroma_blocks, chroma_eobs, bytesperpixel = s->bytesperpixel;
    VP9TileData *td = &s->td[0];

    /* buffers already exist and the 1-pass/2-pass layout matches: reuse */
    if (td->b_base && td->block_base && s->block_alloc_using_2pass == s->s.frames[CUR_FRAME].uses_2pass)
        return 0;

    vp9_tile_data_free(td);
    /* chroma sizes shrink with each subsampled dimension */
    chroma_blocks = 64 * 64 >> (s->ss_h + s->ss_v);
    chroma_eobs = 16 * 16 >> (s->ss_h + s->ss_v);
    if (s->s.frames[CUR_FRAME].uses_2pass) {
        /* 2-pass: one large buffer covering every superblock of the frame */
        int sbs = s->sb_cols * s->sb_rows;

        td->b_base = av_malloc_array(s->cols * s->rows, sizeof(VP9Block));
        td->block_base = av_mallocz(((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
                                     16 * 16 + 2 * chroma_eobs) * sbs);
        if (!td->b_base || !td->block_base)
            return AVERROR(ENOMEM);
        /* slice the single allocation into luma/chroma coef and eob areas */
        td->uvblock_base[0] = td->block_base + sbs * 64 * 64 * bytesperpixel;
        td->uvblock_base[1] = td->uvblock_base[0] + sbs * chroma_blocks * bytesperpixel;
        td->eob_base = (uint8_t *) (td->uvblock_base[1] + sbs * chroma_blocks * bytesperpixel);
        td->uveob_base[0] = td->eob_base + 16 * 16 * sbs;
        td->uveob_base[1] = td->uveob_base[0] + chroma_eobs * sbs;

        /* NOTE(review): a guard line (presumably an export-side-data `if`)
         * was lost in extraction here. */
            td->block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
            if (!td->block_structure)
                return AVERROR(ENOMEM);
        }
    } else {
        /* 1-pass: each active tile column gets a single-superblock buffer */
        for (i = 1; i < s->active_tile_cols; i++)
            vp9_tile_data_free(&s->td[i]);

        for (i = 0; i < s->active_tile_cols; i++) {
            s->td[i].b_base = av_malloc(sizeof(VP9Block));
            s->td[i].block_base = av_mallocz((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
                                             16 * 16 + 2 * chroma_eobs);
            if (!s->td[i].b_base || !s->td[i].block_base)
                return AVERROR(ENOMEM);
            s->td[i].uvblock_base[0] = s->td[i].block_base + 64 * 64 * bytesperpixel;
            s->td[i].uvblock_base[1] = s->td[i].uvblock_base[0] + chroma_blocks * bytesperpixel;
            s->td[i].eob_base = (uint8_t *) (s->td[i].uvblock_base[1] + chroma_blocks * bytesperpixel);
            s->td[i].uveob_base[0] = s->td[i].eob_base + 16 * 16;
            s->td[i].uveob_base[1] = s->td[i].uveob_base[0] + chroma_eobs;

            /* NOTE(review): a guard line (presumably the same
             * export-side-data `if` as above) was lost in extraction here. */
                s->td[i].block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
                if (!s->td[i].block_structure)
                    return AVERROR(ENOMEM);
            }
        }
    }
    s->block_alloc_using_2pass = s->s.frames[CUR_FRAME].uses_2pass;

    return 0;
}
383 
// The sign bit is at the end, not the start, of a bit sequence
/* NOTE(review): the signature line was lost in extraction -- presumably
 * `static av_always_inline int get_sbits_inv(GetBitContext *gb, int n)`;
 * confirm against upstream vp9.c. */
{
    int v = get_bits(gb, n);       /* magnitude first ... */
    return get_bits1(gb) ? -v : v; /* ... then the trailing sign bit */
}
390 
/* Invert the "recentering" mapping used by the VP9 probability update VLC:
 * given the coded value v and the reference m, recover the original
 * non-negative value.  Values beyond 2*m are passed through; otherwise odd
 * codes map below m and even codes map above (or onto) m. */
static inline int inv_recenter_nonneg(int v, int m)
{
    if (v > 2 * m)
        return v;
    return (v & 1) ? m - ((v + 1) >> 1)
                   : m + (v >> 1);
}
399 
400 // differential forward probability updates
401 static int update_prob(VPXRangeCoder *c, int p)
402 {
403  static const uint8_t inv_map_table[255] = {
404  7, 20, 33, 46, 59, 72, 85, 98, 111, 124, 137, 150, 163, 176,
405  189, 202, 215, 228, 241, 254, 1, 2, 3, 4, 5, 6, 8, 9,
406  10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24,
407  25, 26, 27, 28, 29, 30, 31, 32, 34, 35, 36, 37, 38, 39,
408  40, 41, 42, 43, 44, 45, 47, 48, 49, 50, 51, 52, 53, 54,
409  55, 56, 57, 58, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
410  70, 71, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
411  86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 99, 100,
412  101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115,
413  116, 117, 118, 119, 120, 121, 122, 123, 125, 126, 127, 128, 129, 130,
414  131, 132, 133, 134, 135, 136, 138, 139, 140, 141, 142, 143, 144, 145,
415  146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
416  161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
417  177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 191,
418  192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205, 206,
419  207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221,
420  222, 223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236,
421  237, 238, 239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
422  252, 253, 253,
423  };
424  int d;
425 
426  /* This code is trying to do a differential probability update. For a
427  * current probability A in the range [1, 255], the difference to a new
428  * probability of any value can be expressed differentially as 1-A, 255-A
429  * where some part of this (absolute range) exists both in positive as
430  * well as the negative part, whereas another part only exists in one
431  * half. We're trying to code this shared part differentially, i.e.
432  * times two where the value of the lowest bit specifies the sign, and
433  * the single part is then coded on top of this. This absolute difference
434  * then again has a value of [0, 254], but a bigger value in this range
435  * indicates that we're further away from the original value A, so we
436  * can code this as a VLC code, since higher values are increasingly
437  * unlikely. The first 20 values in inv_map_table[] allow 'cheap, rough'
438  * updates vs. the 'fine, exact' updates further down the range, which
439  * adds one extra dimension to this differential update model. */
440 
441  if (!vp89_rac_get(c)) {
442  d = vp89_rac_get_uint(c, 4) + 0;
443  } else if (!vp89_rac_get(c)) {
444  d = vp89_rac_get_uint(c, 4) + 16;
445  } else if (!vp89_rac_get(c)) {
446  d = vp89_rac_get_uint(c, 5) + 32;
447  } else {
448  d = vp89_rac_get_uint(c, 7);
449  if (d >= 65)
450  d = (d << 1) - 65 + vp89_rac_get(c);
451  d += 64;
452  av_assert2(d < FF_ARRAY_ELEMS(inv_map_table));
453  }
454 
455  return p <= 128 ? 1 + inv_recenter_nonneg(inv_map_table[d], p - 1) :
456  255 - inv_recenter_nonneg(inv_map_table[d], 255 - p);
457 }
458 
/* NOTE(review): the signature line of this function (reading bit depth,
 * colorspace and subsampling from the uncompressed header) and the
 * initializer lines of all three lookup tables below were lost in
 * extraction -- confirm against upstream vp9.c. */
{
    static const enum AVColorSpace colorspaces[8] = {
    /* NOTE(review): initializer entries lost in extraction (8 colorspace
     * values indexed by the 3-bit colorspace field). */
    };
    VP9Context *s = avctx->priv_data;
    /* profiles 0/1 are 8-bit only; profiles 2/3 signal 10 or 12 bit */
    int bits = avctx->profile <= 1 ? 0 : 1 + get_bits1(&s->gb); // 0:8, 1:10, 2:12

    s->bpp_index = bits;
    s->s.h.bpp = 8 + bits * 2;
    s->bytesperpixel = (7 + s->s.h.bpp) >> 3;
    avctx->colorspace = colorspaces[get_bits(&s->gb, 3)];
    if (avctx->colorspace == AVCOL_SPC_RGB) { // RGB = profile 1
        static const enum AVPixelFormat pix_fmt_rgb[3] = {
        /* NOTE(review): initializer entries lost in extraction (one GBR
         * format per bit depth). */
        };
        s->ss_h = s->ss_v = 0;
        avctx->color_range = AVCOL_RANGE_JPEG;
        s->pix_fmt = pix_fmt_rgb[bits];
        if (avctx->profile & 1) {
            if (get_bits1(&s->gb)) {
                av_log(avctx, AV_LOG_ERROR, "Reserved bit set in RGB\n");
                return AVERROR_INVALIDDATA;
            }
        } else {
            av_log(avctx, AV_LOG_ERROR, "RGB not supported in profile %d\n",
                   avctx->profile);
            return AVERROR_INVALIDDATA;
        }
    } else {
        static const enum AVPixelFormat pix_fmt_for_ss[3][2 /* v */][2 /* h */] = {
        /* NOTE(review): initializer entries lost in extraction (pixel format
         * per bit depth and per vertical/horizontal subsampling flag). */
        };
        if (avctx->profile & 1) {
            /* odd profiles carry explicit subsampling bits */
            s->ss_h = get_bits1(&s->gb);
            s->ss_v = get_bits1(&s->gb);
            s->pix_fmt = pix_fmt_for_ss[bits][s->ss_v][s->ss_h];
            if (s->pix_fmt == AV_PIX_FMT_YUV420P) {
                av_log(avctx, AV_LOG_ERROR, "YUV 4:2:0 not supported in profile %d\n",
                       avctx->profile);
                return AVERROR_INVALIDDATA;
            } else if (get_bits1(&s->gb)) {
                av_log(avctx, AV_LOG_ERROR, "Profile %d color details reserved bit set\n",
                       avctx->profile);
                return AVERROR_INVALIDDATA;
            }
        } else {
            /* even profiles are always 4:2:0 */
            s->ss_h = s->ss_v = 1;
            s->pix_fmt = pix_fmt_for_ss[bits][1][1];
        }
    }

    return 0;
}
520 
522  const uint8_t *data, int size, int *ref)
523 {
524  VP9Context *s = avctx->priv_data;
525  int c, i, j, k, l, m, n, w, h, max, size2, ret, sharp;
526  int last_invisible;
527  const uint8_t *data2;
528  int changed;
529 
530  /* general header */
531  if ((ret = init_get_bits8(&s->gb, data, size)) < 0) {
532  av_log(avctx, AV_LOG_ERROR, "Failed to initialize bitstream reader\n");
533  return ret;
534  }
535  if (get_bits(&s->gb, 2) != 0x2) { // frame marker
536  av_log(avctx, AV_LOG_ERROR, "Invalid frame marker\n");
537  return AVERROR_INVALIDDATA;
538  }
539  avctx->profile = get_bits1(&s->gb);
540  avctx->profile |= get_bits1(&s->gb) << 1;
541  if (avctx->profile == 3) avctx->profile += get_bits1(&s->gb);
542  if (avctx->profile > 3) {
543  av_log(avctx, AV_LOG_ERROR, "Profile %d is not yet supported\n", avctx->profile);
544  return AVERROR_INVALIDDATA;
545  }
546  s->s.h.profile = avctx->profile;
547  if (get_bits1(&s->gb)) {
548  *ref = get_bits(&s->gb, 3);
549  return 0;
550  }
551 
552  s->last_keyframe = s->s.h.keyframe;
553  s->s.h.keyframe = !get_bits1(&s->gb);
554 
555  last_invisible = s->s.h.invisible;
556  s->s.h.invisible = !get_bits1(&s->gb);
557  s->s.h.errorres = get_bits1(&s->gb);
558  s->s.h.use_last_frame_mvs = !s->s.h.errorres && !last_invisible;
559 
560  if (s->s.h.keyframe) {
561  if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
562  av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
563  return AVERROR_INVALIDDATA;
564  }
565  if ((ret = read_colorspace_details(avctx)) < 0)
566  return ret;
567  // for profile 1, here follows the subsampling bits
568  s->s.h.refreshrefmask = 0xff;
569  w = get_bits(&s->gb, 16) + 1;
570  h = get_bits(&s->gb, 16) + 1;
571  if (get_bits1(&s->gb)) // display size
572  skip_bits(&s->gb, 32);
573  } else {
574  s->s.h.intraonly = s->s.h.invisible ? get_bits1(&s->gb) : 0;
575  s->s.h.resetctx = s->s.h.errorres ? 0 : get_bits(&s->gb, 2);
576  if (s->s.h.intraonly) {
577  if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
578  av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
579  return AVERROR_INVALIDDATA;
580  }
581  if (avctx->profile >= 1) {
582  if ((ret = read_colorspace_details(avctx)) < 0)
583  return ret;
584  } else {
585  s->ss_h = s->ss_v = 1;
586  s->s.h.bpp = 8;
587  s->bpp_index = 0;
588  s->bytesperpixel = 1;
589  s->pix_fmt = AV_PIX_FMT_YUV420P;
590  avctx->colorspace = AVCOL_SPC_BT470BG;
591  avctx->color_range = AVCOL_RANGE_MPEG;
592  }
593  s->s.h.refreshrefmask = get_bits(&s->gb, 8);
594  w = get_bits(&s->gb, 16) + 1;
595  h = get_bits(&s->gb, 16) + 1;
596  if (get_bits1(&s->gb)) // display size
597  skip_bits(&s->gb, 32);
598  } else {
599  s->s.h.refreshrefmask = get_bits(&s->gb, 8);
600  s->s.h.refidx[0] = get_bits(&s->gb, 3);
601  s->s.h.signbias[0] = get_bits1(&s->gb) && !s->s.h.errorres;
602  s->s.h.refidx[1] = get_bits(&s->gb, 3);
603  s->s.h.signbias[1] = get_bits1(&s->gb) && !s->s.h.errorres;
604  s->s.h.refidx[2] = get_bits(&s->gb, 3);
605  s->s.h.signbias[2] = get_bits1(&s->gb) && !s->s.h.errorres;
606  if (!s->s.refs[s->s.h.refidx[0]].f ||
607  !s->s.refs[s->s.h.refidx[1]].f ||
608  !s->s.refs[s->s.h.refidx[2]].f) {
609  av_log(avctx, AV_LOG_ERROR, "Not all references are available\n");
610  return AVERROR_INVALIDDATA;
611  }
612  if (get_bits1(&s->gb)) {
613  w = s->s.refs[s->s.h.refidx[0]].f->width;
614  h = s->s.refs[s->s.h.refidx[0]].f->height;
615  } else if (get_bits1(&s->gb)) {
616  w = s->s.refs[s->s.h.refidx[1]].f->width;
617  h = s->s.refs[s->s.h.refidx[1]].f->height;
618  } else if (get_bits1(&s->gb)) {
619  w = s->s.refs[s->s.h.refidx[2]].f->width;
620  h = s->s.refs[s->s.h.refidx[2]].f->height;
621  } else {
622  w = get_bits(&s->gb, 16) + 1;
623  h = get_bits(&s->gb, 16) + 1;
624  }
625  // Note that in this code, "CUR_FRAME" is actually before we
626  // have formally allocated a frame, and thus actually represents
627  // the _last_ frame
628  s->s.h.use_last_frame_mvs &= s->s.frames[CUR_FRAME].tf.f &&
629  s->s.frames[CUR_FRAME].tf.f->width == w &&
630  s->s.frames[CUR_FRAME].tf.f->height == h;
631  if (get_bits1(&s->gb)) // display size
632  skip_bits(&s->gb, 32);
633  s->s.h.highprecisionmvs = get_bits1(&s->gb);
634  s->s.h.filtermode = get_bits1(&s->gb) ? FILTER_SWITCHABLE :
635  get_bits(&s->gb, 2);
636  s->s.h.allowcompinter = s->s.h.signbias[0] != s->s.h.signbias[1] ||
637  s->s.h.signbias[0] != s->s.h.signbias[2];
638  if (s->s.h.allowcompinter) {
639  if (s->s.h.signbias[0] == s->s.h.signbias[1]) {
640  s->s.h.fixcompref = 2;
641  s->s.h.varcompref[0] = 0;
642  s->s.h.varcompref[1] = 1;
643  } else if (s->s.h.signbias[0] == s->s.h.signbias[2]) {
644  s->s.h.fixcompref = 1;
645  s->s.h.varcompref[0] = 0;
646  s->s.h.varcompref[1] = 2;
647  } else {
648  s->s.h.fixcompref = 0;
649  s->s.h.varcompref[0] = 1;
650  s->s.h.varcompref[1] = 2;
651  }
652  }
653  }
654  }
655  s->s.h.refreshctx = s->s.h.errorres ? 0 : get_bits1(&s->gb);
656  s->s.h.parallelmode = s->s.h.errorres ? 1 : get_bits1(&s->gb);
657  s->s.h.framectxid = c = get_bits(&s->gb, 2);
658  if (s->s.h.keyframe || s->s.h.intraonly)
659  s->s.h.framectxid = 0; // BUG: libvpx ignores this field in keyframes
660 
661  /* loopfilter header data */
662  if (s->s.h.keyframe || s->s.h.errorres || s->s.h.intraonly) {
663  // reset loopfilter defaults
664  s->s.h.lf_delta.ref[0] = 1;
665  s->s.h.lf_delta.ref[1] = 0;
666  s->s.h.lf_delta.ref[2] = -1;
667  s->s.h.lf_delta.ref[3] = -1;
668  s->s.h.lf_delta.mode[0] = 0;
669  s->s.h.lf_delta.mode[1] = 0;
670  memset(s->s.h.segmentation.feat, 0, sizeof(s->s.h.segmentation.feat));
671  }
672  s->s.h.filter.level = get_bits(&s->gb, 6);
673  sharp = get_bits(&s->gb, 3);
674  // if sharpness changed, reinit lim/mblim LUTs. if it didn't change, keep
675  // the old cache values since they are still valid
676  if (s->s.h.filter.sharpness != sharp) {
677  for (i = 1; i <= 63; i++) {
678  int limit = i;
679 
680  if (sharp > 0) {
681  limit >>= (sharp + 3) >> 2;
682  limit = FFMIN(limit, 9 - sharp);
683  }
684  limit = FFMAX(limit, 1);
685 
686  s->filter_lut.lim_lut[i] = limit;
687  s->filter_lut.mblim_lut[i] = 2 * (i + 2) + limit;
688  }
689  }
690  s->s.h.filter.sharpness = sharp;
691  if ((s->s.h.lf_delta.enabled = get_bits1(&s->gb))) {
692  if ((s->s.h.lf_delta.updated = get_bits1(&s->gb))) {
693  for (i = 0; i < 4; i++)
694  if (get_bits1(&s->gb))
695  s->s.h.lf_delta.ref[i] = get_sbits_inv(&s->gb, 6);
696  for (i = 0; i < 2; i++)
697  if (get_bits1(&s->gb))
698  s->s.h.lf_delta.mode[i] = get_sbits_inv(&s->gb, 6);
699  }
700  }
701 
702  /* quantization header data */
703  s->s.h.yac_qi = get_bits(&s->gb, 8);
704  s->s.h.ydc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
705  s->s.h.uvdc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
706  s->s.h.uvac_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
707  s->s.h.lossless = s->s.h.yac_qi == 0 && s->s.h.ydc_qdelta == 0 &&
708  s->s.h.uvdc_qdelta == 0 && s->s.h.uvac_qdelta == 0;
709 #if FF_API_CODEC_PROPS
711  if (s->s.h.lossless)
714 #endif
715 
716  /* segmentation header info */
717  if ((s->s.h.segmentation.enabled = get_bits1(&s->gb))) {
718  if ((s->s.h.segmentation.update_map = get_bits1(&s->gb))) {
719  for (i = 0; i < 7; i++)
720  s->s.h.segmentation.prob[i] = get_bits1(&s->gb) ?
721  get_bits(&s->gb, 8) : 255;
722  if ((s->s.h.segmentation.temporal = get_bits1(&s->gb)))
723  for (i = 0; i < 3; i++)
724  s->s.h.segmentation.pred_prob[i] = get_bits1(&s->gb) ?
725  get_bits(&s->gb, 8) : 255;
726  }
727 
728  if (get_bits1(&s->gb)) {
729  s->s.h.segmentation.absolute_vals = get_bits1(&s->gb);
730  for (i = 0; i < 8; i++) {
731  if ((s->s.h.segmentation.feat[i].q_enabled = get_bits1(&s->gb)))
732  s->s.h.segmentation.feat[i].q_val = get_sbits_inv(&s->gb, 8);
733  if ((s->s.h.segmentation.feat[i].lf_enabled = get_bits1(&s->gb)))
734  s->s.h.segmentation.feat[i].lf_val = get_sbits_inv(&s->gb, 6);
735  if ((s->s.h.segmentation.feat[i].ref_enabled = get_bits1(&s->gb)))
736  s->s.h.segmentation.feat[i].ref_val = get_bits(&s->gb, 2);
737  s->s.h.segmentation.feat[i].skip_enabled = get_bits1(&s->gb);
738  }
739  }
740  } else {
741  // Reset fields under segmentation switch if segmentation is disabled.
742  // This is necessary because some hwaccels don't ignore these fields
743  // if segmentation is disabled.
744  s->s.h.segmentation.temporal = 0;
745  s->s.h.segmentation.update_map = 0;
746  }
747 
748  // set qmul[] based on Y/UV, AC/DC and segmentation Q idx deltas
749  for (i = 0; i < (s->s.h.segmentation.enabled ? 8 : 1); i++) {
750  int qyac, qydc, quvac, quvdc, lflvl, sh;
751 
752  if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].q_enabled) {
753  if (s->s.h.segmentation.absolute_vals)
754  qyac = av_clip_uintp2(s->s.h.segmentation.feat[i].q_val, 8);
755  else
756  qyac = av_clip_uintp2(s->s.h.yac_qi + s->s.h.segmentation.feat[i].q_val, 8);
757  } else {
758  qyac = s->s.h.yac_qi;
759  }
760  qydc = av_clip_uintp2(qyac + s->s.h.ydc_qdelta, 8);
761  quvdc = av_clip_uintp2(qyac + s->s.h.uvdc_qdelta, 8);
762  quvac = av_clip_uintp2(qyac + s->s.h.uvac_qdelta, 8);
763  qyac = av_clip_uintp2(qyac, 8);
764 
765  s->s.h.segmentation.feat[i].qmul[0][0] = ff_vp9_dc_qlookup[s->bpp_index][qydc];
766  s->s.h.segmentation.feat[i].qmul[0][1] = ff_vp9_ac_qlookup[s->bpp_index][qyac];
767  s->s.h.segmentation.feat[i].qmul[1][0] = ff_vp9_dc_qlookup[s->bpp_index][quvdc];
768  s->s.h.segmentation.feat[i].qmul[1][1] = ff_vp9_ac_qlookup[s->bpp_index][quvac];
769 
770  sh = s->s.h.filter.level >= 32;
771  if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].lf_enabled) {
772  if (s->s.h.segmentation.absolute_vals)
773  lflvl = av_clip_uintp2(s->s.h.segmentation.feat[i].lf_val, 6);
774  else
775  lflvl = av_clip_uintp2(s->s.h.filter.level + s->s.h.segmentation.feat[i].lf_val, 6);
776  } else {
777  lflvl = s->s.h.filter.level;
778  }
779  if (s->s.h.lf_delta.enabled) {
780  s->s.h.segmentation.feat[i].lflvl[0][0] =
781  s->s.h.segmentation.feat[i].lflvl[0][1] =
782  av_clip_uintp2(lflvl + (s->s.h.lf_delta.ref[0] * (1 << sh)), 6);
783  for (j = 1; j < 4; j++) {
784  s->s.h.segmentation.feat[i].lflvl[j][0] =
785  av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
786  s->s.h.lf_delta.mode[0]) * (1 << sh)), 6);
787  s->s.h.segmentation.feat[i].lflvl[j][1] =
788  av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
789  s->s.h.lf_delta.mode[1]) * (1 << sh)), 6);
790  }
791  } else {
792  memset(s->s.h.segmentation.feat[i].lflvl, lflvl,
793  sizeof(s->s.h.segmentation.feat[i].lflvl));
794  }
795  }
796 
797  /* tiling info */
798  if ((changed = update_size(avctx, w, h)) < 0) {
799  av_log(avctx, AV_LOG_ERROR, "Failed to initialize decoder for %dx%d @ %d\n",
800  w, h, s->pix_fmt);
801  return changed;
802  }
803  for (s->s.h.tiling.log2_tile_cols = 0;
804  s->sb_cols > (64 << s->s.h.tiling.log2_tile_cols);
805  s->s.h.tiling.log2_tile_cols++) ;
806  for (max = 0; (s->sb_cols >> max) >= 4; max++) ;
807  max = FFMAX(0, max - 1);
808  while (max > s->s.h.tiling.log2_tile_cols) {
809  if (get_bits1(&s->gb))
810  s->s.h.tiling.log2_tile_cols++;
811  else
812  break;
813  }
814  s->s.h.tiling.log2_tile_rows = decode012(&s->gb);
815  s->s.h.tiling.tile_rows = 1 << s->s.h.tiling.log2_tile_rows;
816  if (s->s.h.tiling.tile_cols != (1 << s->s.h.tiling.log2_tile_cols) || changed) {
817  int n_range_coders;
818  VPXRangeCoder *rc;
819 
820  if (s->td) {
821  for (i = 0; i < s->active_tile_cols; i++)
822  vp9_tile_data_free(&s->td[i]);
823  av_freep(&s->td);
824  }
825 
826  s->s.h.tiling.tile_cols = 1 << s->s.h.tiling.log2_tile_cols;
827  s->active_tile_cols = avctx->active_thread_type == FF_THREAD_SLICE ?
828  s->s.h.tiling.tile_cols : 1;
829  vp9_alloc_entries(avctx, s->sb_rows);
830  if (avctx->active_thread_type == FF_THREAD_SLICE) {
831  n_range_coders = 4; // max_tile_rows
832  } else {
833  n_range_coders = s->s.h.tiling.tile_cols;
834  }
835  s->td = av_calloc(s->active_tile_cols, sizeof(VP9TileData) +
836  n_range_coders * sizeof(VPXRangeCoder));
837  if (!s->td)
838  return AVERROR(ENOMEM);
839  rc = (VPXRangeCoder *) &s->td[s->active_tile_cols];
840  for (i = 0; i < s->active_tile_cols; i++) {
841  s->td[i].s = s;
842  s->td[i].c_b = rc;
843  rc += n_range_coders;
844  }
845  }
846 
847  /* check reference frames */
848  if (!s->s.h.keyframe && !s->s.h.intraonly) {
849  int valid_ref_frame = 0;
850  for (i = 0; i < 3; i++) {
851  AVFrame *ref = s->s.refs[s->s.h.refidx[i]].f;
852  int refw = ref->width, refh = ref->height;
853 
854  if (ref->format != avctx->pix_fmt) {
855  av_log(avctx, AV_LOG_ERROR,
856  "Ref pixfmt (%s) did not match current frame (%s)",
857  av_get_pix_fmt_name(ref->format),
858  av_get_pix_fmt_name(avctx->pix_fmt));
859  return AVERROR_INVALIDDATA;
860  } else if (refw == w && refh == h) {
861  s->mvscale[i][0] = s->mvscale[i][1] = 0;
862  } else {
863  /* Check to make sure at least one of frames that */
864  /* this frame references has valid dimensions */
865  if (w * 2 < refw || h * 2 < refh || w > 16 * refw || h > 16 * refh) {
866  av_log(avctx, AV_LOG_WARNING,
867  "Invalid ref frame dimensions %dx%d for frame size %dx%d\n",
868  refw, refh, w, h);
869  s->mvscale[i][0] = s->mvscale[i][1] = REF_INVALID_SCALE;
870  continue;
871  }
872  s->mvscale[i][0] = (refw << 14) / w;
873  s->mvscale[i][1] = (refh << 14) / h;
874  s->mvstep[i][0] = 16 * s->mvscale[i][0] >> 14;
875  s->mvstep[i][1] = 16 * s->mvscale[i][1] >> 14;
876  }
877  valid_ref_frame++;
878  }
879  if (!valid_ref_frame) {
880  av_log(avctx, AV_LOG_ERROR, "No valid reference frame is found, bitstream not supported\n");
881  return AVERROR_INVALIDDATA;
882  }
883  }
884 
885  if (s->s.h.keyframe || s->s.h.errorres || (s->s.h.intraonly && s->s.h.resetctx == 3)) {
886  s->prob_ctx[0].p = s->prob_ctx[1].p = s->prob_ctx[2].p =
887  s->prob_ctx[3].p = ff_vp9_default_probs;
888  memcpy(s->prob_ctx[0].coef, ff_vp9_default_coef_probs,
889  sizeof(ff_vp9_default_coef_probs));
890  memcpy(s->prob_ctx[1].coef, ff_vp9_default_coef_probs,
891  sizeof(ff_vp9_default_coef_probs));
892  memcpy(s->prob_ctx[2].coef, ff_vp9_default_coef_probs,
893  sizeof(ff_vp9_default_coef_probs));
894  memcpy(s->prob_ctx[3].coef, ff_vp9_default_coef_probs,
895  sizeof(ff_vp9_default_coef_probs));
896  } else if (s->s.h.intraonly && s->s.h.resetctx == 2) {
897  s->prob_ctx[c].p = ff_vp9_default_probs;
898  memcpy(s->prob_ctx[c].coef, ff_vp9_default_coef_probs,
899  sizeof(ff_vp9_default_coef_probs));
900  }
901 
902  // next 16 bits is size of the rest of the header (arith-coded)
903  s->s.h.compressed_header_size = size2 = get_bits(&s->gb, 16);
904  s->s.h.uncompressed_header_size = (get_bits_count(&s->gb) + 7) / 8;
905 
906  data2 = align_get_bits(&s->gb);
907  if (size2 > size - (data2 - data)) {
908  av_log(avctx, AV_LOG_ERROR, "Invalid compressed header size\n");
909  return AVERROR_INVALIDDATA;
910  }
911  ret = ff_vpx_init_range_decoder(&s->c, data2, size2);
912  if (ret < 0)
913  return ret;
914 
915  if (vpx_rac_get_prob_branchy(&s->c, 128)) { // marker bit
916  av_log(avctx, AV_LOG_ERROR, "Marker bit was set\n");
917  return AVERROR_INVALIDDATA;
918  }
919 
920  for (i = 0; i < s->active_tile_cols; i++) {
921  if (s->s.h.keyframe || s->s.h.intraonly) {
922  memset(s->td[i].counts.coef, 0, sizeof(s->td[0].counts.coef));
923  memset(s->td[i].counts.eob, 0, sizeof(s->td[0].counts.eob));
924  } else {
925  memset(&s->td[i].counts, 0, sizeof(s->td[0].counts));
926  }
927  s->td[i].nb_block_structure = 0;
928  }
929 
930  /* FIXME is it faster to not copy here, but do it down in the fw updates
931  * as explicit copies if the fw update is missing (and skip the copy upon
932  * fw update)? */
933  s->prob.p = s->prob_ctx[c].p;
934 
935  // txfm updates
936  if (s->s.h.lossless) {
937  s->s.h.txfmmode = TX_4X4;
938  } else {
939  s->s.h.txfmmode = vp89_rac_get_uint(&s->c, 2);
940  if (s->s.h.txfmmode == 3)
941  s->s.h.txfmmode += vp89_rac_get(&s->c);
942 
943  if (s->s.h.txfmmode == TX_SWITCHABLE) {
944  for (i = 0; i < 2; i++)
945  if (vpx_rac_get_prob_branchy(&s->c, 252))
946  s->prob.p.tx8p[i] = update_prob(&s->c, s->prob.p.tx8p[i]);
947  for (i = 0; i < 2; i++)
948  for (j = 0; j < 2; j++)
949  if (vpx_rac_get_prob_branchy(&s->c, 252))
950  s->prob.p.tx16p[i][j] =
951  update_prob(&s->c, s->prob.p.tx16p[i][j]);
952  for (i = 0; i < 2; i++)
953  for (j = 0; j < 3; j++)
954  if (vpx_rac_get_prob_branchy(&s->c, 252))
955  s->prob.p.tx32p[i][j] =
956  update_prob(&s->c, s->prob.p.tx32p[i][j]);
957  }
958  }
959 
960  // coef updates
961  for (i = 0; i < 4; i++) {
962  uint8_t (*ref)[2][6][6][3] = s->prob_ctx[c].coef[i];
963  if (vp89_rac_get(&s->c)) {
964  for (j = 0; j < 2; j++)
965  for (k = 0; k < 2; k++)
966  for (l = 0; l < 6; l++)
967  for (m = 0; m < 6; m++) {
968  uint8_t *p = s->prob.coef[i][j][k][l][m];
969  uint8_t *r = ref[j][k][l][m];
970  if (m >= 3 && l == 0) // dc only has 3 pt
971  break;
972  for (n = 0; n < 3; n++) {
973  if (vpx_rac_get_prob_branchy(&s->c, 252))
974  p[n] = update_prob(&s->c, r[n]);
975  else
976  p[n] = r[n];
977  }
978  memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
979  }
980  } else {
981  for (j = 0; j < 2; j++)
982  for (k = 0; k < 2; k++)
983  for (l = 0; l < 6; l++)
984  for (m = 0; m < 6; m++) {
985  uint8_t *p = s->prob.coef[i][j][k][l][m];
986  uint8_t *r = ref[j][k][l][m];
987  if (m > 3 && l == 0) // dc only has 3 pt
988  break;
989  memcpy(p, r, 3);
990  memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
991  }
992  }
993  if (s->s.h.txfmmode == i)
994  break;
995  }
996 
997  // mode updates
998  for (i = 0; i < 3; i++)
999  if (vpx_rac_get_prob_branchy(&s->c, 252))
1000  s->prob.p.skip[i] = update_prob(&s->c, s->prob.p.skip[i]);
1001  if (!s->s.h.keyframe && !s->s.h.intraonly) {
1002  for (i = 0; i < 7; i++)
1003  for (j = 0; j < 3; j++)
1004  if (vpx_rac_get_prob_branchy(&s->c, 252))
1005  s->prob.p.mv_mode[i][j] =
1006  update_prob(&s->c, s->prob.p.mv_mode[i][j]);
1007 
1008  if (s->s.h.filtermode == FILTER_SWITCHABLE)
1009  for (i = 0; i < 4; i++)
1010  for (j = 0; j < 2; j++)
1011  if (vpx_rac_get_prob_branchy(&s->c, 252))
1012  s->prob.p.filter[i][j] =
1013  update_prob(&s->c, s->prob.p.filter[i][j]);
1014 
1015  for (i = 0; i < 4; i++)
1016  if (vpx_rac_get_prob_branchy(&s->c, 252))
1017  s->prob.p.intra[i] = update_prob(&s->c, s->prob.p.intra[i]);
1018 
1019  if (s->s.h.allowcompinter) {
1020  s->s.h.comppredmode = vp89_rac_get(&s->c);
1021  if (s->s.h.comppredmode)
1022  s->s.h.comppredmode += vp89_rac_get(&s->c);
1023  if (s->s.h.comppredmode == PRED_SWITCHABLE)
1024  for (i = 0; i < 5; i++)
1025  if (vpx_rac_get_prob_branchy(&s->c, 252))
1026  s->prob.p.comp[i] =
1027  update_prob(&s->c, s->prob.p.comp[i]);
1028  } else {
1029  s->s.h.comppredmode = PRED_SINGLEREF;
1030  }
1031 
1032  if (s->s.h.comppredmode != PRED_COMPREF) {
1033  for (i = 0; i < 5; i++) {
1034  if (vpx_rac_get_prob_branchy(&s->c, 252))
1035  s->prob.p.single_ref[i][0] =
1036  update_prob(&s->c, s->prob.p.single_ref[i][0]);
1037  if (vpx_rac_get_prob_branchy(&s->c, 252))
1038  s->prob.p.single_ref[i][1] =
1039  update_prob(&s->c, s->prob.p.single_ref[i][1]);
1040  }
1041  }
1042 
1043  if (s->s.h.comppredmode != PRED_SINGLEREF) {
1044  for (i = 0; i < 5; i++)
1045  if (vpx_rac_get_prob_branchy(&s->c, 252))
1046  s->prob.p.comp_ref[i] =
1047  update_prob(&s->c, s->prob.p.comp_ref[i]);
1048  }
1049 
1050  for (i = 0; i < 4; i++)
1051  for (j = 0; j < 9; j++)
1052  if (vpx_rac_get_prob_branchy(&s->c, 252))
1053  s->prob.p.y_mode[i][j] =
1054  update_prob(&s->c, s->prob.p.y_mode[i][j]);
1055 
1056  for (i = 0; i < 4; i++)
1057  for (j = 0; j < 4; j++)
1058  for (k = 0; k < 3; k++)
1059  if (vpx_rac_get_prob_branchy(&s->c, 252))
1060  s->prob.p.partition[3 - i][j][k] =
1061  update_prob(&s->c,
1062  s->prob.p.partition[3 - i][j][k]);
1063 
1064  // mv fields don't use the update_prob subexp model for some reason
1065  for (i = 0; i < 3; i++)
1066  if (vpx_rac_get_prob_branchy(&s->c, 252))
1067  s->prob.p.mv_joint[i] = (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1068 
1069  for (i = 0; i < 2; i++) {
1070  if (vpx_rac_get_prob_branchy(&s->c, 252))
1071  s->prob.p.mv_comp[i].sign =
1072  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1073 
1074  for (j = 0; j < 10; j++)
1075  if (vpx_rac_get_prob_branchy(&s->c, 252))
1076  s->prob.p.mv_comp[i].classes[j] =
1077  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1078 
1079  if (vpx_rac_get_prob_branchy(&s->c, 252))
1080  s->prob.p.mv_comp[i].class0 =
1081  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1082 
1083  for (j = 0; j < 10; j++)
1084  if (vpx_rac_get_prob_branchy(&s->c, 252))
1085  s->prob.p.mv_comp[i].bits[j] =
1086  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1087  }
1088 
1089  for (i = 0; i < 2; i++) {
1090  for (j = 0; j < 2; j++)
1091  for (k = 0; k < 3; k++)
1092  if (vpx_rac_get_prob_branchy(&s->c, 252))
1093  s->prob.p.mv_comp[i].class0_fp[j][k] =
1094  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1095 
1096  for (j = 0; j < 3; j++)
1097  if (vpx_rac_get_prob_branchy(&s->c, 252))
1098  s->prob.p.mv_comp[i].fp[j] =
1099  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1100  }
1101 
1102  if (s->s.h.highprecisionmvs) {
1103  for (i = 0; i < 2; i++) {
1104  if (vpx_rac_get_prob_branchy(&s->c, 252))
1105  s->prob.p.mv_comp[i].class0_hp =
1106  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1107 
1108  if (vpx_rac_get_prob_branchy(&s->c, 252))
1109  s->prob.p.mv_comp[i].hp =
1110  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1111  }
1112  }
1113  }
1114 
1115  return (data2 - data) + size2;
1116 }
1117 
1118 static void decode_sb(VP9TileData *td, int row, int col, VP9Filter *lflvl,
1119  ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
1120 {
1121  const VP9Context *s = td->s;
1122  int c = ((s->above_partition_ctx[col] >> (3 - bl)) & 1) |
1123  (((td->left_partition_ctx[row & 0x7] >> (3 - bl)) & 1) << 1);
1124  const uint8_t *p = s->s.h.keyframe || s->s.h.intraonly ? ff_vp9_default_kf_partition_probs[bl][c] :
1125  s->prob.p.partition[bl][c];
1126  enum BlockPartition bp;
1127  ptrdiff_t hbs = 4 >> bl;
1128  AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
1129  ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
1130  int bytesperpixel = s->bytesperpixel;
1131 
1132  if (bl == BL_8X8) {
1134  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1135  } else if (col + hbs < s->cols) { // FIXME why not <=?
1136  if (row + hbs < s->rows) { // FIXME why not <=?
1138  switch (bp) {
1139  case PARTITION_NONE:
1140  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1141  break;
1142  case PARTITION_H:
1143  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1144  yoff += hbs * 8 * y_stride;
1145  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1146  ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, bl, bp);
1147  break;
1148  case PARTITION_V:
1149  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1150  yoff += hbs * 8 * bytesperpixel;
1151  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1152  ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, bl, bp);
1153  break;
1154  case PARTITION_SPLIT:
1155  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1156  decode_sb(td, row, col + hbs, lflvl,
1157  yoff + 8 * hbs * bytesperpixel,
1158  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1159  yoff += hbs * 8 * y_stride;
1160  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1161  decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1162  decode_sb(td, row + hbs, col + hbs, lflvl,
1163  yoff + 8 * hbs * bytesperpixel,
1164  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1165  break;
1166  default:
1167  av_unreachable("ff_vp9_partition_tree only has "
1168  "the four PARTITION_* terminal codes");
1169  }
1170  } else if (vpx_rac_get_prob_branchy(td->c, p[1])) {
1171  bp = PARTITION_SPLIT;
1172  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1173  decode_sb(td, row, col + hbs, lflvl,
1174  yoff + 8 * hbs * bytesperpixel,
1175  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1176  } else {
1177  bp = PARTITION_H;
1178  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1179  }
1180  } else if (row + hbs < s->rows) { // FIXME why not <=?
1181  if (vpx_rac_get_prob_branchy(td->c, p[2])) {
1182  bp = PARTITION_SPLIT;
1183  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1184  yoff += hbs * 8 * y_stride;
1185  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1186  decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1187  } else {
1188  bp = PARTITION_V;
1189  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1190  }
1191  } else {
1192  bp = PARTITION_SPLIT;
1193  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1194  }
1195  td->counts.partition[bl][c][bp]++;
1196 }
1197 
1198 static void decode_sb_mem(VP9TileData *td, int row, int col, VP9Filter *lflvl,
1199  ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
1200 {
1201  const VP9Context *s = td->s;
1202  VP9Block *b = td->b;
1203  ptrdiff_t hbs = 4 >> bl;
1204  AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
1205  ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
1206  int bytesperpixel = s->bytesperpixel;
1207 
1208  if (bl == BL_8X8) {
1209  av_assert2(b->bl == BL_8X8);
1210  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
1211  } else if (td->b->bl == bl) {
1212  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
1213  if (b->bp == PARTITION_H && row + hbs < s->rows) {
1214  yoff += hbs * 8 * y_stride;
1215  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1216  ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, b->bl, b->bp);
1217  } else if (b->bp == PARTITION_V && col + hbs < s->cols) {
1218  yoff += hbs * 8 * bytesperpixel;
1219  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1220  ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, b->bl, b->bp);
1221  }
1222  } else {
1223  decode_sb_mem(td, row, col, lflvl, yoff, uvoff, bl + 1);
1224  if (col + hbs < s->cols) { // FIXME why not <=?
1225  if (row + hbs < s->rows) {
1226  decode_sb_mem(td, row, col + hbs, lflvl, yoff + 8 * hbs * bytesperpixel,
1227  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1228  yoff += hbs * 8 * y_stride;
1229  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1230  decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1231  decode_sb_mem(td, row + hbs, col + hbs, lflvl,
1232  yoff + 8 * hbs * bytesperpixel,
1233  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1234  } else {
1235  yoff += hbs * 8 * bytesperpixel;
1236  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1237  decode_sb_mem(td, row, col + hbs, lflvl, yoff, uvoff, bl + 1);
1238  }
1239  } else if (row + hbs < s->rows) {
1240  yoff += hbs * 8 * y_stride;
1241  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1242  decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1243  }
1244  }
1245 }
1246 
/**
 * Compute the half-open range [*start, *end) covered by tile number idx
 * when n superblock rows/cols are split into 2^log2_n tiles.
 *
 * The superblock bounds are clamped to n (equivalent to FFMIN) before being
 * scaled by 8, so the result is expressed in 8-pixel block units.
 */
static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
{
    int sb_start = ( idx      * n) >> log2_n;
    int sb_end   = ((idx + 1) * n) >> log2_n;
    *start = (sb_start < n ? sb_start : n) << 3;
    *end   = (sb_end   < n ? sb_end   : n) << 3;
}
1254 
1256 {
1257  int i;
1258 
1259  av_freep(&s->intra_pred_data[0]);
1260  for (i = 0; i < s->active_tile_cols; i++)
1261  vp9_tile_data_free(&s->td[i]);
1262 }
1263 
1265 {
1266  VP9Context *s = avctx->priv_data;
1267  int i;
1268 
1269  for (int i = 0; i < 3; i++)
1270  vp9_frame_unref(&s->s.frames[i]);
1271  av_refstruct_pool_uninit(&s->frame_extradata_pool);
1272  for (i = 0; i < 8; i++) {
1273  ff_progress_frame_unref(&s->s.refs[i]);
1274  ff_progress_frame_unref(&s->next_refs[i]);
1275  vp9_frame_unref(&s->s.ref_frames[i]);
1276  }
1277 
1278  free_buffers(s);
1279 #if HAVE_THREADS
1280  av_freep(&s->entries);
1281  ff_pthread_free(s, vp9_context_offsets);
1282 #endif
1283 
1284  av_refstruct_unref(&s->header_ref);
1285  ff_cbs_fragment_free(&s->current_frag);
1286  ff_cbs_close(&s->cbc);
1287 
1288  av_freep(&s->td);
1289  return 0;
1290 }
1291 
1292 static int decode_tiles(AVCodecContext *avctx,
1293  const uint8_t *data, int size)
1294 {
1295  VP9Context *s = avctx->priv_data;
1296  VP9TileData *td = &s->td[0];
1297  int row, col, tile_row, tile_col, ret;
1298  int bytesperpixel;
1299  int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
1300  AVFrame *f;
1301  ptrdiff_t yoff, uvoff, ls_y, ls_uv;
1302 
1303  f = s->s.frames[CUR_FRAME].tf.f;
1304  ls_y = f->linesize[0];
1305  ls_uv =f->linesize[1];
1306  bytesperpixel = s->bytesperpixel;
1307 
1308  yoff = uvoff = 0;
1309  for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
1310  set_tile_offset(&tile_row_start, &tile_row_end,
1311  tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);
1312 
1313  for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
1314  int64_t tile_size;
1315 
1316  if (tile_col == s->s.h.tiling.tile_cols - 1 &&
1317  tile_row == s->s.h.tiling.tile_rows - 1) {
1318  tile_size = size;
1319  } else {
1320  tile_size = AV_RB32(data);
1321  data += 4;
1322  size -= 4;
1323  }
1324  if (tile_size > size)
1325  return AVERROR_INVALIDDATA;
1326  ret = ff_vpx_init_range_decoder(&td->c_b[tile_col], data, tile_size);
1327  if (ret < 0)
1328  return ret;
1329  if (vpx_rac_get_prob_branchy(&td->c_b[tile_col], 128)) // marker bit
1330  return AVERROR_INVALIDDATA;
1331  data += tile_size;
1332  size -= tile_size;
1333  }
1334 
1335  for (row = tile_row_start; row < tile_row_end;
1336  row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
1337  VP9Filter *lflvl_ptr = s->lflvl;
1338  ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
1339 
1340  for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
1341  set_tile_offset(&tile_col_start, &tile_col_end,
1342  tile_col, s->s.h.tiling.log2_tile_cols, s->sb_cols);
1343  td->tile_col_start = tile_col_start;
1344  if (s->pass != 2) {
1345  memset(td->left_partition_ctx, 0, 8);
1346  memset(td->left_skip_ctx, 0, 8);
1347  if (s->s.h.keyframe || s->s.h.intraonly) {
1348  memset(td->left_mode_ctx, DC_PRED, 16);
1349  } else {
1350  memset(td->left_mode_ctx, NEARESTMV, 8);
1351  }
1352  memset(td->left_y_nnz_ctx, 0, 16);
1353  memset(td->left_uv_nnz_ctx, 0, 32);
1354  memset(td->left_segpred_ctx, 0, 8);
1355 
1356  td->c = &td->c_b[tile_col];
1357  }
1358 
1359  for (col = tile_col_start;
1360  col < tile_col_end;
1361  col += 8, yoff2 += 64 * bytesperpixel,
1362  uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1363  // FIXME integrate with lf code (i.e. zero after each
1364  // use, similar to invtxfm coefficients, or similar)
1365  if (s->pass != 1) {
1366  memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
1367  }
1368 
1369  if (s->pass == 2) {
1370  decode_sb_mem(td, row, col, lflvl_ptr,
1371  yoff2, uvoff2, BL_64X64);
1372  } else {
1373  if (vpx_rac_is_end(td->c)) {
1374  return AVERROR_INVALIDDATA;
1375  }
1376  decode_sb(td, row, col, lflvl_ptr,
1377  yoff2, uvoff2, BL_64X64);
1378  }
1379  }
1380  }
1381 
1382  if (s->pass == 1)
1383  continue;
1384 
1385  // backup pre-loopfilter reconstruction data for intra
1386  // prediction of next row of sb64s
1387  if (row + 8 < s->rows) {
1388  memcpy(s->intra_pred_data[0],
1389  f->data[0] + yoff + 63 * ls_y,
1390  8 * s->cols * bytesperpixel);
1391  memcpy(s->intra_pred_data[1],
1392  f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
1393  8 * s->cols * bytesperpixel >> s->ss_h);
1394  memcpy(s->intra_pred_data[2],
1395  f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
1396  8 * s->cols * bytesperpixel >> s->ss_h);
1397  }
1398 
1399  // loopfilter one row
1400  if (s->s.h.filter.level) {
1401  yoff2 = yoff;
1402  uvoff2 = uvoff;
1403  lflvl_ptr = s->lflvl;
1404  for (col = 0; col < s->cols;
1405  col += 8, yoff2 += 64 * bytesperpixel,
1406  uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1407  ff_vp9_loopfilter_sb(avctx, lflvl_ptr, row, col,
1408  yoff2, uvoff2);
1409  }
1410  }
1411 
1412  // FIXME maybe we can make this more finegrained by running the
1413  // loopfilter per-block instead of after each sbrow
1414  // In fact that would also make intra pred left preparation easier?
1415  ff_progress_frame_report(&s->s.frames[CUR_FRAME].tf, row >> 3);
1416  }
1417  }
1418  return 0;
1419 }
1420 
1421 #if HAVE_THREADS
1422 static av_always_inline
1423 int decode_tiles_mt(AVCodecContext *avctx, void *tdata, int jobnr,
1424  int threadnr)
1425 {
1426  VP9Context *s = avctx->priv_data;
1427  VP9TileData *td = &s->td[jobnr];
1428  ptrdiff_t uvoff, yoff, ls_y, ls_uv;
1429  int bytesperpixel = s->bytesperpixel, row, col, tile_row;
1430  unsigned tile_cols_len;
1431  int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
1432  VP9Filter *lflvl_ptr_base;
1433  AVFrame *f;
1434 
1435  f = s->s.frames[CUR_FRAME].tf.f;
1436  ls_y = f->linesize[0];
1437  ls_uv =f->linesize[1];
1438 
1439  set_tile_offset(&tile_col_start, &tile_col_end,
1440  jobnr, s->s.h.tiling.log2_tile_cols, s->sb_cols);
1441  td->tile_col_start = tile_col_start;
1442  uvoff = (64 * bytesperpixel >> s->ss_h)*(tile_col_start >> 3);
1443  yoff = (64 * bytesperpixel)*(tile_col_start >> 3);
1444  lflvl_ptr_base = s->lflvl+(tile_col_start >> 3);
1445 
1446  for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
1447  set_tile_offset(&tile_row_start, &tile_row_end,
1448  tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);
1449 
1450  td->c = &td->c_b[tile_row];
1451  for (row = tile_row_start; row < tile_row_end;
1452  row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
1453  ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
1454  VP9Filter *lflvl_ptr = lflvl_ptr_base+s->sb_cols*(row >> 3);
1455 
1456  memset(td->left_partition_ctx, 0, 8);
1457  memset(td->left_skip_ctx, 0, 8);
1458  if (s->s.h.keyframe || s->s.h.intraonly) {
1459  memset(td->left_mode_ctx, DC_PRED, 16);
1460  } else {
1461  memset(td->left_mode_ctx, NEARESTMV, 8);
1462  }
1463  memset(td->left_y_nnz_ctx, 0, 16);
1464  memset(td->left_uv_nnz_ctx, 0, 32);
1465  memset(td->left_segpred_ctx, 0, 8);
1466 
1467  for (col = tile_col_start;
1468  col < tile_col_end;
1469  col += 8, yoff2 += 64 * bytesperpixel,
1470  uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1471  // FIXME integrate with lf code (i.e. zero after each
1472  // use, similar to invtxfm coefficients, or similar)
1473  memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
1474  decode_sb(td, row, col, lflvl_ptr,
1475  yoff2, uvoff2, BL_64X64);
1476  }
1477 
1478  // backup pre-loopfilter reconstruction data for intra
1479  // prediction of next row of sb64s
1480  tile_cols_len = tile_col_end - tile_col_start;
1481  if (row + 8 < s->rows) {
1482  memcpy(s->intra_pred_data[0] + (tile_col_start * 8 * bytesperpixel),
1483  f->data[0] + yoff + 63 * ls_y,
1484  8 * tile_cols_len * bytesperpixel);
1485  memcpy(s->intra_pred_data[1] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
1486  f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
1487  8 * tile_cols_len * bytesperpixel >> s->ss_h);
1488  memcpy(s->intra_pred_data[2] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
1489  f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
1490  8 * tile_cols_len * bytesperpixel >> s->ss_h);
1491  }
1492 
1493  vp9_report_tile_progress(s, row >> 3, 1);
1494  }
1495  }
1496  return 0;
1497 }
1498 
1499 static av_always_inline
1500 int loopfilter_proc(AVCodecContext *avctx)
1501 {
1502  VP9Context *s = avctx->priv_data;
1503  ptrdiff_t uvoff, yoff, ls_y, ls_uv;
1504  VP9Filter *lflvl_ptr;
1505  int bytesperpixel = s->bytesperpixel, col, i;
1506  AVFrame *f;
1507 
1508  f = s->s.frames[CUR_FRAME].tf.f;
1509  ls_y = f->linesize[0];
1510  ls_uv =f->linesize[1];
1511 
1512  for (i = 0; i < s->sb_rows; i++) {
1513  vp9_await_tile_progress(s, i, s->s.h.tiling.tile_cols);
1514 
1515  if (s->s.h.filter.level) {
1516  yoff = (ls_y * 64)*i;
1517  uvoff = (ls_uv * 64 >> s->ss_v)*i;
1518  lflvl_ptr = s->lflvl+s->sb_cols*i;
1519  for (col = 0; col < s->cols;
1520  col += 8, yoff += 64 * bytesperpixel,
1521  uvoff += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1522  ff_vp9_loopfilter_sb(avctx, lflvl_ptr, i << 3, col,
1523  yoff, uvoff);
1524  }
1525  }
1526  }
1527  return 0;
1528 }
1529 #endif
1530 
1532 {
1533  AVVideoEncParams *par;
1534  unsigned int tile, nb_blocks = 0;
1535 
1536  if (s->s.h.segmentation.enabled) {
1537  for (tile = 0; tile < s->active_tile_cols; tile++)
1538  nb_blocks += s->td[tile].nb_block_structure;
1539  }
1540 
1542  AV_VIDEO_ENC_PARAMS_VP9, nb_blocks);
1543  if (!par)
1544  return AVERROR(ENOMEM);
1545 
1546  par->qp = s->s.h.yac_qi;
1547  par->delta_qp[0][0] = s->s.h.ydc_qdelta;
1548  par->delta_qp[1][0] = s->s.h.uvdc_qdelta;
1549  par->delta_qp[2][0] = s->s.h.uvdc_qdelta;
1550  par->delta_qp[1][1] = s->s.h.uvac_qdelta;
1551  par->delta_qp[2][1] = s->s.h.uvac_qdelta;
1552 
1553  if (nb_blocks) {
1554  unsigned int block = 0;
1555  unsigned int tile, block_tile;
1556 
1557  for (tile = 0; tile < s->active_tile_cols; tile++) {
1558  VP9TileData *td = &s->td[tile];
1559 
1560  for (block_tile = 0; block_tile < td->nb_block_structure; block_tile++) {
1562  unsigned int row = td->block_structure[block_tile].row;
1563  unsigned int col = td->block_structure[block_tile].col;
1564  uint8_t seg_id = frame->segmentation_map[row * 8 * s->sb_cols + col];
1565 
1566  b->src_x = col * 8;
1567  b->src_y = row * 8;
1568  b->w = 1 << (3 + td->block_structure[block_tile].block_size_idx_x);
1569  b->h = 1 << (3 + td->block_structure[block_tile].block_size_idx_y);
1570 
1571  if (s->s.h.segmentation.feat[seg_id].q_enabled) {
1572  b->delta_qp = s->s.h.segmentation.feat[seg_id].q_val;
1573  if (s->s.h.segmentation.absolute_vals)
1574  b->delta_qp -= par->qp;
1575  }
1576  }
1577  }
1578  }
1579 
1580  return 0;
1581 }
1582 
1584  int *got_frame, AVPacket *pkt)
1585 {
1586  const uint8_t *data = pkt->data;
1587  int size = pkt->size;
1588  VP9Context *s = avctx->priv_data;
1589  int ret, i, j, ref;
1590  CodedBitstreamUnit *unit;
1591  VP9RawFrame *rf;
1592 
1593  int retain_segmap_ref = s->s.frames[REF_FRAME_SEGMAP].segmentation_map &&
1594  (!s->s.h.segmentation.enabled || !s->s.h.segmentation.update_map);
1595  const VP9Frame *src;
1596  AVFrame *f;
1597 
1598  ret = ff_cbs_read_packet(s->cbc, &s->current_frag, pkt);
1599  if (ret < 0) {
1600  ff_cbs_fragment_reset(&s->current_frag);
1601  av_log(avctx, AV_LOG_ERROR, "Failed to read frame header.\n");
1602  return ret;
1603  }
1604 
1605  unit = &s->current_frag.units[0];
1606  rf = unit->content;
1607 
1608  av_refstruct_replace(&s->header_ref, unit->content_ref);
1609  s->frame_header = &rf->header;
1610 
1611  if ((ret = decode_frame_header(avctx, data, size, &ref)) < 0) {
1612  ff_cbs_fragment_reset(&s->current_frag);
1613  return ret;
1614  } else if (ret == 0) {
1615  if (!s->s.refs[ref].f) {
1616  av_log(avctx, AV_LOG_ERROR, "Requested reference %d not available\n", ref);
1617  ff_cbs_fragment_reset(&s->current_frag);
1618  return AVERROR_INVALIDDATA;
1619  }
1620  for (int i = 0; i < 8; i++)
1621  ff_progress_frame_replace(&s->next_refs[i], &s->s.refs[i]);
1622  ff_thread_finish_setup(avctx);
1623  ff_progress_frame_await(&s->s.refs[ref], INT_MAX);
1624  ff_cbs_fragment_reset(&s->current_frag);
1625 
1626  if ((ret = av_frame_ref(frame, s->s.refs[ref].f)) < 0)
1627  return ret;
1628  frame->pts = pkt->pts;
1629  frame->pkt_dts = pkt->dts;
1630  *got_frame = 1;
1631  return pkt->size;
1632  }
1633  data += ret;
1634  size -= ret;
1635 
1636  src = !s->s.h.keyframe && !s->s.h.intraonly && !s->s.h.errorres ?
1637  &s->s.frames[CUR_FRAME] : &s->s.frames[BLANK_FRAME];
1638  if (!retain_segmap_ref || s->s.h.keyframe || s->s.h.intraonly)
1639  vp9_frame_replace(&s->s.frames[REF_FRAME_SEGMAP], src);
1640  vp9_frame_replace(&s->s.frames[REF_FRAME_MVPAIR], src);
1641  vp9_frame_unref(&s->s.frames[CUR_FRAME]);
1642  if ((ret = vp9_frame_alloc(avctx, &s->s.frames[CUR_FRAME])) < 0) {
1643  ff_cbs_fragment_reset(&s->current_frag);
1644  return ret;
1645  }
1646 
1647  s->s.frames[CUR_FRAME].header_ref = av_refstruct_ref(s->header_ref);
1648  s->s.frames[CUR_FRAME].frame_header = s->frame_header;
1649 
1650  f = s->s.frames[CUR_FRAME].tf.f;
1651  if (s->s.h.keyframe)
1652  f->flags |= AV_FRAME_FLAG_KEY;
1653  else
1654  f->flags &= ~AV_FRAME_FLAG_KEY;
1655  if (s->s.h.lossless)
1656  f->flags |= AV_FRAME_FLAG_LOSSLESS;
1657  else
1658  f->flags &= ~AV_FRAME_FLAG_LOSSLESS;
1659  f->pict_type = (s->s.h.keyframe || s->s.h.intraonly) ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
1660 
1661  // Non-existent frames have the implicit dimension 0x0 != CUR_FRAME
1662  if (!s->s.frames[REF_FRAME_MVPAIR].tf.f ||
1663  (s->s.frames[REF_FRAME_MVPAIR].tf.f->width != s->s.frames[CUR_FRAME].tf.f->width ||
1664  s->s.frames[REF_FRAME_MVPAIR].tf.f->height != s->s.frames[CUR_FRAME].tf.f->height)) {
1665  vp9_frame_unref(&s->s.frames[REF_FRAME_SEGMAP]);
1666  }
1667 
1668  // ref frame setup
1669  for (i = 0; i < 8; i++) {
1670  ff_progress_frame_replace(&s->next_refs[i],
1671  s->s.h.refreshrefmask & (1 << i) ?
1672  &s->s.frames[CUR_FRAME].tf : &s->s.refs[i]);
1673  }
1674 
1675  if (avctx->hwaccel) {
1676  const FFHWAccel *hwaccel = ffhwaccel(avctx->hwaccel);
1677  ret = hwaccel->start_frame(avctx, pkt->buf, pkt->data, pkt->size);
1678  if (ret < 0)
1679  return ret;
1680  ret = hwaccel->decode_slice(avctx, pkt->data, pkt->size);
1681  if (ret < 0)
1682  return ret;
1683  ret = hwaccel->end_frame(avctx);
1684  if (ret < 0)
1685  return ret;
1686 
1687  for (i = 0; i < 8; i++) {
1688  vp9_frame_replace(&s->s.ref_frames[i],
1689  s->s.h.refreshrefmask & (1 << i) ?
1690  &s->s.frames[CUR_FRAME] : &s->s.ref_frames[i]);
1691  }
1692 
1693  goto finish;
1694  }
1695 
1696  // main tile decode loop
1697  memset(s->above_partition_ctx, 0, s->cols);
1698  memset(s->above_skip_ctx, 0, s->cols);
1699  if (s->s.h.keyframe || s->s.h.intraonly) {
1700  memset(s->above_mode_ctx, DC_PRED, s->cols * 2);
1701  } else {
1702  memset(s->above_mode_ctx, NEARESTMV, s->cols);
1703  }
1704  memset(s->above_y_nnz_ctx, 0, s->sb_cols * 16);
1705  memset(s->above_uv_nnz_ctx[0], 0, s->sb_cols * 16 >> s->ss_h);
1706  memset(s->above_uv_nnz_ctx[1], 0, s->sb_cols * 16 >> s->ss_h);
1707  memset(s->above_segpred_ctx, 0, s->cols);
1708  s->pass = s->s.frames[CUR_FRAME].uses_2pass =
1709  avctx->active_thread_type == FF_THREAD_FRAME && s->s.h.refreshctx && !s->s.h.parallelmode;
1710  if ((ret = update_block_buffers(avctx)) < 0) {
1711  av_log(avctx, AV_LOG_ERROR,
1712  "Failed to allocate block buffers\n");
1713  return ret;
1714  }
1715  if (s->s.h.refreshctx && s->s.h.parallelmode) {
1716  int j, k, l, m;
1717 
1718  for (i = 0; i < 4; i++) {
1719  for (j = 0; j < 2; j++)
1720  for (k = 0; k < 2; k++)
1721  for (l = 0; l < 6; l++)
1722  for (m = 0; m < 6; m++)
1723  memcpy(s->prob_ctx[s->s.h.framectxid].coef[i][j][k][l][m],
1724  s->prob.coef[i][j][k][l][m], 3);
1725  if (s->s.h.txfmmode == i)
1726  break;
1727  }
1728  s->prob_ctx[s->s.h.framectxid].p = s->prob.p;
1729  ff_thread_finish_setup(avctx);
1730  } else if (!s->s.h.refreshctx) {
1731  ff_thread_finish_setup(avctx);
1732  }
1733 
1734 #if HAVE_THREADS
1735  if (avctx->active_thread_type & FF_THREAD_SLICE) {
1736  for (i = 0; i < s->sb_rows; i++)
1737  atomic_init(&s->entries[i], 0);
1738  }
1739 #endif
1740 
1741  do {
1742  for (i = 0; i < s->active_tile_cols; i++) {
1743  s->td[i].b = s->td[i].b_base;
1744  s->td[i].block = s->td[i].block_base;
1745  s->td[i].uvblock[0] = s->td[i].uvblock_base[0];
1746  s->td[i].uvblock[1] = s->td[i].uvblock_base[1];
1747  s->td[i].eob = s->td[i].eob_base;
1748  s->td[i].uveob[0] = s->td[i].uveob_base[0];
1749  s->td[i].uveob[1] = s->td[i].uveob_base[1];
1750  s->td[i].error_info = 0;
1751  }
1752 
1753 #if HAVE_THREADS
1754  if (avctx->active_thread_type == FF_THREAD_SLICE) {
1755  int tile_row, tile_col;
1756 
1757  av_assert1(!s->pass);
1758 
1759  for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
1760  for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
1761  int64_t tile_size;
1762 
1763  if (tile_col == s->s.h.tiling.tile_cols - 1 &&
1764  tile_row == s->s.h.tiling.tile_rows - 1) {
1765  tile_size = size;
1766  } else {
1767  tile_size = AV_RB32(data);
1768  data += 4;
1769  size -= 4;
1770  }
1771  if (tile_size > size)
1772  return AVERROR_INVALIDDATA;
1773  ret = ff_vpx_init_range_decoder(&s->td[tile_col].c_b[tile_row], data, tile_size);
1774  if (ret < 0)
1775  return ret;
1776  if (vpx_rac_get_prob_branchy(&s->td[tile_col].c_b[tile_row], 128)) // marker bit
1777  return AVERROR_INVALIDDATA;
1778  data += tile_size;
1779  size -= tile_size;
1780  }
1781  }
1782 
1783  ff_slice_thread_execute_with_mainfunc(avctx, decode_tiles_mt, loopfilter_proc, s->td, NULL, s->s.h.tiling.tile_cols);
1784  } else
1785 #endif
1786  {
1787  ret = decode_tiles(avctx, data, size);
1788  if (ret < 0)
1789  goto fail;
1790  }
1791 
1792  // Sum all counts fields into td[0].counts for tile threading
1793  if (avctx->active_thread_type == FF_THREAD_SLICE)
1794  for (i = 1; i < s->s.h.tiling.tile_cols; i++)
1795  for (j = 0; j < sizeof(s->td[i].counts) / sizeof(unsigned); j++)
1796  ((unsigned *)&s->td[0].counts)[j] += ((unsigned *)&s->td[i].counts)[j];
1797 
1798  if (s->pass < 2 && s->s.h.refreshctx && !s->s.h.parallelmode) {
1800  ff_thread_finish_setup(avctx);
1801  }
1802  } while (s->pass++ == 1);
1803 
1804  if (s->td->error_info < 0) {
1805  av_log(avctx, AV_LOG_ERROR, "Failed to decode tile data\n");
1806  s->td->error_info = 0;
1808  goto fail;
1809  }
1811  ret = vp9_export_enc_params(s, &s->s.frames[CUR_FRAME]);
1812  if (ret < 0)
1813  goto fail;
1814  }
1815 
1816 finish:
1817  ff_cbs_fragment_reset(&s->current_frag);
1818 
1819  ff_progress_frame_report(&s->s.frames[CUR_FRAME].tf, INT_MAX);
1820  // ref frame setup
1821  for (int i = 0; i < 8; i++)
1822  ff_progress_frame_replace(&s->s.refs[i], &s->next_refs[i]);
1823 
1824  if (!s->s.h.invisible) {
1825  if ((ret = av_frame_ref(frame, s->s.frames[CUR_FRAME].tf.f)) < 0)
1826  return ret;
1827  *got_frame = 1;
1828  }
1829 
1830  return pkt->size;
1831 fail:
1832  ff_cbs_fragment_reset(&s->current_frag);
1833  ff_progress_frame_report(&s->s.frames[CUR_FRAME].tf, INT_MAX);
1834  return ret;
1835 }
1836 
1838 {
1839  VP9Context *s = avctx->priv_data;
1840  int i;
1841 
1842  for (i = 0; i < 3; i++)
1843  vp9_frame_unref(&s->s.frames[i]);
1844 
1845  for (i = 0; i < 8; i++) {
1846  ff_progress_frame_unref(&s->s.refs[i]);
1847  vp9_frame_unref(&s->s.ref_frames[i]);
1848  }
1849 
1850  ff_cbs_fragment_reset(&s->current_frag);
1851  ff_cbs_flush(s->cbc);
1852 
1853  if (FF_HW_HAS_CB(avctx, flush))
1854  FF_HW_SIMPLE_CALL(avctx, flush);
1855 }
1856 
1858 {
1859  VP9Context *s = avctx->priv_data;
1860  int ret;
1861 
1862  s->last_bpp = 0;
1863  s->s.h.filter.sharpness = -1;
1864 
1865  ret = ff_cbs_init(&s->cbc, AV_CODEC_ID_VP9, avctx);
1866  if (ret < 0)
1867  return ret;
1868 
1869 #if HAVE_THREADS
1870  if (avctx->active_thread_type & FF_THREAD_SLICE) {
1871  ret = ff_pthread_init(s, vp9_context_offsets);
1872  if (ret < 0)
1873  return ret;
1874  }
1875 #endif
1876 
1877  return 0;
1878 }
1879 
1880 #if HAVE_THREADS
1881 static int vp9_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
1882 {
1883  VP9Context *s = dst->priv_data, *ssrc = src->priv_data;
1884 
1885  for (int i = 0; i < 3; i++)
1886  vp9_frame_replace(&s->s.frames[i], &ssrc->s.frames[i]);
1887  for (int i = 0; i < 8; i++)
1888  ff_progress_frame_replace(&s->s.refs[i], &ssrc->next_refs[i]);
1889  av_refstruct_replace(&s->frame_extradata_pool, ssrc->frame_extradata_pool);
1890  s->frame_extradata_pool_size = ssrc->frame_extradata_pool_size;
1891 
1892  av_refstruct_replace(&s->header_ref, ssrc->header_ref);
1893  for (int i = 0; i < 8; i++)
1894  vp9_frame_replace(&s->s.ref_frames[i], &ssrc->s.ref_frames[i]);
1895 
1896  s->frame_header = ssrc->frame_header;
1897  memcpy(s->cbc->priv_data, ssrc->cbc->priv_data, sizeof(CodedBitstreamVP9Context));
1898 
1899  s->s.h.invisible = ssrc->s.h.invisible;
1900  s->s.h.keyframe = ssrc->s.h.keyframe;
1901  s->s.h.intraonly = ssrc->s.h.intraonly;
1902  s->ss_v = ssrc->ss_v;
1903  s->ss_h = ssrc->ss_h;
1904  s->s.h.segmentation.enabled = ssrc->s.h.segmentation.enabled;
1905  s->s.h.segmentation.update_map = ssrc->s.h.segmentation.update_map;
1906  s->s.h.segmentation.absolute_vals = ssrc->s.h.segmentation.absolute_vals;
1907  s->bytesperpixel = ssrc->bytesperpixel;
1908  s->gf_fmt = ssrc->gf_fmt;
1909  s->w = ssrc->w;
1910  s->h = ssrc->h;
1911  s->s.h.bpp = ssrc->s.h.bpp;
1912  s->bpp_index = ssrc->bpp_index;
1913  s->pix_fmt = ssrc->pix_fmt;
1914  memcpy(&s->prob_ctx, &ssrc->prob_ctx, sizeof(s->prob_ctx));
1915  memcpy(&s->s.h.lf_delta, &ssrc->s.h.lf_delta, sizeof(s->s.h.lf_delta));
1916  memcpy(&s->s.h.segmentation.feat, &ssrc->s.h.segmentation.feat,
1917  sizeof(s->s.h.segmentation.feat));
1918 
1919  return 0;
1920 }
1921 #endif
1922 
1924  .p.name = "vp9",
1925  CODEC_LONG_NAME("Google VP9"),
1926  .p.type = AVMEDIA_TYPE_VIDEO,
1927  .p.id = AV_CODEC_ID_VP9,
1928  .priv_data_size = sizeof(VP9Context),
1929  .init = vp9_decode_init,
1933  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
1936  .flush = vp9_decode_flush,
1937  UPDATE_THREAD_CONTEXT(vp9_decode_update_thread_context),
1938  .p.profiles = NULL_IF_CONFIG_SMALL(ff_vp9_profiles),
1939  .bsfs = "vp9_superframe_split",
1940  .hw_configs = (const AVCodecHWConfigInternal *const []) {
1941 #if CONFIG_VP9_DXVA2_HWACCEL
1942  HWACCEL_DXVA2(vp9),
1943 #endif
1944 #if CONFIG_VP9_D3D11VA_HWACCEL
1945  HWACCEL_D3D11VA(vp9),
1946 #endif
1947 #if CONFIG_VP9_D3D11VA2_HWACCEL
1948  HWACCEL_D3D11VA2(vp9),
1949 #endif
1950 #if CONFIG_VP9_D3D12VA_HWACCEL
1951  HWACCEL_D3D12VA(vp9),
1952 #endif
1953 #if CONFIG_VP9_NVDEC_HWACCEL
1954  HWACCEL_NVDEC(vp9),
1955 #endif
1956 #if CONFIG_VP9_VAAPI_HWACCEL
1957  HWACCEL_VAAPI(vp9),
1958 #endif
1959 #if CONFIG_VP9_VDPAU_HWACCEL
1960  HWACCEL_VDPAU(vp9),
1961 #endif
1962 #if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
1963  HWACCEL_VIDEOTOOLBOX(vp9),
1964 #endif
1965 #if CONFIG_VP9_VULKAN_HWACCEL
1966  HWACCEL_VULKAN(vp9),
1967 #endif
1968  NULL
1969  },
1970 };
VP9TileData::left_y_nnz_ctx
uint8_t left_y_nnz_ctx[16]
Definition: vp9dec.h:216
HWACCEL_D3D12VA
#define HWACCEL_D3D12VA(codec)
Definition: hwconfig.h:80
AVVideoEncParams::qp
int32_t qp
Base quantisation parameter for the frame.
Definition: video_enc_params.h:103
hwconfig.h
ff_progress_frame_report
void ff_progress_frame_report(ProgressFrame *f, int n)
Notify later decoding threads when part of their reference frame is ready.
Definition: decode.c:1967
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1417
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:73
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:260
FF_CODEC_CAP_SLICE_THREAD_HAS_MF
#define FF_CODEC_CAP_SLICE_THREAD_HAS_MF
Codec initializes slice-based threading with a main function.
Definition: codec_internal.h:65
decode_tiles
static int decode_tiles(AVCodecContext *avctx, const uint8_t *data, int size)
Definition: vp9.c:1292
CodedBitstreamUnit::content_ref
void * content_ref
If content is reference counted, a RefStruct reference backing content.
Definition: cbs.h:119
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
vp9_frame_alloc
static int vp9_frame_alloc(AVCodecContext *avctx, VP9Frame *f)
Definition: vp9.c:107
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:43
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
PRED_SWITCHABLE
@ PRED_SWITCHABLE
Definition: vp9shared.h:53
PRED_SINGLEREF
@ PRED_SINGLEREF
Definition: vp9shared.h:51
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:667
VP9TileData::uvblock_base
int16_t * uvblock_base[2]
Definition: vp9dec.h:232
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1220
VP9TileData::partition
unsigned partition[4][4][4]
Definition: vp9dec.h:207
VP9Frame
Definition: vp9shared.h:66
av_clip_uintp2
#define av_clip_uintp2
Definition: common.h:124
ff_vp9_decoder
const FFCodec ff_vp9_decoder
Definition: vp9.c:1923
decode_sb
static void decode_sb(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
Definition: vp9.c:1118
ff_vp9_adapt_probs
void ff_vp9_adapt_probs(VP9Context *s)
Definition: vp9prob.c:44
CodedBitstreamUnit::content
void * content
Pointer to the decomposed form of this unit.
Definition: cbs.h:114
int64_t
long long int64_t
Definition: coverity.c:34
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:254
VP9TileData::left_skip_ctx
uint8_t left_skip_ctx[8]
Definition: vp9dec.h:221
VP9TileData::row
int row
Definition: vp9dec.h:177
VP9TileData::counts
struct VP9TileData::@330 counts
PRED_COMPREF
@ PRED_COMPREF
Definition: vp9shared.h:52
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:434
pixdesc.h
HWACCEL_DXVA2
#define HWACCEL_DXVA2(codec)
Definition: hwconfig.h:64
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:777
BlockPartition
BlockPartition
Definition: vp9shared.h:36
AVPacket::data
uint8_t * data
Definition: packet.h:595
DC_PRED
@ DC_PRED
Definition: vp9.h:48
pthread_mutex_lock
static av_always_inline int pthread_mutex_lock(pthread_mutex_t *mutex)
Definition: os2threads.h:119
HWACCEL_D3D11VA2
#define HWACCEL_D3D11VA2(codec)
Definition: hwconfig.h:66
b
#define b
Definition: input.c:42
ff_progress_frame_get_buffer
int ff_progress_frame_get_buffer(AVCodecContext *avctx, ProgressFrame *f, int flags)
Wrapper around ff_progress_frame_alloc() and ff_thread_get_buffer().
Definition: decode.c:1927
data
const char data[16]
Definition: mxf.c:149
update_size
static int update_size(AVCodecContext *avctx, int w, int h)
Definition: vp9.c:165
decode_sb_mem
static void decode_sb_mem(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
Definition: vp9.c:1198
REF_FRAME_SEGMAP
#define REF_FRAME_SEGMAP
Definition: vp9shared.h:174
decode_frame_header
static int decode_frame_header(AVCodecContext *avctx, const uint8_t *data, int size, int *ref)
Definition: vp9.c:521
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:539
atomic_int
intptr_t atomic_int
Definition: stdatomic.h:55
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:254
FFCodec
Definition: codec_internal.h:127
VP9TileData::c_b
VPXRangeCoder * c_b
Definition: vp9dec.h:175
AVCOL_SPC_RGB
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB), YZX and ST 428-1
Definition: pixfmt.h:701
VP9TileData::left_segpred_ctx
uint8_t left_segpred_ctx[8]
Definition: vp9dec.h:223
FF_HW_SIMPLE_CALL
#define FF_HW_SIMPLE_CALL(avctx, function)
Definition: hwaccel_internal.h:176
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:106
max
#define max(a, b)
Definition: cuda_runtime.h:33
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
VP9_SYNCCODE
#define VP9_SYNCCODE
Definition: vp9.c:50
VP9Block::bl
enum BlockLevel bl
Definition: vp9dec.h:91
vp89_rac.h
VP9Filter
Definition: vp9dec.h:79
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
VP9TileData::b
VP9Block * b
Definition: vp9dec.h:180
VPXRangeCoder
Definition: vpx_rac.h:35
thread.h
ff_pthread_free
av_cold void ff_pthread_free(void *obj, const unsigned offsets[])
Definition: pthread.c:92
AV_PIX_FMT_VULKAN
@ AV_PIX_FMT_VULKAN
Vulkan hardware images.
Definition: pixfmt.h:379
FILTER_SWITCHABLE
@ FILTER_SWITCHABLE
Definition: vp9.h:70
CodedBitstreamUnit
Coded bitstream unit structure.
Definition: cbs.h:77
VP9Block
Definition: vp9dec.h:85
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:383
close
static av_cold void close(AVCodecParserContext *s)
Definition: apv_parser.c:197
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:706
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:337
AVCOL_SPC_RESERVED
@ AVCOL_SPC_RESERVED
reserved for future use by ITU-T and ISO/IEC just like 15-255 are
Definition: pixfmt.h:704
TX_SWITCHABLE
@ TX_SWITCHABLE
Definition: vp9.h:33
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
finish
static void finish(void)
Definition: movenc.c:374
FFHWAccel
Definition: hwaccel_internal.h:34
ff_vp9_ac_qlookup
const int16_t ff_vp9_ac_qlookup[3][256]
Definition: vp9data.c:334
AVVideoEncParams::delta_qp
int32_t delta_qp[4][2]
Quantisation parameter offset from the base (per-frame) qp for a given plane (first index) and AC/DC ...
Definition: video_enc_params.h:109
fail
#define fail()
Definition: checkasm.h:224
AV_REFSTRUCT_POOL_FLAG_ZERO_EVERY_TIME
#define AV_REFSTRUCT_POOL_FLAG_ZERO_EVERY_TIME
If this flag is set, the entries will be zeroed before being returned to the user (after the init or ...
Definition: refstruct.h:221
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:558
GetBitContext
Definition: get_bits.h:109
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:496
HWACCEL_VDPAU
#define HWACCEL_VDPAU(codec)
Definition: hwconfig.h:72
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:39
PARTITION_NONE
@ PARTITION_NONE
Definition: vp9shared.h:37
vp9_frame_unref
static void vp9_frame_unref(VP9Frame *f)
Definition: vp9.c:98
progressframe.h
refstruct.h
AVVideoEncParams
Video encoding parameters for a given frame.
Definition: video_enc_params.h:73
VP9TileData::col
int col
Definition: vp9dec.h:177
vp9_decode_free
static av_cold int vp9_decode_free(AVCodecContext *avctx)
Definition: vp9.c:1264
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:542
avassert.h
FF_CODEC_CAP_USES_PROGRESSFRAMES
#define FF_CODEC_CAP_USES_PROGRESSFRAMES
The decoder might make use of the ProgressFrame API.
Definition: codec_internal.h:69
ff_vp9_model_pareto8
const uint8_t ff_vp9_model_pareto8[256][8]
Definition: vp9data.c:1176
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:111
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:544
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1650
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:649
BL_8X8
@ BL_8X8
Definition: vp9shared.h:83
PARTITION_V
@ PARTITION_V
Definition: vp9shared.h:39
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:347
ff_hwaccel_frame_priv_alloc
int ff_hwaccel_frame_priv_alloc(AVCodecContext *avctx, void **hwaccel_picture_private)
Allocate a hwaccel frame private data if the provided avctx uses a hwaccel method that needs it.
Definition: decode.c:2324
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:134
s
#define s(width, name)
Definition: cbs_vp9.c:198
pthread_mutex_unlock
static av_always_inline int pthread_mutex_unlock(pthread_mutex_t *mutex)
Definition: os2threads.h:126
AVCOL_SPC_SMPTE170M
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
Definition: pixfmt.h:707
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:411
AV_CODEC_ID_VP9
@ AV_CODEC_ID_VP9
Definition: codec_id.h:222
vp9data.h
bits
uint8_t bits
Definition: vp3data.h:128
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:42
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
ff_progress_frame_unref
void ff_progress_frame_unref(ProgressFrame *f)
Give up a reference to the underlying frame contained in a ProgressFrame and reset the ProgressFrame,...
Definition: decode.c:1950
ff_progress_frame_await
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_progress_frame_await() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_progress_frame_report() has been called on them. This includes draw_edges(). Porting codecs to frame threading
decode.h
get_bits.h
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
VP9TileData::block_size_idx_x
unsigned int block_size_idx_x
Definition: vp9dec.h:240
ff_vp9dsp_init
av_cold void ff_vp9dsp_init(VP9DSPContext *dsp, int bpp, int bitexact)
Definition: vp9dsp.c:88
ff_vp9_partition_tree
const int8_t ff_vp9_partition_tree[3][2]
Definition: vp9data.c:35
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
av_mallocz
#define av_mallocz(s)
Definition: tableprint_vlc.h:31
vp9_decode_frame
static int vp9_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
Definition: vp9.c:1583
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:332
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:95
AVPacket::buf
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
Definition: packet.h:578
NULL
#define NULL
Definition: coverity.c:32
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:677
hwaccel_internal.h
VP9Context
Definition: vp9dec.h:97
av_unreachable
#define av_unreachable(msg)
Asserts that are used as compiler optimization hints depending upon ASSERT_LEVEL and NBDEBUG.
Definition: avassert.h:116
REF_FRAME_MVPAIR
#define REF_FRAME_MVPAIR
Definition: vp9shared.h:173
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:391
vp89_rac_get_uint
static av_unused int vp89_rac_get_uint(VPXRangeCoder *c, int bits)
Definition: vp89_rac.h:41
profiles.h
AV_PIX_FMT_YUV440P10
#define AV_PIX_FMT_YUV440P10
Definition: pixfmt.h:541
flush
void(* flush)(AVBSFContext *ctx)
Definition: dts2pts.c:552
av_refstruct_pool_get
void * av_refstruct_pool_get(AVRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.
Definition: refstruct.c:297
pthread_internal.h
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:341
AV_PIX_FMT_D3D12
@ AV_PIX_FMT_D3D12
Hardware surfaces for Direct3D 12.
Definition: pixfmt.h:440
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:540
VP9mv
Definition: vp9shared.h:56
PARTITION_SPLIT
@ PARTITION_SPLIT
Definition: vp9shared.h:40
FF_HW_HAS_CB
#define FF_HW_HAS_CB(avctx, function)
Definition: hwaccel_internal.h:179
VP9RawFrame
Definition: cbs_vp9.h:164
atomic_load_explicit
#define atomic_load_explicit(object, order)
Definition: stdatomic.h:96
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
vp9_frame_replace
static void vp9_frame_replace(VP9Frame *dst, const VP9Frame *src)
Definition: vp9.c:148
VP9RawFrame::header
VP9RawFrameHeader header
Definition: cbs_vp9.h:165
av_video_enc_params_create_side_data
AVVideoEncParams * av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type, unsigned int nb_blocks)
Allocates memory for AVEncodeInfoFrame plus an array of.
Definition: video_enc_params.c:58
vp9.h
f
f
Definition: af_crystalizer.c:122
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:550
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:596
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
codec_internal.h
VP9TileData::eob_base
uint8_t * eob_base
Definition: vp9dec.h:233
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
pix_fmt_rgb
static enum AVPixelFormat pix_fmt_rgb[3]
Definition: libdav1d.c:66
i
#define i(width, name, range_min, range_max)
Definition: cbs_h264.c:63
REF_INVALID_SCALE
#define REF_INVALID_SCALE
Definition: vp9dec.h:43
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
read_colorspace_details
static int read_colorspace_details(AVCodecContext *avctx)
Definition: vp9.c:459
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:544
size
int size
Definition: twinvq_data.h:10344
vp9_alloc_entries
static int vp9_alloc_entries(AVCodecContext *avctx, int n)
Definition: vp9.c:88
atomic_fetch_add_explicit
#define atomic_fetch_add_explicit(object, operand, order)
Definition: stdatomic.h:149
VP9TileData::b_base
VP9Block * b_base
Definition: vp9dec.h:180
free_buffers
static void free_buffers(VP9Context *s)
Definition: vp9.c:1255
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:546
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1585
AVCodecHWConfigInternal
Definition: hwconfig.h:25
TX_4X4
@ TX_4X4
Definition: vp9.h:28
update_block_buffers
static int update_block_buffers(AVCodecContext *avctx)
Definition: vp9.c:324
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:594
av_refstruct_ref
void * av_refstruct_ref(void *obj)
Create a new reference to an object managed via this API, i.e.
Definition: refstruct.c:140
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:99
CodedBitstreamVP9Context
Definition: cbs_vp9.h:192
HWACCEL_D3D11VA
#define HWACCEL_D3D11VA(codec)
Definition: hwconfig.h:78
attributes.h
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:336
VP9TileData::block_base
int16_t * block_base
Definition: vp9dec.h:232
inv_recenter_nonneg
static av_always_inline int inv_recenter_nonneg(int v, int m)
Definition: vp9.c:391
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:68
vpx_rac_is_end
static av_always_inline int vpx_rac_is_end(VPXRangeCoder *c)
returns 1 if the end of the stream has been reached, 0 otherwise.
Definition: vpx_rac.h:51
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:126
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1584
VP9TileData::left_uv_nnz_ctx
uint8_t left_uv_nnz_ctx[2][16]
Definition: vp9dec.h:219
av_refstruct_unref
void av_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are...
Definition: refstruct.c:120
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:194
ff_slice_thread_execute_with_mainfunc
int ff_slice_thread_execute_with_mainfunc(AVCodecContext *avctx, action_func2 *func2, main_func *mainfunc, void *arg, int *ret, int job_count)
Definition: pthread_slice.c:104
AVCOL_SPC_SMPTE240M
@ AVCOL_SPC_SMPTE240M
derived from 170M primaries and D65 white point, 170M is derived from BT470 System M's primaries
Definition: pixfmt.h:708
assign
#define assign(var, type, n)
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:305
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:68
update_prob
static int update_prob(VPXRangeCoder *c, int p)
Definition: vp9.c:401
av_malloc
#define av_malloc(s)
Definition: ops_asmgen.c:44
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:588
DEFINE_OFFSET_ARRAY
#define DEFINE_OFFSET_ARRAY(type, name, cnt_variable, mutexes, conds)
Definition: pthread_internal.h:61
AVCOL_SPC_BT2020_NCL
@ AVCOL_SPC_BT2020_NCL
ITU-R BT2020 non-constant luminance system.
Definition: pixfmt.h:711
vpx_rac.h
decode012
static int BS_FUNC() decode012(BSCTX *bc)
Return decoded truncated unary code for the values 0, 1, 2.
Definition: bitstream_template.h:444
VP9TileData::block_size_idx_y
unsigned int block_size_idx_y
Definition: vp9dec.h:241
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:559
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:700
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:58
av_always_inline
#define av_always_inline
Definition: attributes.h:68
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
vpx_rac_get_prob_branchy
static av_always_inline int vpx_rac_get_prob_branchy(VPXRangeCoder *c, int prob)
Definition: vpx_rac.h:99
AVVideoBlockParams
Data structure for storing block-level encoding information.
Definition: video_enc_params.h:120
VP9TileData::block_structure
struct VP9TileData::@332 * block_structure
get_sbits_inv
static av_always_inline int get_sbits_inv(GetBitContext *gb, int n)
Definition: vp9.c:385
VP9TileData::left_mode_ctx
uint8_t left_mode_ctx[16]
Definition: vp9dec.h:217
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:703
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:639
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:760
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
VP9TileData::c
VPXRangeCoder * c
Definition: vp9dec.h:176
HWACCEL_VIDEOTOOLBOX
#define HWACCEL_VIDEOTOOLBOX(codec)
Definition: hwconfig.h:74
avcodec.h
limit
static double limit(double x)
Definition: vf_pseudocolor.c:142
vp89_rac_get_tree
static av_always_inline int vp89_rac_get_tree(VPXRangeCoder *c, const int8_t(*tree)[2], const uint8_t *probs)
Definition: vp89_rac.h:54
VP9TileData::s
const VP9Context * s
Definition: vp9dec.h:174
BL_64X64
@ BL_64X64
Definition: vp9shared.h:80
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
vp9_decode_init
static av_cold int vp9_decode_init(AVCodecContext *avctx)
Definition: vp9.c:1857
tile
static int FUNC() tile(CodedBitstreamContext *ctx, RWContext *rw, APVRawTile *current, int tile_idx, uint32_t tile_size)
Definition: cbs_apv_syntax_template.c:224
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:560
hwaccel
static const char * hwaccel
Definition: ffplay.c:356
ff_vpx_init_range_decoder
int ff_vpx_init_range_decoder(VPXRangeCoder *c, const uint8_t *buf, int buf_size)
Definition: vpx_rac.c:42
av_refstruct_pool_alloc
AVRefStructPool * av_refstruct_pool_alloc(size_t size, unsigned flags)
Equivalent to av_refstruct_pool_alloc(size, flags, NULL, NULL, NULL, NULL, NULL)
Definition: refstruct.c:335
vp9_tile_data_free
static void vp9_tile_data_free(VP9TileData *td)
Definition: vp9.c:91
ff_thread_finish_setup
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards. If some code can 't be moved
VP9mvrefPair
Definition: vp9shared.h:61
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:543
pthread_cond_signal
static av_always_inline int pthread_cond_signal(pthread_cond_t *cond)
Definition: os2threads.h:152
AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
Decoding only.
Definition: avcodec.h:395
ff_progress_frame_replace
void ff_progress_frame_replace(ProgressFrame *dst, const ProgressFrame *src)
Do nothing if dst and src already refer to the same AVFrame; otherwise unreference dst and if src is ...
Definition: decode.c:1957
VP9TileData
Definition: vp9dec.h:173
VP9TileData::uveob_base
uint8_t * uveob_base[2]
Definition: vp9dec.h:233
HWACCEL_VULKAN
#define HWACCEL_VULKAN(codec)
Definition: hwconfig.h:76
vp89_rac_get
static av_always_inline int vp89_rac_get(VPXRangeCoder *c)
Definition: vp89_rac.h:36
AVCodecContext
main external API structure.
Definition: avcodec.h:439
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1592
VP9Filter::mask
uint8_t mask[2][2][8][4]
Definition: vp9dec.h:82
av_refstruct_replace
void av_refstruct_replace(void *dstp, const void *src)
Ensure *dstp refers to the same object as src.
Definition: refstruct.c:160
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1630
ffhwaccel
static const FFHWAccel * ffhwaccel(const AVHWAccel *codec)
Definition: hwaccel_internal.h:168
ff_vp9_decode_block
void ff_vp9_decode_block(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl, enum BlockPartition bp)
Definition: vp9block.c:1264
NEARESTMV
@ NEARESTMV
Definition: vp9shared.h:44
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:117
BlockLevel
BlockLevel
Definition: vp9shared.h:79
AVCodecContext::export_side_data
int export_side_data
Bit set of AV_CODEC_EXPORT_DATA_* flags, which affects the kind of metadata exported in frame,...
Definition: avcodec.h:1786
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
ff_pthread_init
av_cold int ff_pthread_init(void *obj, const unsigned offsets[])
Initialize/destroy a list of mutexes/conditions contained in a structure.
Definition: pthread.c:105
pthread_cond_wait
static av_always_inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
Definition: os2threads.h:192
vp9dec.h
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:72
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:279
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
ff_vp9_default_kf_partition_probs
const uint8_t ff_vp9_default_kf_partition_probs[4][4][3]
Definition: vp9data.c:41
AV_VIDEO_ENC_PARAMS_VP9
@ AV_VIDEO_ENC_PARAMS_VP9
VP9 stores:
Definition: video_enc_params.h:44
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:322
ff_vp9_default_probs
const ProbContext ff_vp9_default_probs
Definition: vp9data.c:1435
CUR_FRAME
#define CUR_FRAME
Definition: vp9shared.h:172
ff_vp9_loopfilter_sb
void ff_vp9_loopfilter_sb(struct AVCodecContext *avctx, VP9Filter *lflvl, int row, int col, ptrdiff_t yoff, ptrdiff_t uvoff)
Definition: vp9lpf.c:179
w
uint8_t w
Definition: llvidencdsp.c:39
av_refstruct_pool_uninit
static void av_refstruct_pool_uninit(AVRefStructPool **poolp)
Mark the pool as being available for freeing.
Definition: refstruct.h:292
vp9_export_enc_params
static int vp9_export_enc_params(VP9Context *s, VP9Frame *frame)
Definition: vp9.c:1531
AVPacket
This structure stores compressed data.
Definition: packet.h:572
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:466
PARTITION_H
@ PARTITION_H
Definition: vp9shared.h:38
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
videodsp.h
BLANK_FRAME
#define BLANK_FRAME
Definition: vp9shared.h:175
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:70
AVCodecContext::properties
attribute_deprecated unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1649
HWACCEL_MAX
#define HWACCEL_MAX
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_video_enc_params_block
static av_always_inline AVVideoBlockParams * av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
Get the block at the specified.
Definition: video_enc_params.h:143
AV_PIX_FMT_YUV440P12
#define AV_PIX_FMT_YUV440P12
Definition: pixfmt.h:545
h
h
Definition: vp9dsp_template.c:2070
pkt
static AVPacket * pkt
Definition: demux_decode.c:55
atomic_init
#define atomic_init(obj, value)
Definition: stdatomic.h:33
VP9TileData::nb_block_structure
unsigned int nb_block_structure
Definition: vp9dec.h:243
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
Definition: pixfmt.h:702
VP9TileData::tile_col_start
unsigned tile_col_start
Definition: vp9dec.h:181
AV_FRAME_FLAG_LOSSLESS
#define AV_FRAME_FLAG_LOSSLESS
A decoder can use this flag to mark frames which were originally encoded losslessly.
Definition: frame.h:670
src
#define src
Definition: vp8dsp.c:248
ff_vp9_profiles
const AVProfile ff_vp9_profiles[]
Definition: profiles.c:155
video_enc_params.h
set_tile_offset
static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
Definition: vp9.c:1247
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:3376
ff_vp9_dc_qlookup
const int16_t ff_vp9_dc_qlookup[3][256]
Definition: vp9data.c:231
ff_vp9_default_coef_probs
const uint8_t ff_vp9_default_coef_probs[4][2][2][6][6][3]
Definition: vp9data.c:1540
vp9_decode_flush
static av_cold void vp9_decode_flush(AVCodecContext *avctx)
Definition: vp9.c:1837
VP9TileData::left_partition_ctx
uint8_t left_partition_ctx[8]
Definition: vp9dec.h:220