FFmpeg
vp9.c
Go to the documentation of this file.
1 /*
2  * VP9 compatible video decoder
3  *
4  * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
5  * Copyright (C) 2013 Clément Bœsch <u pkh me>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include "config_components.h"
25 
26 #include "avcodec.h"
27 #include "codec_internal.h"
28 #include "decode.h"
29 #include "get_bits.h"
30 #include "hwaccel_internal.h"
31 #include "hwconfig.h"
32 #include "profiles.h"
33 #include "thread.h"
34 #include "threadframe.h"
35 #include "pthread_internal.h"
36 
37 #include "videodsp.h"
38 #include "vp89_rac.h"
39 #include "vp9.h"
40 #include "vp9data.h"
41 #include "vp9dec.h"
42 #include "vpx_rac.h"
43 #include "libavutil/avassert.h"
44 #include "libavutil/pixdesc.h"
46 
47 #define VP9_SYNCCODE 0x498342
48 
49 #if HAVE_THREADS
/* Table of offsets of the pthread mutex/cond members inside VP9Context,
 * consumed by the generic pthread init/uninit helpers so they can
 * (de)initialize the synchronization primitives by table. */
DEFINE_OFFSET_ARRAY(VP9Context, vp9_context, pthread_init_cnt,
                    (offsetof(VP9Context, progress_mutex)),
                    (offsetof(VP9Context, progress_cond)));
53 
54 static int vp9_alloc_entries(AVCodecContext *avctx, int n) {
55  VP9Context *s = avctx->priv_data;
56  int i;
57 
58  if (avctx->active_thread_type & FF_THREAD_SLICE) {
59  if (s->entries)
60  av_freep(&s->entries);
61 
62  s->entries = av_malloc_array(n, sizeof(atomic_int));
63  if (!s->entries)
64  return AVERROR(ENOMEM);
65 
66  for (i = 0; i < n; i++)
67  atomic_init(&s->entries[i], 0);
68  }
69  return 0;
70 }
71 
/* Publish decoding progress: add n completed rows to the counter for
 * 'field' and wake any thread blocked in vp9_await_tile_progress().
 * The atomic add happens under the mutex so a waiter cannot miss the
 * signal between re-checking the counter and calling pthread_cond_wait(). */
static void vp9_report_tile_progress(VP9Context *s, int field, int n) {
    pthread_mutex_lock(&s->progress_mutex);
    atomic_fetch_add_explicit(&s->entries[field], n, memory_order_release);
    pthread_cond_signal(&s->progress_cond);
    pthread_mutex_unlock(&s->progress_mutex);
}
78 
/* Block until at least n rows of progress have been reported for 'field'.
 * The lock-free fast path pairs its acquire load with the release add in
 * vp9_report_tile_progress().
 * NOTE(review): the fast path tests ">= n" but the wait loop tests "!= n";
 * this relies on the counter never skipping past n while a waiter is
 * blocked — confirm waiters always consume progress in exact row order. */
static void vp9_await_tile_progress(VP9Context *s, int field, int n) {
    if (atomic_load_explicit(&s->entries[field], memory_order_acquire) >= n)
        return;

    pthread_mutex_lock(&s->progress_mutex);
    while (atomic_load_explicit(&s->entries[field], memory_order_relaxed) != n)
        pthread_cond_wait(&s->progress_cond, &s->progress_mutex);
    pthread_mutex_unlock(&s->progress_mutex);
}
88 #else
89 static int vp9_alloc_entries(AVCodecContext *avctx, int n) { return 0; }
90 #endif
91 
93 {
94  av_freep(&td->b_base);
95  av_freep(&td->block_base);
96  av_freep(&td->block_structure);
97 }
98 
100 {
101  ff_thread_release_ext_buffer(avctx, &f->tf);
102  av_buffer_unref(&f->extradata);
103  av_buffer_unref(&f->hwaccel_priv_buf);
104  f->segmentation_map = NULL;
105  f->hwaccel_picture_private = NULL;
106 }
107 
109 {
110  VP9Context *s = avctx->priv_data;
111  int ret, sz;
112 
114  if (ret < 0)
115  return ret;
116 
117  sz = 64 * s->sb_cols * s->sb_rows;
118  if (sz != s->frame_extradata_pool_size) {
119  av_buffer_pool_uninit(&s->frame_extradata_pool);
120  s->frame_extradata_pool = av_buffer_pool_init(sz * (1 + sizeof(VP9mvrefPair)), NULL);
121  if (!s->frame_extradata_pool) {
122  s->frame_extradata_pool_size = 0;
123  ret = AVERROR(ENOMEM);
124  goto fail;
125  }
126  s->frame_extradata_pool_size = sz;
127  }
128  f->extradata = av_buffer_pool_get(s->frame_extradata_pool);
129  if (!f->extradata) {
130  ret = AVERROR(ENOMEM);
131  goto fail;
132  }
133  memset(f->extradata->data, 0, f->extradata->size);
134 
135  f->segmentation_map = f->extradata->data;
136  f->mv = (VP9mvrefPair *) (f->extradata->data + sz);
137 
138  ret = ff_hwaccel_frame_priv_alloc(avctx, &f->hwaccel_picture_private,
139  &f->hwaccel_priv_buf);
140  if (ret < 0)
141  goto fail;
142 
143  return 0;
144 
145 fail:
146  vp9_frame_unref(avctx, f);
147  return ret;
148 }
149 
151 {
152  int ret;
153 
154  ret = ff_thread_ref_frame(&dst->tf, &src->tf);
155  if (ret < 0)
156  return ret;
157 
158  dst->extradata = av_buffer_ref(src->extradata);
159  if (!dst->extradata)
160  goto fail;
161 
162  dst->segmentation_map = src->segmentation_map;
163  dst->mv = src->mv;
164  dst->uses_2pass = src->uses_2pass;
165 
166  if (src->hwaccel_picture_private) {
167  dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
168  if (!dst->hwaccel_priv_buf)
169  goto fail;
171  }
172 
173  return 0;
174 
175 fail:
176  vp9_frame_unref(avctx, dst);
177  return AVERROR(ENOMEM);
178 }
179 
180 static int update_size(AVCodecContext *avctx, int w, int h)
181 {
182 #define HWACCEL_MAX (CONFIG_VP9_DXVA2_HWACCEL + \
183  CONFIG_VP9_D3D11VA_HWACCEL * 2 + \
184  CONFIG_VP9_NVDEC_HWACCEL + \
185  CONFIG_VP9_VAAPI_HWACCEL + \
186  CONFIG_VP9_VDPAU_HWACCEL + \
187  CONFIG_VP9_VIDEOTOOLBOX_HWACCEL)
188  enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;
189  VP9Context *s = avctx->priv_data;
190  uint8_t *p;
191  int bytesperpixel = s->bytesperpixel, ret, cols, rows;
192  int lflvl_len, i;
193 
194  av_assert0(w > 0 && h > 0);
195 
196  if (!(s->pix_fmt == s->gf_fmt && w == s->w && h == s->h)) {
197  if ((ret = ff_set_dimensions(avctx, w, h)) < 0)
198  return ret;
199 
200  switch (s->pix_fmt) {
201  case AV_PIX_FMT_YUV420P:
203 #if CONFIG_VP9_DXVA2_HWACCEL
204  *fmtp++ = AV_PIX_FMT_DXVA2_VLD;
205 #endif
206 #if CONFIG_VP9_D3D11VA_HWACCEL
207  *fmtp++ = AV_PIX_FMT_D3D11VA_VLD;
208  *fmtp++ = AV_PIX_FMT_D3D11;
209 #endif
210 #if CONFIG_VP9_NVDEC_HWACCEL
211  *fmtp++ = AV_PIX_FMT_CUDA;
212 #endif
213 #if CONFIG_VP9_VAAPI_HWACCEL
214  *fmtp++ = AV_PIX_FMT_VAAPI;
215 #endif
216 #if CONFIG_VP9_VDPAU_HWACCEL
217  *fmtp++ = AV_PIX_FMT_VDPAU;
218 #endif
219 #if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
220  *fmtp++ = AV_PIX_FMT_VIDEOTOOLBOX;
221 #endif
222  break;
224 #if CONFIG_VP9_NVDEC_HWACCEL
225  *fmtp++ = AV_PIX_FMT_CUDA;
226 #endif
227 #if CONFIG_VP9_VAAPI_HWACCEL
228  *fmtp++ = AV_PIX_FMT_VAAPI;
229 #endif
230 #if CONFIG_VP9_VDPAU_HWACCEL
231  *fmtp++ = AV_PIX_FMT_VDPAU;
232 #endif
233  break;
234  case AV_PIX_FMT_YUV444P:
237 #if CONFIG_VP9_VAAPI_HWACCEL
238  *fmtp++ = AV_PIX_FMT_VAAPI;
239 #endif
240  break;
241  case AV_PIX_FMT_GBRP:
242  case AV_PIX_FMT_GBRP10:
243  case AV_PIX_FMT_GBRP12:
244 #if CONFIG_VP9_VAAPI_HWACCEL
245  *fmtp++ = AV_PIX_FMT_VAAPI;
246 #endif
247  break;
248  }
249 
250  *fmtp++ = s->pix_fmt;
251  *fmtp = AV_PIX_FMT_NONE;
252 
253  ret = ff_get_format(avctx, pix_fmts);
254  if (ret < 0)
255  return ret;
256 
257  avctx->pix_fmt = ret;
258  s->gf_fmt = s->pix_fmt;
259  s->w = w;
260  s->h = h;
261  }
262 
263  cols = (w + 7) >> 3;
264  rows = (h + 7) >> 3;
265 
266  if (s->intra_pred_data[0] && cols == s->cols && rows == s->rows && s->pix_fmt == s->last_fmt)
267  return 0;
268 
269  s->last_fmt = s->pix_fmt;
270  s->sb_cols = (w + 63) >> 6;
271  s->sb_rows = (h + 63) >> 6;
272  s->cols = (w + 7) >> 3;
273  s->rows = (h + 7) >> 3;
274  lflvl_len = avctx->active_thread_type == FF_THREAD_SLICE ? s->sb_rows : 1;
275 
276 #define assign(var, type, n) var = (type) p; p += s->sb_cols * (n) * sizeof(*var)
277  av_freep(&s->intra_pred_data[0]);
278  // FIXME we slightly over-allocate here for subsampled chroma, but a little
279  // bit of padding shouldn't affect performance...
280  p = av_malloc(s->sb_cols * (128 + 192 * bytesperpixel +
281  lflvl_len * sizeof(*s->lflvl) + 16 * sizeof(*s->above_mv_ctx)));
282  if (!p)
283  return AVERROR(ENOMEM);
284  assign(s->intra_pred_data[0], uint8_t *, 64 * bytesperpixel);
285  assign(s->intra_pred_data[1], uint8_t *, 64 * bytesperpixel);
286  assign(s->intra_pred_data[2], uint8_t *, 64 * bytesperpixel);
287  assign(s->above_y_nnz_ctx, uint8_t *, 16);
288  assign(s->above_mode_ctx, uint8_t *, 16);
289  assign(s->above_mv_ctx, VP9mv(*)[2], 16);
290  assign(s->above_uv_nnz_ctx[0], uint8_t *, 16);
291  assign(s->above_uv_nnz_ctx[1], uint8_t *, 16);
292  assign(s->above_partition_ctx, uint8_t *, 8);
293  assign(s->above_skip_ctx, uint8_t *, 8);
294  assign(s->above_txfm_ctx, uint8_t *, 8);
295  assign(s->above_segpred_ctx, uint8_t *, 8);
296  assign(s->above_intra_ctx, uint8_t *, 8);
297  assign(s->above_comp_ctx, uint8_t *, 8);
298  assign(s->above_ref_ctx, uint8_t *, 8);
299  assign(s->above_filter_ctx, uint8_t *, 8);
300  assign(s->lflvl, VP9Filter *, lflvl_len);
301 #undef assign
302 
303  if (s->td) {
304  for (i = 0; i < s->active_tile_cols; i++)
305  vp9_tile_data_free(&s->td[i]);
306  }
307 
308  if (s->s.h.bpp != s->last_bpp) {
309  ff_vp9dsp_init(&s->dsp, s->s.h.bpp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
310  ff_videodsp_init(&s->vdsp, s->s.h.bpp);
311  s->last_bpp = s->s.h.bpp;
312  }
313 
314  return 0;
315 }
316 
318 {
319  int i;
320  VP9Context *s = avctx->priv_data;
321  int chroma_blocks, chroma_eobs, bytesperpixel = s->bytesperpixel;
322  VP9TileData *td = &s->td[0];
323 
324  if (td->b_base && td->block_base && s->block_alloc_using_2pass == s->s.frames[CUR_FRAME].uses_2pass)
325  return 0;
326 
328  chroma_blocks = 64 * 64 >> (s->ss_h + s->ss_v);
329  chroma_eobs = 16 * 16 >> (s->ss_h + s->ss_v);
330  if (s->s.frames[CUR_FRAME].uses_2pass) {
331  int sbs = s->sb_cols * s->sb_rows;
332 
333  td->b_base = av_malloc_array(s->cols * s->rows, sizeof(VP9Block));
334  td->block_base = av_mallocz(((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
335  16 * 16 + 2 * chroma_eobs) * sbs);
336  if (!td->b_base || !td->block_base)
337  return AVERROR(ENOMEM);
338  td->uvblock_base[0] = td->block_base + sbs * 64 * 64 * bytesperpixel;
339  td->uvblock_base[1] = td->uvblock_base[0] + sbs * chroma_blocks * bytesperpixel;
340  td->eob_base = (uint8_t *) (td->uvblock_base[1] + sbs * chroma_blocks * bytesperpixel);
341  td->uveob_base[0] = td->eob_base + 16 * 16 * sbs;
342  td->uveob_base[1] = td->uveob_base[0] + chroma_eobs * sbs;
343 
345  td->block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
346  if (!td->block_structure)
347  return AVERROR(ENOMEM);
348  }
349  } else {
350  for (i = 1; i < s->active_tile_cols; i++)
351  vp9_tile_data_free(&s->td[i]);
352 
353  for (i = 0; i < s->active_tile_cols; i++) {
354  s->td[i].b_base = av_malloc(sizeof(VP9Block));
355  s->td[i].block_base = av_mallocz((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
356  16 * 16 + 2 * chroma_eobs);
357  if (!s->td[i].b_base || !s->td[i].block_base)
358  return AVERROR(ENOMEM);
359  s->td[i].uvblock_base[0] = s->td[i].block_base + 64 * 64 * bytesperpixel;
360  s->td[i].uvblock_base[1] = s->td[i].uvblock_base[0] + chroma_blocks * bytesperpixel;
361  s->td[i].eob_base = (uint8_t *) (s->td[i].uvblock_base[1] + chroma_blocks * bytesperpixel);
362  s->td[i].uveob_base[0] = s->td[i].eob_base + 16 * 16;
363  s->td[i].uveob_base[1] = s->td[i].uveob_base[0] + chroma_eobs;
364 
366  s->td[i].block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
367  if (!s->td[i].block_structure)
368  return AVERROR(ENOMEM);
369  }
370  }
371  }
372  s->block_alloc_using_2pass = s->s.frames[CUR_FRAME].uses_2pass;
373 
374  return 0;
375 }
376 
377 // The sign bit is at the end, not the start, of a bit sequence
379 {
380  int v = get_bits(gb, n);
381  return get_bits1(gb) ? -v : v;
382 }
383 
384 static av_always_inline int inv_recenter_nonneg(int v, int m)
385 {
386  if (v > 2 * m)
387  return v;
388  if (v & 1)
389  return m - ((v + 1) >> 1);
390  return m + (v >> 1);
391 }
392 
393 // differential forward probability updates
// differential forward probability updates
static int update_prob(VPXRangeCoder *c, int p)
{
    /* Maps the VLC-decoded index d back to the absolute difference from
     * the current probability (inverse of the encoder's remapping that
     * puts cheap/coarse steps first). */
    static const uint8_t inv_map_table[255] = {
          7,  20,  33,  46,  59,  72,  85,  98, 111, 124, 137, 150, 163, 176,
        189, 202, 215, 228, 241, 254,   1,   2,   3,   4,   5,   6,   8,   9,
         10,  11,  12,  13,  14,  15,  16,  17,  18,  19,  21,  22,  23,  24,
         25,  26,  27,  28,  29,  30,  31,  32,  34,  35,  36,  37,  38,  39,
         40,  41,  42,  43,  44,  45,  47,  48,  49,  50,  51,  52,  53,  54,
         55,  56,  57,  58,  60,  61,  62,  63,  64,  65,  66,  67,  68,  69,
         70,  71,  73,  74,  75,  76,  77,  78,  79,  80,  81,  82,  83,  84,
         86,  87,  88,  89,  90,  91,  92,  93,  94,  95,  96,  97,  99, 100,
        101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115,
        116, 117, 118, 119, 120, 121, 122, 123, 125, 126, 127, 128, 129, 130,
        131, 132, 133, 134, 135, 136, 138, 139, 140, 141, 142, 143, 144, 145,
        146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
        161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
        177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 191,
        192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205, 206,
        207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221,
        222, 223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236,
        237, 238, 239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
        252, 253, 253,
    };
    int d;

    /* This code is trying to do a differential probability update. For a
     * current probability A in the range [1, 255], the difference to a new
     * probability of any value can be expressed differentially as 1-A, 255-A
     * where some part of this (absolute range) exists both in positive as
     * well as the negative part, whereas another part only exists in one
     * half. We're trying to code this shared part differentially, i.e.
     * times two where the value of the lowest bit specifies the sign, and
     * the single part is then coded on top of this. This absolute difference
     * then again has a value of [0, 254], but a bigger value in this range
     * indicates that we're further away from the original value A, so we
     * can code this as a VLC code, since higher values are increasingly
     * unlikely. The first 20 values in inv_map_table[] allow 'cheap, rough'
     * updates vs. the 'fine, exact' updates further down the range, which
     * adds one extra dimension to this differential update model. */

    if (!vp89_rac_get(c)) {
        d = vp89_rac_get_uint(c, 4) + 0;        // tier 1: indices 0..15
    } else if (!vp89_rac_get(c)) {
        d = vp89_rac_get_uint(c, 4) + 16;       // tier 2: indices 16..31
    } else if (!vp89_rac_get(c)) {
        d = vp89_rac_get_uint(c, 5) + 32;       // tier 3: indices 32..63
    } else {
        d = vp89_rac_get_uint(c, 7);
        if (d >= 65)
            d = (d << 1) - 65 + vp89_rac_get(c);  // extend with one extra bit
        d += 64;                                  // tier 4: indices 64..254
        av_assert2(d < FF_ARRAY_ELEMS(inv_map_table));
    }

    /* recenter the absolute difference around p, mirroring when p > 128 so
     * the result stays inside [1, 255] */
    return p <= 128 ? 1 + inv_recenter_nonneg(inv_map_table[d], p - 1) :
                    255 - inv_recenter_nonneg(inv_map_table[d], 255 - p);
}
451 
453 {
454  static const enum AVColorSpace colorspaces[8] = {
457  };
458  VP9Context *s = avctx->priv_data;
459  int bits = avctx->profile <= 1 ? 0 : 1 + get_bits1(&s->gb); // 0:8, 1:10, 2:12
460 
461  s->bpp_index = bits;
462  s->s.h.bpp = 8 + bits * 2;
463  s->bytesperpixel = (7 + s->s.h.bpp) >> 3;
464  avctx->colorspace = colorspaces[get_bits(&s->gb, 3)];
465  if (avctx->colorspace == AVCOL_SPC_RGB) { // RGB = profile 1
466  static const enum AVPixelFormat pix_fmt_rgb[3] = {
468  };
469  s->ss_h = s->ss_v = 0;
470  avctx->color_range = AVCOL_RANGE_JPEG;
471  s->pix_fmt = pix_fmt_rgb[bits];
472  if (avctx->profile & 1) {
473  if (get_bits1(&s->gb)) {
474  av_log(avctx, AV_LOG_ERROR, "Reserved bit set in RGB\n");
475  return AVERROR_INVALIDDATA;
476  }
477  } else {
478  av_log(avctx, AV_LOG_ERROR, "RGB not supported in profile %d\n",
479  avctx->profile);
480  return AVERROR_INVALIDDATA;
481  }
482  } else {
483  static const enum AVPixelFormat pix_fmt_for_ss[3][2 /* v */][2 /* h */] = {
490  };
492  if (avctx->profile & 1) {
493  s->ss_h = get_bits1(&s->gb);
494  s->ss_v = get_bits1(&s->gb);
495  s->pix_fmt = pix_fmt_for_ss[bits][s->ss_v][s->ss_h];
496  if (s->pix_fmt == AV_PIX_FMT_YUV420P) {
497  av_log(avctx, AV_LOG_ERROR, "YUV 4:2:0 not supported in profile %d\n",
498  avctx->profile);
499  return AVERROR_INVALIDDATA;
500  } else if (get_bits1(&s->gb)) {
501  av_log(avctx, AV_LOG_ERROR, "Profile %d color details reserved bit set\n",
502  avctx->profile);
503  return AVERROR_INVALIDDATA;
504  }
505  } else {
506  s->ss_h = s->ss_v = 1;
507  s->pix_fmt = pix_fmt_for_ss[bits][1][1];
508  }
509  }
510 
511  return 0;
512 }
513 
515  const uint8_t *data, int size, int *ref)
516 {
517  VP9Context *s = avctx->priv_data;
518  int c, i, j, k, l, m, n, w, h, max, size2, ret, sharp;
519  int last_invisible;
520  const uint8_t *data2;
521 
522  /* general header */
523  if ((ret = init_get_bits8(&s->gb, data, size)) < 0) {
524  av_log(avctx, AV_LOG_ERROR, "Failed to initialize bitstream reader\n");
525  return ret;
526  }
527  if (get_bits(&s->gb, 2) != 0x2) { // frame marker
528  av_log(avctx, AV_LOG_ERROR, "Invalid frame marker\n");
529  return AVERROR_INVALIDDATA;
530  }
531  avctx->profile = get_bits1(&s->gb);
532  avctx->profile |= get_bits1(&s->gb) << 1;
533  if (avctx->profile == 3) avctx->profile += get_bits1(&s->gb);
534  if (avctx->profile > 3) {
535  av_log(avctx, AV_LOG_ERROR, "Profile %d is not yet supported\n", avctx->profile);
536  return AVERROR_INVALIDDATA;
537  }
538  s->s.h.profile = avctx->profile;
539  if (get_bits1(&s->gb)) {
540  *ref = get_bits(&s->gb, 3);
541  return 0;
542  }
543 
544  s->last_keyframe = s->s.h.keyframe;
545  s->s.h.keyframe = !get_bits1(&s->gb);
546 
547  last_invisible = s->s.h.invisible;
548  s->s.h.invisible = !get_bits1(&s->gb);
549  s->s.h.errorres = get_bits1(&s->gb);
550  s->s.h.use_last_frame_mvs = !s->s.h.errorres && !last_invisible;
551 
552  if (s->s.h.keyframe) {
553  if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
554  av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
555  return AVERROR_INVALIDDATA;
556  }
557  if ((ret = read_colorspace_details(avctx)) < 0)
558  return ret;
559  // for profile 1, here follows the subsampling bits
560  s->s.h.refreshrefmask = 0xff;
561  w = get_bits(&s->gb, 16) + 1;
562  h = get_bits(&s->gb, 16) + 1;
563  if (get_bits1(&s->gb)) // display size
564  skip_bits(&s->gb, 32);
565  } else {
566  s->s.h.intraonly = s->s.h.invisible ? get_bits1(&s->gb) : 0;
567  s->s.h.resetctx = s->s.h.errorres ? 0 : get_bits(&s->gb, 2);
568  if (s->s.h.intraonly) {
569  if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
570  av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
571  return AVERROR_INVALIDDATA;
572  }
573  if (avctx->profile >= 1) {
574  if ((ret = read_colorspace_details(avctx)) < 0)
575  return ret;
576  } else {
577  s->ss_h = s->ss_v = 1;
578  s->s.h.bpp = 8;
579  s->bpp_index = 0;
580  s->bytesperpixel = 1;
581  s->pix_fmt = AV_PIX_FMT_YUV420P;
582  avctx->colorspace = AVCOL_SPC_BT470BG;
583  avctx->color_range = AVCOL_RANGE_MPEG;
584  }
585  s->s.h.refreshrefmask = get_bits(&s->gb, 8);
586  w = get_bits(&s->gb, 16) + 1;
587  h = get_bits(&s->gb, 16) + 1;
588  if (get_bits1(&s->gb)) // display size
589  skip_bits(&s->gb, 32);
590  } else {
591  s->s.h.refreshrefmask = get_bits(&s->gb, 8);
592  s->s.h.refidx[0] = get_bits(&s->gb, 3);
593  s->s.h.signbias[0] = get_bits1(&s->gb) && !s->s.h.errorres;
594  s->s.h.refidx[1] = get_bits(&s->gb, 3);
595  s->s.h.signbias[1] = get_bits1(&s->gb) && !s->s.h.errorres;
596  s->s.h.refidx[2] = get_bits(&s->gb, 3);
597  s->s.h.signbias[2] = get_bits1(&s->gb) && !s->s.h.errorres;
598  if (!s->s.refs[s->s.h.refidx[0]].f->buf[0] ||
599  !s->s.refs[s->s.h.refidx[1]].f->buf[0] ||
600  !s->s.refs[s->s.h.refidx[2]].f->buf[0]) {
601  av_log(avctx, AV_LOG_ERROR, "Not all references are available\n");
602  return AVERROR_INVALIDDATA;
603  }
604  if (get_bits1(&s->gb)) {
605  w = s->s.refs[s->s.h.refidx[0]].f->width;
606  h = s->s.refs[s->s.h.refidx[0]].f->height;
607  } else if (get_bits1(&s->gb)) {
608  w = s->s.refs[s->s.h.refidx[1]].f->width;
609  h = s->s.refs[s->s.h.refidx[1]].f->height;
610  } else if (get_bits1(&s->gb)) {
611  w = s->s.refs[s->s.h.refidx[2]].f->width;
612  h = s->s.refs[s->s.h.refidx[2]].f->height;
613  } else {
614  w = get_bits(&s->gb, 16) + 1;
615  h = get_bits(&s->gb, 16) + 1;
616  }
617  // Note that in this code, "CUR_FRAME" is actually before we
618  // have formally allocated a frame, and thus actually represents
619  // the _last_ frame
620  s->s.h.use_last_frame_mvs &= s->s.frames[CUR_FRAME].tf.f->width == w &&
621  s->s.frames[CUR_FRAME].tf.f->height == h;
622  if (get_bits1(&s->gb)) // display size
623  skip_bits(&s->gb, 32);
624  s->s.h.highprecisionmvs = get_bits1(&s->gb);
625  s->s.h.filtermode = get_bits1(&s->gb) ? FILTER_SWITCHABLE :
626  get_bits(&s->gb, 2);
627  s->s.h.allowcompinter = s->s.h.signbias[0] != s->s.h.signbias[1] ||
628  s->s.h.signbias[0] != s->s.h.signbias[2];
629  if (s->s.h.allowcompinter) {
630  if (s->s.h.signbias[0] == s->s.h.signbias[1]) {
631  s->s.h.fixcompref = 2;
632  s->s.h.varcompref[0] = 0;
633  s->s.h.varcompref[1] = 1;
634  } else if (s->s.h.signbias[0] == s->s.h.signbias[2]) {
635  s->s.h.fixcompref = 1;
636  s->s.h.varcompref[0] = 0;
637  s->s.h.varcompref[1] = 2;
638  } else {
639  s->s.h.fixcompref = 0;
640  s->s.h.varcompref[0] = 1;
641  s->s.h.varcompref[1] = 2;
642  }
643  }
644  }
645  }
646  s->s.h.refreshctx = s->s.h.errorres ? 0 : get_bits1(&s->gb);
647  s->s.h.parallelmode = s->s.h.errorres ? 1 : get_bits1(&s->gb);
648  s->s.h.framectxid = c = get_bits(&s->gb, 2);
649  if (s->s.h.keyframe || s->s.h.intraonly)
650  s->s.h.framectxid = 0; // BUG: libvpx ignores this field in keyframes
651 
652  /* loopfilter header data */
653  if (s->s.h.keyframe || s->s.h.errorres || s->s.h.intraonly) {
654  // reset loopfilter defaults
655  s->s.h.lf_delta.ref[0] = 1;
656  s->s.h.lf_delta.ref[1] = 0;
657  s->s.h.lf_delta.ref[2] = -1;
658  s->s.h.lf_delta.ref[3] = -1;
659  s->s.h.lf_delta.mode[0] = 0;
660  s->s.h.lf_delta.mode[1] = 0;
661  memset(s->s.h.segmentation.feat, 0, sizeof(s->s.h.segmentation.feat));
662  }
663  s->s.h.filter.level = get_bits(&s->gb, 6);
664  sharp = get_bits(&s->gb, 3);
665  // if sharpness changed, reinit lim/mblim LUTs. if it didn't change, keep
666  // the old cache values since they are still valid
667  if (s->s.h.filter.sharpness != sharp) {
668  for (i = 1; i <= 63; i++) {
669  int limit = i;
670 
671  if (sharp > 0) {
672  limit >>= (sharp + 3) >> 2;
673  limit = FFMIN(limit, 9 - sharp);
674  }
675  limit = FFMAX(limit, 1);
676 
677  s->filter_lut.lim_lut[i] = limit;
678  s->filter_lut.mblim_lut[i] = 2 * (i + 2) + limit;
679  }
680  }
681  s->s.h.filter.sharpness = sharp;
682  if ((s->s.h.lf_delta.enabled = get_bits1(&s->gb))) {
683  if ((s->s.h.lf_delta.updated = get_bits1(&s->gb))) {
684  for (i = 0; i < 4; i++)
685  if (get_bits1(&s->gb))
686  s->s.h.lf_delta.ref[i] = get_sbits_inv(&s->gb, 6);
687  for (i = 0; i < 2; i++)
688  if (get_bits1(&s->gb))
689  s->s.h.lf_delta.mode[i] = get_sbits_inv(&s->gb, 6);
690  }
691  }
692 
693  /* quantization header data */
694  s->s.h.yac_qi = get_bits(&s->gb, 8);
695  s->s.h.ydc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
696  s->s.h.uvdc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
697  s->s.h.uvac_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
698  s->s.h.lossless = s->s.h.yac_qi == 0 && s->s.h.ydc_qdelta == 0 &&
699  s->s.h.uvdc_qdelta == 0 && s->s.h.uvac_qdelta == 0;
700  if (s->s.h.lossless)
702 
703  /* segmentation header info */
704  if ((s->s.h.segmentation.enabled = get_bits1(&s->gb))) {
705  if ((s->s.h.segmentation.update_map = get_bits1(&s->gb))) {
706  for (i = 0; i < 7; i++)
707  s->s.h.segmentation.prob[i] = get_bits1(&s->gb) ?
708  get_bits(&s->gb, 8) : 255;
709  if ((s->s.h.segmentation.temporal = get_bits1(&s->gb)))
710  for (i = 0; i < 3; i++)
711  s->s.h.segmentation.pred_prob[i] = get_bits1(&s->gb) ?
712  get_bits(&s->gb, 8) : 255;
713  }
714 
715  if (get_bits1(&s->gb)) {
716  s->s.h.segmentation.absolute_vals = get_bits1(&s->gb);
717  for (i = 0; i < 8; i++) {
718  if ((s->s.h.segmentation.feat[i].q_enabled = get_bits1(&s->gb)))
719  s->s.h.segmentation.feat[i].q_val = get_sbits_inv(&s->gb, 8);
720  if ((s->s.h.segmentation.feat[i].lf_enabled = get_bits1(&s->gb)))
721  s->s.h.segmentation.feat[i].lf_val = get_sbits_inv(&s->gb, 6);
722  if ((s->s.h.segmentation.feat[i].ref_enabled = get_bits1(&s->gb)))
723  s->s.h.segmentation.feat[i].ref_val = get_bits(&s->gb, 2);
724  s->s.h.segmentation.feat[i].skip_enabled = get_bits1(&s->gb);
725  }
726  }
727  }
728 
729  // set qmul[] based on Y/UV, AC/DC and segmentation Q idx deltas
730  for (i = 0; i < (s->s.h.segmentation.enabled ? 8 : 1); i++) {
731  int qyac, qydc, quvac, quvdc, lflvl, sh;
732 
733  if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].q_enabled) {
734  if (s->s.h.segmentation.absolute_vals)
735  qyac = av_clip_uintp2(s->s.h.segmentation.feat[i].q_val, 8);
736  else
737  qyac = av_clip_uintp2(s->s.h.yac_qi + s->s.h.segmentation.feat[i].q_val, 8);
738  } else {
739  qyac = s->s.h.yac_qi;
740  }
741  qydc = av_clip_uintp2(qyac + s->s.h.ydc_qdelta, 8);
742  quvdc = av_clip_uintp2(qyac + s->s.h.uvdc_qdelta, 8);
743  quvac = av_clip_uintp2(qyac + s->s.h.uvac_qdelta, 8);
744  qyac = av_clip_uintp2(qyac, 8);
745 
746  s->s.h.segmentation.feat[i].qmul[0][0] = ff_vp9_dc_qlookup[s->bpp_index][qydc];
747  s->s.h.segmentation.feat[i].qmul[0][1] = ff_vp9_ac_qlookup[s->bpp_index][qyac];
748  s->s.h.segmentation.feat[i].qmul[1][0] = ff_vp9_dc_qlookup[s->bpp_index][quvdc];
749  s->s.h.segmentation.feat[i].qmul[1][1] = ff_vp9_ac_qlookup[s->bpp_index][quvac];
750 
751  sh = s->s.h.filter.level >= 32;
752  if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].lf_enabled) {
753  if (s->s.h.segmentation.absolute_vals)
754  lflvl = av_clip_uintp2(s->s.h.segmentation.feat[i].lf_val, 6);
755  else
756  lflvl = av_clip_uintp2(s->s.h.filter.level + s->s.h.segmentation.feat[i].lf_val, 6);
757  } else {
758  lflvl = s->s.h.filter.level;
759  }
760  if (s->s.h.lf_delta.enabled) {
761  s->s.h.segmentation.feat[i].lflvl[0][0] =
762  s->s.h.segmentation.feat[i].lflvl[0][1] =
763  av_clip_uintp2(lflvl + (s->s.h.lf_delta.ref[0] * (1 << sh)), 6);
764  for (j = 1; j < 4; j++) {
765  s->s.h.segmentation.feat[i].lflvl[j][0] =
766  av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
767  s->s.h.lf_delta.mode[0]) * (1 << sh)), 6);
768  s->s.h.segmentation.feat[i].lflvl[j][1] =
769  av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
770  s->s.h.lf_delta.mode[1]) * (1 << sh)), 6);
771  }
772  } else {
773  memset(s->s.h.segmentation.feat[i].lflvl, lflvl,
774  sizeof(s->s.h.segmentation.feat[i].lflvl));
775  }
776  }
777 
778  /* tiling info */
779  if ((ret = update_size(avctx, w, h)) < 0) {
780  av_log(avctx, AV_LOG_ERROR, "Failed to initialize decoder for %dx%d @ %d\n",
781  w, h, s->pix_fmt);
782  return ret;
783  }
784  for (s->s.h.tiling.log2_tile_cols = 0;
785  s->sb_cols > (64 << s->s.h.tiling.log2_tile_cols);
786  s->s.h.tiling.log2_tile_cols++) ;
787  for (max = 0; (s->sb_cols >> max) >= 4; max++) ;
788  max = FFMAX(0, max - 1);
789  while (max > s->s.h.tiling.log2_tile_cols) {
790  if (get_bits1(&s->gb))
791  s->s.h.tiling.log2_tile_cols++;
792  else
793  break;
794  }
795  s->s.h.tiling.log2_tile_rows = decode012(&s->gb);
796  s->s.h.tiling.tile_rows = 1 << s->s.h.tiling.log2_tile_rows;
797  if (s->s.h.tiling.tile_cols != (1 << s->s.h.tiling.log2_tile_cols)) {
798  int n_range_coders;
799  VPXRangeCoder *rc;
800 
801  if (s->td) {
802  for (i = 0; i < s->active_tile_cols; i++)
803  vp9_tile_data_free(&s->td[i]);
804  av_freep(&s->td);
805  }
806 
807  s->s.h.tiling.tile_cols = 1 << s->s.h.tiling.log2_tile_cols;
808  s->active_tile_cols = avctx->active_thread_type == FF_THREAD_SLICE ?
809  s->s.h.tiling.tile_cols : 1;
810  vp9_alloc_entries(avctx, s->sb_rows);
811  if (avctx->active_thread_type == FF_THREAD_SLICE) {
812  n_range_coders = 4; // max_tile_rows
813  } else {
814  n_range_coders = s->s.h.tiling.tile_cols;
815  }
816  s->td = av_calloc(s->active_tile_cols, sizeof(VP9TileData) +
817  n_range_coders * sizeof(VPXRangeCoder));
818  if (!s->td)
819  return AVERROR(ENOMEM);
820  rc = (VPXRangeCoder *) &s->td[s->active_tile_cols];
821  for (i = 0; i < s->active_tile_cols; i++) {
822  s->td[i].s = s;
823  s->td[i].c_b = rc;
824  rc += n_range_coders;
825  }
826  }
827 
828  /* check reference frames */
829  if (!s->s.h.keyframe && !s->s.h.intraonly) {
830  int valid_ref_frame = 0;
831  for (i = 0; i < 3; i++) {
832  AVFrame *ref = s->s.refs[s->s.h.refidx[i]].f;
833  int refw = ref->width, refh = ref->height;
834 
835  if (ref->format != avctx->pix_fmt) {
836  av_log(avctx, AV_LOG_ERROR,
837  "Ref pixfmt (%s) did not match current frame (%s)",
838  av_get_pix_fmt_name(ref->format),
839  av_get_pix_fmt_name(avctx->pix_fmt));
840  return AVERROR_INVALIDDATA;
841  } else if (refw == w && refh == h) {
842  s->mvscale[i][0] = s->mvscale[i][1] = 0;
843  } else {
844  /* Check to make sure at least one of frames that */
845  /* this frame references has valid dimensions */
846  if (w * 2 < refw || h * 2 < refh || w > 16 * refw || h > 16 * refh) {
847  av_log(avctx, AV_LOG_WARNING,
848  "Invalid ref frame dimensions %dx%d for frame size %dx%d\n",
849  refw, refh, w, h);
850  s->mvscale[i][0] = s->mvscale[i][1] = REF_INVALID_SCALE;
851  continue;
852  }
853  s->mvscale[i][0] = (refw << 14) / w;
854  s->mvscale[i][1] = (refh << 14) / h;
855  s->mvstep[i][0] = 16 * s->mvscale[i][0] >> 14;
856  s->mvstep[i][1] = 16 * s->mvscale[i][1] >> 14;
857  }
858  valid_ref_frame++;
859  }
860  if (!valid_ref_frame) {
861  av_log(avctx, AV_LOG_ERROR, "No valid reference frame is found, bitstream not supported\n");
862  return AVERROR_INVALIDDATA;
863  }
864  }
865 
866  if (s->s.h.keyframe || s->s.h.errorres || (s->s.h.intraonly && s->s.h.resetctx == 3)) {
867  s->prob_ctx[0].p = s->prob_ctx[1].p = s->prob_ctx[2].p =
868  s->prob_ctx[3].p = ff_vp9_default_probs;
869  memcpy(s->prob_ctx[0].coef, ff_vp9_default_coef_probs,
870  sizeof(ff_vp9_default_coef_probs));
871  memcpy(s->prob_ctx[1].coef, ff_vp9_default_coef_probs,
872  sizeof(ff_vp9_default_coef_probs));
873  memcpy(s->prob_ctx[2].coef, ff_vp9_default_coef_probs,
874  sizeof(ff_vp9_default_coef_probs));
875  memcpy(s->prob_ctx[3].coef, ff_vp9_default_coef_probs,
876  sizeof(ff_vp9_default_coef_probs));
877  } else if (s->s.h.intraonly && s->s.h.resetctx == 2) {
878  s->prob_ctx[c].p = ff_vp9_default_probs;
879  memcpy(s->prob_ctx[c].coef, ff_vp9_default_coef_probs,
880  sizeof(ff_vp9_default_coef_probs));
881  }
882 
883  // next 16 bits is size of the rest of the header (arith-coded)
884  s->s.h.compressed_header_size = size2 = get_bits(&s->gb, 16);
885  s->s.h.uncompressed_header_size = (get_bits_count(&s->gb) + 7) / 8;
886 
887  data2 = align_get_bits(&s->gb);
888  if (size2 > size - (data2 - data)) {
889  av_log(avctx, AV_LOG_ERROR, "Invalid compressed header size\n");
890  return AVERROR_INVALIDDATA;
891  }
892  ret = ff_vpx_init_range_decoder(&s->c, data2, size2);
893  if (ret < 0)
894  return ret;
895 
896  if (vpx_rac_get_prob_branchy(&s->c, 128)) { // marker bit
897  av_log(avctx, AV_LOG_ERROR, "Marker bit was set\n");
898  return AVERROR_INVALIDDATA;
899  }
900 
901  for (i = 0; i < s->active_tile_cols; i++) {
902  if (s->s.h.keyframe || s->s.h.intraonly) {
903  memset(s->td[i].counts.coef, 0, sizeof(s->td[0].counts.coef));
904  memset(s->td[i].counts.eob, 0, sizeof(s->td[0].counts.eob));
905  } else {
906  memset(&s->td[i].counts, 0, sizeof(s->td[0].counts));
907  }
908  s->td[i].nb_block_structure = 0;
909  }
910 
911  /* FIXME is it faster to not copy here, but do it down in the fw updates
912  * as explicit copies if the fw update is missing (and skip the copy upon
913  * fw update)? */
914  s->prob.p = s->prob_ctx[c].p;
915 
916  // txfm updates
917  if (s->s.h.lossless) {
918  s->s.h.txfmmode = TX_4X4;
919  } else {
920  s->s.h.txfmmode = vp89_rac_get_uint(&s->c, 2);
921  if (s->s.h.txfmmode == 3)
922  s->s.h.txfmmode += vp89_rac_get(&s->c);
923 
924  if (s->s.h.txfmmode == TX_SWITCHABLE) {
925  for (i = 0; i < 2; i++)
926  if (vpx_rac_get_prob_branchy(&s->c, 252))
927  s->prob.p.tx8p[i] = update_prob(&s->c, s->prob.p.tx8p[i]);
928  for (i = 0; i < 2; i++)
929  for (j = 0; j < 2; j++)
930  if (vpx_rac_get_prob_branchy(&s->c, 252))
931  s->prob.p.tx16p[i][j] =
932  update_prob(&s->c, s->prob.p.tx16p[i][j]);
933  for (i = 0; i < 2; i++)
934  for (j = 0; j < 3; j++)
935  if (vpx_rac_get_prob_branchy(&s->c, 252))
936  s->prob.p.tx32p[i][j] =
937  update_prob(&s->c, s->prob.p.tx32p[i][j]);
938  }
939  }
940 
941  // coef updates
942  for (i = 0; i < 4; i++) {
943  uint8_t (*ref)[2][6][6][3] = s->prob_ctx[c].coef[i];
944  if (vp89_rac_get(&s->c)) {
945  for (j = 0; j < 2; j++)
946  for (k = 0; k < 2; k++)
947  for (l = 0; l < 6; l++)
948  for (m = 0; m < 6; m++) {
949  uint8_t *p = s->prob.coef[i][j][k][l][m];
950  uint8_t *r = ref[j][k][l][m];
951  if (m >= 3 && l == 0) // dc only has 3 pt
952  break;
953  for (n = 0; n < 3; n++) {
954  if (vpx_rac_get_prob_branchy(&s->c, 252))
955  p[n] = update_prob(&s->c, r[n]);
956  else
957  p[n] = r[n];
958  }
959  memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
960  }
961  } else {
962  for (j = 0; j < 2; j++)
963  for (k = 0; k < 2; k++)
964  for (l = 0; l < 6; l++)
965  for (m = 0; m < 6; m++) {
966  uint8_t *p = s->prob.coef[i][j][k][l][m];
967  uint8_t *r = ref[j][k][l][m];
968  if (m > 3 && l == 0) // dc only has 3 pt
969  break;
970  memcpy(p, r, 3);
971  memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
972  }
973  }
974  if (s->s.h.txfmmode == i)
975  break;
976  }
977 
978  // mode updates
979  for (i = 0; i < 3; i++)
980  if (vpx_rac_get_prob_branchy(&s->c, 252))
981  s->prob.p.skip[i] = update_prob(&s->c, s->prob.p.skip[i]);
982  if (!s->s.h.keyframe && !s->s.h.intraonly) {
983  for (i = 0; i < 7; i++)
984  for (j = 0; j < 3; j++)
985  if (vpx_rac_get_prob_branchy(&s->c, 252))
986  s->prob.p.mv_mode[i][j] =
987  update_prob(&s->c, s->prob.p.mv_mode[i][j]);
988 
989  if (s->s.h.filtermode == FILTER_SWITCHABLE)
990  for (i = 0; i < 4; i++)
991  for (j = 0; j < 2; j++)
992  if (vpx_rac_get_prob_branchy(&s->c, 252))
993  s->prob.p.filter[i][j] =
994  update_prob(&s->c, s->prob.p.filter[i][j]);
995 
996  for (i = 0; i < 4; i++)
997  if (vpx_rac_get_prob_branchy(&s->c, 252))
998  s->prob.p.intra[i] = update_prob(&s->c, s->prob.p.intra[i]);
999 
1000  if (s->s.h.allowcompinter) {
1001  s->s.h.comppredmode = vp89_rac_get(&s->c);
1002  if (s->s.h.comppredmode)
1003  s->s.h.comppredmode += vp89_rac_get(&s->c);
1004  if (s->s.h.comppredmode == PRED_SWITCHABLE)
1005  for (i = 0; i < 5; i++)
1006  if (vpx_rac_get_prob_branchy(&s->c, 252))
1007  s->prob.p.comp[i] =
1008  update_prob(&s->c, s->prob.p.comp[i]);
1009  } else {
1010  s->s.h.comppredmode = PRED_SINGLEREF;
1011  }
1012 
1013  if (s->s.h.comppredmode != PRED_COMPREF) {
1014  for (i = 0; i < 5; i++) {
1015  if (vpx_rac_get_prob_branchy(&s->c, 252))
1016  s->prob.p.single_ref[i][0] =
1017  update_prob(&s->c, s->prob.p.single_ref[i][0]);
1018  if (vpx_rac_get_prob_branchy(&s->c, 252))
1019  s->prob.p.single_ref[i][1] =
1020  update_prob(&s->c, s->prob.p.single_ref[i][1]);
1021  }
1022  }
1023 
1024  if (s->s.h.comppredmode != PRED_SINGLEREF) {
1025  for (i = 0; i < 5; i++)
1026  if (vpx_rac_get_prob_branchy(&s->c, 252))
1027  s->prob.p.comp_ref[i] =
1028  update_prob(&s->c, s->prob.p.comp_ref[i]);
1029  }
1030 
1031  for (i = 0; i < 4; i++)
1032  for (j = 0; j < 9; j++)
1033  if (vpx_rac_get_prob_branchy(&s->c, 252))
1034  s->prob.p.y_mode[i][j] =
1035  update_prob(&s->c, s->prob.p.y_mode[i][j]);
1036 
1037  for (i = 0; i < 4; i++)
1038  for (j = 0; j < 4; j++)
1039  for (k = 0; k < 3; k++)
1040  if (vpx_rac_get_prob_branchy(&s->c, 252))
1041  s->prob.p.partition[3 - i][j][k] =
1042  update_prob(&s->c,
1043  s->prob.p.partition[3 - i][j][k]);
1044 
1045  // mv fields don't use the update_prob subexp model for some reason
1046  for (i = 0; i < 3; i++)
1047  if (vpx_rac_get_prob_branchy(&s->c, 252))
1048  s->prob.p.mv_joint[i] = (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1049 
1050  for (i = 0; i < 2; i++) {
1051  if (vpx_rac_get_prob_branchy(&s->c, 252))
1052  s->prob.p.mv_comp[i].sign =
1053  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1054 
1055  for (j = 0; j < 10; j++)
1056  if (vpx_rac_get_prob_branchy(&s->c, 252))
1057  s->prob.p.mv_comp[i].classes[j] =
1058  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1059 
1060  if (vpx_rac_get_prob_branchy(&s->c, 252))
1061  s->prob.p.mv_comp[i].class0 =
1062  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1063 
1064  for (j = 0; j < 10; j++)
1065  if (vpx_rac_get_prob_branchy(&s->c, 252))
1066  s->prob.p.mv_comp[i].bits[j] =
1067  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1068  }
1069 
1070  for (i = 0; i < 2; i++) {
1071  for (j = 0; j < 2; j++)
1072  for (k = 0; k < 3; k++)
1073  if (vpx_rac_get_prob_branchy(&s->c, 252))
1074  s->prob.p.mv_comp[i].class0_fp[j][k] =
1075  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1076 
1077  for (j = 0; j < 3; j++)
1078  if (vpx_rac_get_prob_branchy(&s->c, 252))
1079  s->prob.p.mv_comp[i].fp[j] =
1080  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1081  }
1082 
1083  if (s->s.h.highprecisionmvs) {
1084  for (i = 0; i < 2; i++) {
1085  if (vpx_rac_get_prob_branchy(&s->c, 252))
1086  s->prob.p.mv_comp[i].class0_hp =
1087  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1088 
1089  if (vpx_rac_get_prob_branchy(&s->c, 252))
1090  s->prob.p.mv_comp[i].hp =
1091  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1092  }
1093  }
1094  }
1095 
1096  return (data2 - data) + size2;
1097 }
1098 
1099 static void decode_sb(VP9TileData *td, int row, int col, VP9Filter *lflvl,
1100  ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
1101 {
1102  const VP9Context *s = td->s;
1103  int c = ((s->above_partition_ctx[col] >> (3 - bl)) & 1) |
1104  (((td->left_partition_ctx[row & 0x7] >> (3 - bl)) & 1) << 1);
1105  const uint8_t *p = s->s.h.keyframe || s->s.h.intraonly ? ff_vp9_default_kf_partition_probs[bl][c] :
1106  s->prob.p.partition[bl][c];
1107  enum BlockPartition bp;
1108  ptrdiff_t hbs = 4 >> bl;
1109  AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
1110  ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
1111  int bytesperpixel = s->bytesperpixel;
1112 
1113  if (bl == BL_8X8) {
1115  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1116  } else if (col + hbs < s->cols) { // FIXME why not <=?
1117  if (row + hbs < s->rows) { // FIXME why not <=?
1119  switch (bp) {
1120  case PARTITION_NONE:
1121  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1122  break;
1123  case PARTITION_H:
1124  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1125  yoff += hbs * 8 * y_stride;
1126  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1127  ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, bl, bp);
1128  break;
1129  case PARTITION_V:
1130  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1131  yoff += hbs * 8 * bytesperpixel;
1132  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1133  ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, bl, bp);
1134  break;
1135  case PARTITION_SPLIT:
1136  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1137  decode_sb(td, row, col + hbs, lflvl,
1138  yoff + 8 * hbs * bytesperpixel,
1139  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1140  yoff += hbs * 8 * y_stride;
1141  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1142  decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1143  decode_sb(td, row + hbs, col + hbs, lflvl,
1144  yoff + 8 * hbs * bytesperpixel,
1145  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1146  break;
1147  default:
1148  av_assert0(0);
1149  }
1150  } else if (vpx_rac_get_prob_branchy(td->c, p[1])) {
1151  bp = PARTITION_SPLIT;
1152  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1153  decode_sb(td, row, col + hbs, lflvl,
1154  yoff + 8 * hbs * bytesperpixel,
1155  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1156  } else {
1157  bp = PARTITION_H;
1158  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1159  }
1160  } else if (row + hbs < s->rows) { // FIXME why not <=?
1161  if (vpx_rac_get_prob_branchy(td->c, p[2])) {
1162  bp = PARTITION_SPLIT;
1163  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1164  yoff += hbs * 8 * y_stride;
1165  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1166  decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1167  } else {
1168  bp = PARTITION_V;
1169  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1170  }
1171  } else {
1172  bp = PARTITION_SPLIT;
1173  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1174  }
1175  td->counts.partition[bl][c][bp]++;
1176 }
1177 
1178 static void decode_sb_mem(VP9TileData *td, int row, int col, VP9Filter *lflvl,
1179  ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
1180 {
1181  const VP9Context *s = td->s;
1182  VP9Block *b = td->b;
1183  ptrdiff_t hbs = 4 >> bl;
1184  AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
1185  ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
1186  int bytesperpixel = s->bytesperpixel;
1187 
1188  if (bl == BL_8X8) {
1189  av_assert2(b->bl == BL_8X8);
1190  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
1191  } else if (td->b->bl == bl) {
1192  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
1193  if (b->bp == PARTITION_H && row + hbs < s->rows) {
1194  yoff += hbs * 8 * y_stride;
1195  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1196  ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, b->bl, b->bp);
1197  } else if (b->bp == PARTITION_V && col + hbs < s->cols) {
1198  yoff += hbs * 8 * bytesperpixel;
1199  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1200  ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, b->bl, b->bp);
1201  }
1202  } else {
1203  decode_sb_mem(td, row, col, lflvl, yoff, uvoff, bl + 1);
1204  if (col + hbs < s->cols) { // FIXME why not <=?
1205  if (row + hbs < s->rows) {
1206  decode_sb_mem(td, row, col + hbs, lflvl, yoff + 8 * hbs * bytesperpixel,
1207  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1208  yoff += hbs * 8 * y_stride;
1209  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1210  decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1211  decode_sb_mem(td, row + hbs, col + hbs, lflvl,
1212  yoff + 8 * hbs * bytesperpixel,
1213  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1214  } else {
1215  yoff += hbs * 8 * bytesperpixel;
1216  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1217  decode_sb_mem(td, row, col + hbs, lflvl, yoff, uvoff, bl + 1);
1218  }
1219  } else if (row + hbs < s->rows) {
1220  yoff += hbs * 8 * y_stride;
1221  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1222  decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1223  }
1224  }
1225 }
1226 
/**
 * Compute the pixel extent of tile index `idx` out of 2^log2_n tiles that
 * partition `n` 64x64 superblocks.
 *
 * @param start receives the first position, in 8-pixel units (sb * 8)
 * @param end   receives the one-past-last position, in 8-pixel units
 */
static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
{
    int first_sb = (idx * n) >> log2_n;
    int last_sb  = ((idx + 1) * n) >> log2_n;

    /* clamp to the superblock count before scaling to 8-pixel units */
    if (first_sb > n)
        first_sb = n;
    if (last_sb > n)
        last_sb = n;
    *start = first_sb << 3;
    *end   = last_sb << 3;
}
1234 
1236 {
1237  int i;
1238 
1239  av_freep(&s->intra_pred_data[0]);
1240  for (i = 0; i < s->active_tile_cols; i++)
1241  vp9_tile_data_free(&s->td[i]);
1242 }
1243 
1245 {
1246  VP9Context *s = avctx->priv_data;
1247  int i;
1248 
1249  for (i = 0; i < 3; i++) {
1250  vp9_frame_unref(avctx, &s->s.frames[i]);
1251  av_frame_free(&s->s.frames[i].tf.f);
1252  }
1253  av_buffer_pool_uninit(&s->frame_extradata_pool);
1254  for (i = 0; i < 8; i++) {
1255  ff_thread_release_ext_buffer(avctx, &s->s.refs[i]);
1256  av_frame_free(&s->s.refs[i].f);
1257  ff_thread_release_ext_buffer(avctx, &s->next_refs[i]);
1258  av_frame_free(&s->next_refs[i].f);
1259  }
1260 
1261  free_buffers(s);
1262 #if HAVE_THREADS
1263  av_freep(&s->entries);
1264  ff_pthread_free(s, vp9_context_offsets);
1265 #endif
1266  av_freep(&s->td);
1267  return 0;
1268 }
1269 
/**
 * Single-threaded tile decode loop (also used for both passes of two-pass
 * frame-threaded decoding).
 *
 * For each tile row: initializes one range decoder per tile column from the
 * size-prefixed tile data, then decodes superblock rows across all tile
 * columns, backs up the pre-loopfilter bottom edge for next-row intra
 * prediction, runs the loop filter on the completed row and reports progress
 * to consumers of the frame.
 *
 * @param data start of the tile data (after the compressed header)
 * @param size number of bytes available at data
 * @return 0 on success, negative AVERROR on invalid tile sizes/markers
 */
static int decode_tiles(AVCodecContext *avctx,
                        const uint8_t *data, int size)
{
    VP9Context *s = avctx->priv_data;
    VP9TileData *td = &s->td[0];
    int row, col, tile_row, tile_col, ret;
    int bytesperpixel;
    int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
    AVFrame *f;
    ptrdiff_t yoff, uvoff, ls_y, ls_uv;

    f = s->s.frames[CUR_FRAME].tf.f;
    ls_y = f->linesize[0];
    ls_uv =f->linesize[1];
    bytesperpixel = s->bytesperpixel;

    yoff = uvoff = 0;
    for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
        set_tile_offset(&tile_row_start, &tile_row_end,
                        tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);

        // set up one range decoder per tile column; every tile except the
        // very last one is preceded by a 32-bit big-endian size field
        for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
            int64_t tile_size;

            if (tile_col == s->s.h.tiling.tile_cols - 1 &&
                tile_row == s->s.h.tiling.tile_rows - 1) {
                tile_size = size;
            } else {
                tile_size = AV_RB32(data);
                data += 4;
                size -= 4;
            }
            if (tile_size > size)
                return AVERROR_INVALIDDATA;
            ret = ff_vpx_init_range_decoder(&td->c_b[tile_col], data, tile_size);
            if (ret < 0)
                return ret;
            if (vpx_rac_get_prob_branchy(&td->c_b[tile_col], 128)) // marker bit
                return AVERROR_INVALIDDATA;
            data += tile_size;
            size -= tile_size;
        }

        // decode one superblock row at a time, left to right across all
        // tile columns, so the loop filter and progress reporting below can
        // operate on complete rows
        for (row = tile_row_start; row < tile_row_end;
             row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
            VP9Filter *lflvl_ptr = s->lflvl;
            ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;

            for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
                set_tile_offset(&tile_col_start, &tile_col_end,
                                tile_col, s->s.h.tiling.log2_tile_cols, s->sb_cols);
                td->tile_col_start = tile_col_start;
                if (s->pass != 2) {
                    // reset the left-edge contexts at each tile boundary
                    memset(td->left_partition_ctx, 0, 8);
                    memset(td->left_skip_ctx, 0, 8);
                    if (s->s.h.keyframe || s->s.h.intraonly) {
                        memset(td->left_mode_ctx, DC_PRED, 16);
                    } else {
                        memset(td->left_mode_ctx, NEARESTMV, 8);
                    }
                    memset(td->left_y_nnz_ctx, 0, 16);
                    memset(td->left_uv_nnz_ctx, 0, 32);
                    memset(td->left_segpred_ctx, 0, 8);

                    td->c = &td->c_b[tile_col];
                }

                for (col = tile_col_start;
                     col < tile_col_end;
                     col += 8, yoff2 += 64 * bytesperpixel,
                     uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                    // FIXME integrate with lf code (i.e. zero after each
                    // use, similar to invtxfm coefficients, or similar)
                    if (s->pass != 1) {
                        memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
                    }

                    if (s->pass == 2) {
                        // pass 2: replay the block structure recorded in pass 1
                        decode_sb_mem(td, row, col, lflvl_ptr,
                                      yoff2, uvoff2, BL_64X64);
                    } else {
                        if (vpx_rac_is_end(td->c)) {
                            return AVERROR_INVALIDDATA;
                        }
                        decode_sb(td, row, col, lflvl_ptr,
                                  yoff2, uvoff2, BL_64X64);
                    }
                }
            }

            if (s->pass == 1)
                continue;

            // backup pre-loopfilter reconstruction data for intra
            // prediction of next row of sb64s
            if (row + 8 < s->rows) {
                memcpy(s->intra_pred_data[0],
                       f->data[0] + yoff + 63 * ls_y,
                       8 * s->cols * bytesperpixel);
                memcpy(s->intra_pred_data[1],
                       f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * s->cols * bytesperpixel >> s->ss_h);
                memcpy(s->intra_pred_data[2],
                       f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * s->cols * bytesperpixel >> s->ss_h);
            }

            // loopfilter one row
            if (s->s.h.filter.level) {
                yoff2 = yoff;
                uvoff2 = uvoff;
                lflvl_ptr = s->lflvl;
                for (col = 0; col < s->cols;
                     col += 8, yoff2 += 64 * bytesperpixel,
                     uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                    ff_vp9_loopfilter_sb(avctx, lflvl_ptr, row, col,
                                         yoff2, uvoff2);
                }
            }

            // FIXME maybe we can make this more finegrained by running the
            // loopfilter per-block instead of after each sbrow
            // In fact that would also make intra pred left preparation easier?
            ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, row >> 3, 0);
        }
    }
    return 0;
}
1398 
1399 #if HAVE_THREADS
/**
 * Slice-thread worker: decode one tile column (selected by jobnr) across all
 * tile rows. Runs concurrently with the other tile-column workers and with
 * loopfilter_proc(), synchronizing per superblock row via
 * vp9_report_tile_progress().
 *
 * @param tdata    unused here; this worker indexes s->td by jobnr instead
 * @param jobnr    tile column index to decode
 * @param threadnr unused
 */
static av_always_inline
int decode_tiles_mt(AVCodecContext *avctx, void *tdata, int jobnr,
                    int threadnr)
{
    VP9Context *s = avctx->priv_data;
    VP9TileData *td = &s->td[jobnr];
    ptrdiff_t uvoff, yoff, ls_y, ls_uv;
    int bytesperpixel = s->bytesperpixel, row, col, tile_row;
    unsigned tile_cols_len;
    int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
    VP9Filter *lflvl_ptr_base;
    AVFrame *f;

    f = s->s.frames[CUR_FRAME].tf.f;
    ls_y = f->linesize[0];
    ls_uv =f->linesize[1];

    // this worker only ever touches its own tile column's plane region
    set_tile_offset(&tile_col_start, &tile_col_end,
                    jobnr, s->s.h.tiling.log2_tile_cols, s->sb_cols);
    td->tile_col_start = tile_col_start;
    uvoff = (64 * bytesperpixel >> s->ss_h)*(tile_col_start >> 3);
    yoff = (64 * bytesperpixel)*(tile_col_start >> 3);
    lflvl_ptr_base = s->lflvl+(tile_col_start >> 3);

    for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
        set_tile_offset(&tile_row_start, &tile_row_end,
                        tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);

        // one pre-initialized range decoder per tile row for this column
        td->c = &td->c_b[tile_row];
        for (row = tile_row_start; row < tile_row_end;
             row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
            ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
            VP9Filter *lflvl_ptr = lflvl_ptr_base+s->sb_cols*(row >> 3);

            // reset left-edge contexts at the start of each superblock row
            memset(td->left_partition_ctx, 0, 8);
            memset(td->left_skip_ctx, 0, 8);
            if (s->s.h.keyframe || s->s.h.intraonly) {
                memset(td->left_mode_ctx, DC_PRED, 16);
            } else {
                memset(td->left_mode_ctx, NEARESTMV, 8);
            }
            memset(td->left_y_nnz_ctx, 0, 16);
            memset(td->left_uv_nnz_ctx, 0, 32);
            memset(td->left_segpred_ctx, 0, 8);

            for (col = tile_col_start;
                 col < tile_col_end;
                 col += 8, yoff2 += 64 * bytesperpixel,
                 uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                // FIXME integrate with lf code (i.e. zero after each
                // use, similar to invtxfm coefficients, or similar)
                memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
                decode_sb(td, row, col, lflvl_ptr,
                          yoff2, uvoff2, BL_64X64);
            }

            // backup pre-loopfilter reconstruction data for intra
            // prediction of next row of sb64s
            tile_cols_len = tile_col_end - tile_col_start;
            if (row + 8 < s->rows) {
                memcpy(s->intra_pred_data[0] + (tile_col_start * 8 * bytesperpixel),
                       f->data[0] + yoff + 63 * ls_y,
                       8 * tile_cols_len * bytesperpixel);
                memcpy(s->intra_pred_data[1] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
                       f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * tile_cols_len * bytesperpixel >> s->ss_h);
                memcpy(s->intra_pred_data[2] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
                       f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * tile_cols_len * bytesperpixel >> s->ss_h);
            }

            // tell the loopfilter thread this column finished the sb row
            vp9_report_tile_progress(s, row >> 3, 1);
        }
    }
    return 0;
}
1476 
1477 static av_always_inline
1478 int loopfilter_proc(AVCodecContext *avctx)
1479 {
1480  VP9Context *s = avctx->priv_data;
1481  ptrdiff_t uvoff, yoff, ls_y, ls_uv;
1482  VP9Filter *lflvl_ptr;
1483  int bytesperpixel = s->bytesperpixel, col, i;
1484  AVFrame *f;
1485 
1486  f = s->s.frames[CUR_FRAME].tf.f;
1487  ls_y = f->linesize[0];
1488  ls_uv =f->linesize[1];
1489 
1490  for (i = 0; i < s->sb_rows; i++) {
1491  vp9_await_tile_progress(s, i, s->s.h.tiling.tile_cols);
1492 
1493  if (s->s.h.filter.level) {
1494  yoff = (ls_y * 64)*i;
1495  uvoff = (ls_uv * 64 >> s->ss_v)*i;
1496  lflvl_ptr = s->lflvl+s->sb_cols*i;
1497  for (col = 0; col < s->cols;
1498  col += 8, yoff += 64 * bytesperpixel,
1499  uvoff += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1500  ff_vp9_loopfilter_sb(avctx, lflvl_ptr, i << 3, col,
1501  yoff, uvoff);
1502  }
1503  }
1504  }
1505  return 0;
1506 }
1507 #endif
1508 
1510 {
1511  AVVideoEncParams *par;
1512  unsigned int tile, nb_blocks = 0;
1513 
1514  if (s->s.h.segmentation.enabled) {
1515  for (tile = 0; tile < s->active_tile_cols; tile++)
1516  nb_blocks += s->td[tile].nb_block_structure;
1517  }
1518 
1520  AV_VIDEO_ENC_PARAMS_VP9, nb_blocks);
1521  if (!par)
1522  return AVERROR(ENOMEM);
1523 
1524  par->qp = s->s.h.yac_qi;
1525  par->delta_qp[0][0] = s->s.h.ydc_qdelta;
1526  par->delta_qp[1][0] = s->s.h.uvdc_qdelta;
1527  par->delta_qp[2][0] = s->s.h.uvdc_qdelta;
1528  par->delta_qp[1][1] = s->s.h.uvac_qdelta;
1529  par->delta_qp[2][1] = s->s.h.uvac_qdelta;
1530 
1531  if (nb_blocks) {
1532  unsigned int block = 0;
1533  unsigned int tile, block_tile;
1534 
1535  for (tile = 0; tile < s->active_tile_cols; tile++) {
1536  VP9TileData *td = &s->td[tile];
1537 
1538  for (block_tile = 0; block_tile < td->nb_block_structure; block_tile++) {
1540  unsigned int row = td->block_structure[block_tile].row;
1541  unsigned int col = td->block_structure[block_tile].col;
1542  uint8_t seg_id = frame->segmentation_map[row * 8 * s->sb_cols + col];
1543 
1544  b->src_x = col * 8;
1545  b->src_y = row * 8;
1546  b->w = 1 << (3 + td->block_structure[block_tile].block_size_idx_x);
1547  b->h = 1 << (3 + td->block_structure[block_tile].block_size_idx_y);
1548 
1549  if (s->s.h.segmentation.feat[seg_id].q_enabled) {
1550  b->delta_qp = s->s.h.segmentation.feat[seg_id].q_val;
1551  if (s->s.h.segmentation.absolute_vals)
1552  b->delta_qp -= par->qp;
1553  }
1554  }
1555  }
1556  }
1557 
1558  return 0;
1559 }
1560 
1562  int *got_frame, AVPacket *pkt)
1563 {
1564  const uint8_t *data = pkt->data;
1565  int size = pkt->size;
1566  VP9Context *s = avctx->priv_data;
1567  int ret, i, j, ref;
1568  int retain_segmap_ref = s->s.frames[REF_FRAME_SEGMAP].segmentation_map &&
1569  (!s->s.h.segmentation.enabled || !s->s.h.segmentation.update_map);
1570  AVFrame *f;
1571 
1572  if ((ret = decode_frame_header(avctx, data, size, &ref)) < 0) {
1573  return ret;
1574  } else if (ret == 0) {
1575  if (!s->s.refs[ref].f->buf[0]) {
1576  av_log(avctx, AV_LOG_ERROR, "Requested reference %d not available\n", ref);
1577  return AVERROR_INVALIDDATA;
1578  }
1579  if ((ret = av_frame_ref(frame, s->s.refs[ref].f)) < 0)
1580  return ret;
1581  frame->pts = pkt->pts;
1582  frame->pkt_dts = pkt->dts;
1583  for (i = 0; i < 8; i++) {
1584  if (s->next_refs[i].f->buf[0])
1585  ff_thread_release_ext_buffer(avctx, &s->next_refs[i]);
1586  if (s->s.refs[i].f->buf[0] &&
1587  (ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i])) < 0)
1588  return ret;
1589  }
1590  *got_frame = 1;
1591  return pkt->size;
1592  }
1593  data += ret;
1594  size -= ret;
1595 
1596  if (!retain_segmap_ref || s->s.h.keyframe || s->s.h.intraonly) {
1597  if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0])
1598  vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_SEGMAP]);
1599  if (!s->s.h.keyframe && !s->s.h.intraonly && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] &&
1600  (ret = vp9_frame_ref(avctx, &s->s.frames[REF_FRAME_SEGMAP], &s->s.frames[CUR_FRAME])) < 0)
1601  return ret;
1602  }
1603  if (s->s.frames[REF_FRAME_MVPAIR].tf.f->buf[0])
1604  vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_MVPAIR]);
1605  if (!s->s.h.intraonly && !s->s.h.keyframe && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] &&
1606  (ret = vp9_frame_ref(avctx, &s->s.frames[REF_FRAME_MVPAIR], &s->s.frames[CUR_FRAME])) < 0)
1607  return ret;
1608  if (s->s.frames[CUR_FRAME].tf.f->buf[0])
1609  vp9_frame_unref(avctx, &s->s.frames[CUR_FRAME]);
1610  if ((ret = vp9_frame_alloc(avctx, &s->s.frames[CUR_FRAME])) < 0)
1611  return ret;
1612  f = s->s.frames[CUR_FRAME].tf.f;
1613  if (s->s.h.keyframe)
1614  f->flags |= AV_FRAME_FLAG_KEY;
1615  else
1616  f->flags &= ~AV_FRAME_FLAG_KEY;
1617  f->pict_type = (s->s.h.keyframe || s->s.h.intraonly) ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
1618 
1619  if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0] &&
1620  (s->s.frames[REF_FRAME_MVPAIR].tf.f->width != s->s.frames[CUR_FRAME].tf.f->width ||
1621  s->s.frames[REF_FRAME_MVPAIR].tf.f->height != s->s.frames[CUR_FRAME].tf.f->height)) {
1622  vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_SEGMAP]);
1623  }
1624 
1625  // ref frame setup
1626  for (i = 0; i < 8; i++) {
1627  if (s->next_refs[i].f->buf[0])
1628  ff_thread_release_ext_buffer(avctx, &s->next_refs[i]);
1629  if (s->s.h.refreshrefmask & (1 << i)) {
1630  ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.frames[CUR_FRAME].tf);
1631  } else if (s->s.refs[i].f->buf[0]) {
1632  ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i]);
1633  }
1634  if (ret < 0)
1635  return ret;
1636  }
1637 
1638  if (avctx->hwaccel) {
1639  const FFHWAccel *hwaccel = ffhwaccel(avctx->hwaccel);
1640  ret = hwaccel->start_frame(avctx, NULL, 0);
1641  if (ret < 0)
1642  return ret;
1643  ret = hwaccel->decode_slice(avctx, pkt->data, pkt->size);
1644  if (ret < 0)
1645  return ret;
1646  ret = hwaccel->end_frame(avctx);
1647  if (ret < 0)
1648  return ret;
1649  goto finish;
1650  }
1651 
1652  // main tile decode loop
1653  memset(s->above_partition_ctx, 0, s->cols);
1654  memset(s->above_skip_ctx, 0, s->cols);
1655  if (s->s.h.keyframe || s->s.h.intraonly) {
1656  memset(s->above_mode_ctx, DC_PRED, s->cols * 2);
1657  } else {
1658  memset(s->above_mode_ctx, NEARESTMV, s->cols);
1659  }
1660  memset(s->above_y_nnz_ctx, 0, s->sb_cols * 16);
1661  memset(s->above_uv_nnz_ctx[0], 0, s->sb_cols * 16 >> s->ss_h);
1662  memset(s->above_uv_nnz_ctx[1], 0, s->sb_cols * 16 >> s->ss_h);
1663  memset(s->above_segpred_ctx, 0, s->cols);
1664  s->pass = s->s.frames[CUR_FRAME].uses_2pass =
1665  avctx->active_thread_type == FF_THREAD_FRAME && s->s.h.refreshctx && !s->s.h.parallelmode;
1666  if ((ret = update_block_buffers(avctx)) < 0) {
1667  av_log(avctx, AV_LOG_ERROR,
1668  "Failed to allocate block buffers\n");
1669  return ret;
1670  }
1671  if (s->s.h.refreshctx && s->s.h.parallelmode) {
1672  int j, k, l, m;
1673 
1674  for (i = 0; i < 4; i++) {
1675  for (j = 0; j < 2; j++)
1676  for (k = 0; k < 2; k++)
1677  for (l = 0; l < 6; l++)
1678  for (m = 0; m < 6; m++)
1679  memcpy(s->prob_ctx[s->s.h.framectxid].coef[i][j][k][l][m],
1680  s->prob.coef[i][j][k][l][m], 3);
1681  if (s->s.h.txfmmode == i)
1682  break;
1683  }
1684  s->prob_ctx[s->s.h.framectxid].p = s->prob.p;
1685  ff_thread_finish_setup(avctx);
1686  } else if (!s->s.h.refreshctx) {
1687  ff_thread_finish_setup(avctx);
1688  }
1689 
1690 #if HAVE_THREADS
1691  if (avctx->active_thread_type & FF_THREAD_SLICE) {
1692  for (i = 0; i < s->sb_rows; i++)
1693  atomic_store(&s->entries[i], 0);
1694  }
1695 #endif
1696 
1697  do {
1698  for (i = 0; i < s->active_tile_cols; i++) {
1699  s->td[i].b = s->td[i].b_base;
1700  s->td[i].block = s->td[i].block_base;
1701  s->td[i].uvblock[0] = s->td[i].uvblock_base[0];
1702  s->td[i].uvblock[1] = s->td[i].uvblock_base[1];
1703  s->td[i].eob = s->td[i].eob_base;
1704  s->td[i].uveob[0] = s->td[i].uveob_base[0];
1705  s->td[i].uveob[1] = s->td[i].uveob_base[1];
1706  s->td[i].error_info = 0;
1707  }
1708 
1709 #if HAVE_THREADS
1710  if (avctx->active_thread_type == FF_THREAD_SLICE) {
1711  int tile_row, tile_col;
1712 
1713  av_assert1(!s->pass);
1714 
1715  for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
1716  for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
1717  int64_t tile_size;
1718 
1719  if (tile_col == s->s.h.tiling.tile_cols - 1 &&
1720  tile_row == s->s.h.tiling.tile_rows - 1) {
1721  tile_size = size;
1722  } else {
1723  tile_size = AV_RB32(data);
1724  data += 4;
1725  size -= 4;
1726  }
1727  if (tile_size > size)
1728  return AVERROR_INVALIDDATA;
1729  ret = ff_vpx_init_range_decoder(&s->td[tile_col].c_b[tile_row], data, tile_size);
1730  if (ret < 0)
1731  return ret;
1732  if (vpx_rac_get_prob_branchy(&s->td[tile_col].c_b[tile_row], 128)) // marker bit
1733  return AVERROR_INVALIDDATA;
1734  data += tile_size;
1735  size -= tile_size;
1736  }
1737  }
1738 
1739  ff_slice_thread_execute_with_mainfunc(avctx, decode_tiles_mt, loopfilter_proc, s->td, NULL, s->s.h.tiling.tile_cols);
1740  } else
1741 #endif
1742  {
1743  ret = decode_tiles(avctx, data, size);
1744  if (ret < 0) {
1745  ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
1746  return ret;
1747  }
1748  }
1749 
1750  // Sum all counts fields into td[0].counts for tile threading
1751  if (avctx->active_thread_type == FF_THREAD_SLICE)
1752  for (i = 1; i < s->s.h.tiling.tile_cols; i++)
1753  for (j = 0; j < sizeof(s->td[i].counts) / sizeof(unsigned); j++)
1754  ((unsigned *)&s->td[0].counts)[j] += ((unsigned *)&s->td[i].counts)[j];
1755 
1756  if (s->pass < 2 && s->s.h.refreshctx && !s->s.h.parallelmode) {
1758  ff_thread_finish_setup(avctx);
1759  }
1760  } while (s->pass++ == 1);
1761  ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
1762 
1763  if (s->td->error_info < 0) {
1764  av_log(avctx, AV_LOG_ERROR, "Failed to decode tile data\n");
1765  s->td->error_info = 0;
1766  return AVERROR_INVALIDDATA;
1767  }
1769  ret = vp9_export_enc_params(s, &s->s.frames[CUR_FRAME]);
1770  if (ret < 0)
1771  return ret;
1772  }
1773 
1774 finish:
1775  // ref frame setup
1776  for (i = 0; i < 8; i++) {
1777  if (s->s.refs[i].f->buf[0])
1778  ff_thread_release_ext_buffer(avctx, &s->s.refs[i]);
1779  if (s->next_refs[i].f->buf[0] &&
1780  (ret = ff_thread_ref_frame(&s->s.refs[i], &s->next_refs[i])) < 0)
1781  return ret;
1782  }
1783 
1784  if (!s->s.h.invisible) {
1785  if ((ret = av_frame_ref(frame, s->s.frames[CUR_FRAME].tf.f)) < 0)
1786  return ret;
1787  *got_frame = 1;
1788  }
1789 
1790  return pkt->size;
1791 }
1792 
1794 {
1795  VP9Context *s = avctx->priv_data;
1796  int i;
1797 
1798  for (i = 0; i < 3; i++)
1799  vp9_frame_unref(avctx, &s->s.frames[i]);
1800  for (i = 0; i < 8; i++)
1801  ff_thread_release_ext_buffer(avctx, &s->s.refs[i]);
1802 
1803  if (FF_HW_HAS_CB(avctx, flush))
1804  FF_HW_SIMPLE_CALL(avctx, flush);
1805 }
1806 
1808 {
1809  VP9Context *s = avctx->priv_data;
1810  int ret;
1811 
1812  s->last_bpp = 0;
1813  s->s.h.filter.sharpness = -1;
1814 
1815 #if HAVE_THREADS
1816  if (avctx->active_thread_type & FF_THREAD_SLICE) {
1817  ret = ff_pthread_init(s, vp9_context_offsets);
1818  if (ret < 0)
1819  return ret;
1820  }
1821 #endif
1822 
1823  for (int i = 0; i < 3; i++) {
1824  s->s.frames[i].tf.f = av_frame_alloc();
1825  if (!s->s.frames[i].tf.f)
1826  return AVERROR(ENOMEM);
1827  }
1828  for (int i = 0; i < 8; i++) {
1829  s->s.refs[i].f = av_frame_alloc();
1830  s->next_refs[i].f = av_frame_alloc();
1831  if (!s->s.refs[i].f || !s->next_refs[i].f)
1832  return AVERROR(ENOMEM);
1833  }
1834  return 0;
1835 }
1836 
1837 #if HAVE_THREADS
/**
 * Frame-threading context transfer: copy the decoding state a dependent
 * frame thread needs from the source thread's context — the three internal
 * frames, the reference slots (taken from the source's next_refs, i.e. the
 * post-frame state), and the header/format fields plus probability contexts
 * that persist across frames.
 */
static int vp9_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
    int i, ret;
    VP9Context *s = dst->priv_data, *ssrc = src->priv_data;

    for (i = 0; i < 3; i++) {
        if (s->s.frames[i].tf.f->buf[0])
            vp9_frame_unref(dst, &s->s.frames[i]);
        if (ssrc->s.frames[i].tf.f->buf[0]) {
            if ((ret = vp9_frame_ref(dst, &s->s.frames[i], &ssrc->s.frames[i])) < 0)
                return ret;
        }
    }
    for (i = 0; i < 8; i++) {
        if (s->s.refs[i].f->buf[0])
            ff_thread_release_ext_buffer(dst, &s->s.refs[i]);
        // next_refs holds the reference state after the source frame finished
        if (ssrc->next_refs[i].f->buf[0]) {
            if ((ret = ff_thread_ref_frame(&s->s.refs[i], &ssrc->next_refs[i])) < 0)
                return ret;
        }
    }

    // scalar header/format state carried across frames
    s->s.h.invisible = ssrc->s.h.invisible;
    s->s.h.keyframe = ssrc->s.h.keyframe;
    s->s.h.intraonly = ssrc->s.h.intraonly;
    s->ss_v = ssrc->ss_v;
    s->ss_h = ssrc->ss_h;
    s->s.h.segmentation.enabled = ssrc->s.h.segmentation.enabled;
    s->s.h.segmentation.update_map = ssrc->s.h.segmentation.update_map;
    s->s.h.segmentation.absolute_vals = ssrc->s.h.segmentation.absolute_vals;
    s->bytesperpixel = ssrc->bytesperpixel;
    s->gf_fmt = ssrc->gf_fmt;
    s->w = ssrc->w;
    s->h = ssrc->h;
    s->s.h.bpp = ssrc->s.h.bpp;
    s->bpp_index = ssrc->bpp_index;
    s->pix_fmt = ssrc->pix_fmt;
    // adapted probability contexts and per-frame deltas
    memcpy(&s->prob_ctx, &ssrc->prob_ctx, sizeof(s->prob_ctx));
    memcpy(&s->s.h.lf_delta, &ssrc->s.h.lf_delta, sizeof(s->s.h.lf_delta));
    memcpy(&s->s.h.segmentation.feat, &ssrc->s.h.segmentation.feat,
           sizeof(s->s.h.segmentation.feat));

    return 0;
}
1882 #endif
1883 
1885  .p.name = "vp9",
1886  CODEC_LONG_NAME("Google VP9"),
1887  .p.type = AVMEDIA_TYPE_VIDEO,
1888  .p.id = AV_CODEC_ID_VP9,
1889  .priv_data_size = sizeof(VP9Context),
1890  .init = vp9_decode_init,
1891  .close = vp9_decode_free,
1894  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
1897  .flush = vp9_decode_flush,
1898  UPDATE_THREAD_CONTEXT(vp9_decode_update_thread_context),
1899  .p.profiles = NULL_IF_CONFIG_SMALL(ff_vp9_profiles),
1900  .bsfs = "vp9_superframe_split",
1901  .hw_configs = (const AVCodecHWConfigInternal *const []) {
1902 #if CONFIG_VP9_DXVA2_HWACCEL
1903  HWACCEL_DXVA2(vp9),
1904 #endif
1905 #if CONFIG_VP9_D3D11VA_HWACCEL
1906  HWACCEL_D3D11VA(vp9),
1907 #endif
1908 #if CONFIG_VP9_D3D11VA2_HWACCEL
1909  HWACCEL_D3D11VA2(vp9),
1910 #endif
1911 #if CONFIG_VP9_NVDEC_HWACCEL
1912  HWACCEL_NVDEC(vp9),
1913 #endif
1914 #if CONFIG_VP9_VAAPI_HWACCEL
1915  HWACCEL_VAAPI(vp9),
1916 #endif
1917 #if CONFIG_VP9_VDPAU_HWACCEL
1918  HWACCEL_VDPAU(vp9),
1919 #endif
1920 #if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
1921  HWACCEL_VIDEOTOOLBOX(vp9),
1922 #endif
1923  NULL
1924  },
1925 };
AVVideoEncParams::qp
int32_t qp
Base quantisation parameter for the frame.
Definition: video_enc_params.h:103
hwconfig.h
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1435
av_buffer_pool_init
AVBufferPool * av_buffer_pool_init(size_t size, AVBufferRef *(*alloc)(size_t size))
Allocate and initialize a buffer pool.
Definition: buffer.c:280
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:253
FF_CODEC_CAP_SLICE_THREAD_HAS_MF
#define FF_CODEC_CAP_SLICE_THREAD_HAS_MF
Codec initializes slice-based threading with a main function.
Definition: codec_internal.h:64
decode_tiles
static int decode_tiles(AVCodecContext *avctx, const uint8_t *data, int size)
Definition: vp9.c:1270
td
#define td
Definition: regdef.h:70
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
vp9_frame_alloc
static int vp9_frame_alloc(AVCodecContext *avctx, VP9Frame *f)
Definition: vp9.c:108
atomic_store
#define atomic_store(object, desired)
Definition: stdatomic.h:85
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
r
const char * r
Definition: vf_curves.c:126
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
PRED_SWITCHABLE
@ PRED_SWITCHABLE
Definition: vp9shared.h:52
PRED_SINGLEREF
@ PRED_SINGLEREF
Definition: vp9shared.h:50
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1029
ff_thread_release_ext_buffer
void ff_thread_release_ext_buffer(AVCodecContext *avctx, ThreadFrame *f)
Unref a ThreadFrame.
Definition: pthread_frame.c:1023
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1268
VP9Frame::segmentation_map
uint8_t * segmentation_map
Definition: vp9shared.h:68
VP9Frame
Definition: vp9shared.h:65
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
av_clip_uintp2
#define av_clip_uintp2
Definition: common.h:120
ff_vp9_decoder
const FFCodec ff_vp9_decoder
Definition: vp9.c:1884
decode_sb
static void decode_sb(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
Definition: vp9.c:1099
ff_vp9_adapt_probs
void ff_vp9_adapt_probs(VP9Context *s)
Definition: vp9prob.c:44
vp9_decode_flush
static void vp9_decode_flush(AVCodecContext *avctx)
Definition: vp9.c:1793
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:266
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:100
PRED_COMPREF
@ PRED_COMPREF
Definition: vp9shared.h:51
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:340
pixdesc.h
w
uint8_t w
Definition: llviddspenc.c:38
HWACCEL_DXVA2
#define HWACCEL_DXVA2(codec)
Definition: hwconfig.h:64
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:673
BlockPartition
BlockPartition
Definition: vp9shared.h:35
AVPacket::data
uint8_t * data
Definition: packet.h:374
DC_PRED
@ DC_PRED
Definition: vp9.h:48
HWACCEL_D3D11VA2
#define HWACCEL_D3D11VA2(codec)
Definition: hwconfig.h:66
b
#define b
Definition: input.c:41
data
const char data[16]
Definition: mxf.c:148
update_size
static int update_size(AVCodecContext *avctx, int w, int h)
Definition: vp9.c:180
decode_sb_mem
static void decode_sb_mem(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
Definition: vp9.c:1178
REF_FRAME_SEGMAP
#define REF_FRAME_SEGMAP
Definition: vp9shared.h:171
decode_frame_header
static int decode_frame_header(AVCodecContext *avctx, const uint8_t *data, int size, int *ref)
Definition: vp9.c:514
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:468
atomic_int
intptr_t atomic_int
Definition: stdatomic.h:55
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:247
FFCodec
Definition: codec_internal.h:127
AVCOL_SPC_RGB
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB), YZX and ST 428-1
Definition: pixfmt.h:600
FF_HW_SIMPLE_CALL
#define FF_HW_SIMPLE_CALL(avctx, function)
Definition: hwaccel_internal.h:173
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
max
#define max(a, b)
Definition: cuda_runtime.h:33
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
VP9_SYNCCODE
#define VP9_SYNCCODE
Definition: vp9.c:47
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
vp89_rac.h
VP9Frame::hwaccel_priv_buf
AVBufferRef * hwaccel_priv_buf
Definition: vp9shared.h:72
VP9Filter
Definition: vp9dec.h:79
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:93
VPXRangeCoder
Definition: vpx_rac.h:35
thread.h
ff_pthread_free
av_cold void ff_pthread_free(void *obj, const unsigned offsets[])
Definition: pthread.c:91
FILTER_SWITCHABLE
@ FILTER_SWITCHABLE
Definition: vp9.h:70
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
VP9Block
Definition: vp9dec.h:85
VP9Frame::tf
ThreadFrame tf
Definition: vp9shared.h:66
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:381
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:605
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:335
AVCOL_SPC_RESERVED
@ AVCOL_SPC_RESERVED
reserved for future use by ITU-T and ISO/IEC just like 15-255 are
Definition: pixfmt.h:603
VP9Frame::extradata
AVBufferRef * extradata
Definition: vp9shared.h:67
TX_SWITCHABLE
@ TX_SWITCHABLE
Definition: vp9.h:33
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
finish
static void finish(void)
Definition: movenc.c:342
FFHWAccel
Definition: hwaccel_internal.h:33
ff_vp9_ac_qlookup
const int16_t ff_vp9_ac_qlookup[3][256]
Definition: vp9data.c:334
AVVideoEncParams::delta_qp
int32_t delta_qp[4][2]
Quantisation parameter offset from the base (per-frame) qp for a given plane (first index) and AC/DC ...
Definition: video_enc_params.h:109
fail
#define fail()
Definition: checkasm.h:138
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:484
GetBitContext
Definition: get_bits.h:108
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:521
FFHWAccel::end_frame
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
Definition: hwaccel_internal.h:96
HWACCEL_VDPAU
#define HWACCEL_VDPAU(codec)
Definition: hwconfig.h:72
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:39
PARTITION_NONE
@ PARTITION_NONE
Definition: vp9shared.h:36
VP9Frame::hwaccel_picture_private
void * hwaccel_picture_private
Definition: vp9shared.h:73
AVVideoEncParams
Video encoding parameters for a given frame.
Definition: video_enc_params.h:73
vp9_decode_free
static av_cold int vp9_decode_free(AVCodecContext *avctx)
Definition: vp9.c:1244
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:88
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:471
FFHWAccel::start_frame
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: hwaccel_internal.h:58
avassert.h
ff_vp9_model_pareto8
const uint8_t ff_vp9_model_pareto8[256][8]
Definition: vp9data.c:1176
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:545
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1903
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:628
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:586
BL_8X8
@ BL_8X8
Definition: vp9shared.h:80
av_buffer_pool_get
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:384
PARTITION_V
@ PARTITION_V
Definition: vp9shared.h:38
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:306
vp9_frame_ref
static int vp9_frame_ref(AVCodecContext *avctx, VP9Frame *dst, VP9Frame *src)
Definition: vp9.c:150
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:127
s
#define s(width, name)
Definition: cbs_vp9.c:198
AVCOL_SPC_SMPTE170M
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
Definition: pixfmt.h:606
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:421
AV_CODEC_ID_VP9
@ AV_CODEC_ID_VP9
Definition: codec_id.h:220
vp9data.h
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts_bsf.c:365
bits
uint8_t bits
Definition: vp3data.h:128
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:297
decode.h
get_bits.h
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
ff_vp9dsp_init
av_cold void ff_vp9dsp_init(VP9DSPContext *dsp, int bpp, int bitexact)
Definition: vp9dsp.c:88
ff_vp9_partition_tree
const int8_t ff_vp9_partition_tree[3][2]
Definition: vp9data.c:35
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
vp9_decode_frame
static int vp9_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
Definition: vp9.c:1561
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:272
ff_thread_ref_frame
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
Definition: utils.c:870
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:110
threadframe.h
NULL
#define NULL
Definition: coverity.c:32
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1039
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
hwaccel_internal.h
VP9Context
Definition: vp9dec.h:97
REF_FRAME_MVPAIR
#define REF_FRAME_MVPAIR
Definition: vp9shared.h:170
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:388
vp89_rac_get_uint
static av_unused int vp89_rac_get_uint(VPXRangeCoder *c, int bits)
Definition: vp89_rac.h:41
profiles.h
av_buffer_pool_uninit
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:322
AV_PIX_FMT_YUV440P10
#define AV_PIX_FMT_YUV440P10
Definition: pixfmt.h:470
pthread_internal.h
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:281
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:469
VP9mv
Definition: vp9shared.h:55
PARTITION_SPLIT
@ PARTITION_SPLIT
Definition: vp9shared.h:39
FF_HW_HAS_CB
#define FF_HW_HAS_CB(avctx, function)
Definition: hwaccel_internal.h:176
atomic_load_explicit
#define atomic_load_explicit(object, order)
Definition: stdatomic.h:96
ff_hwaccel_frame_priv_alloc
int ff_hwaccel_frame_priv_alloc(AVCodecContext *avctx, void **hwaccel_picture_private, AVBufferRef **hwaccel_priv_buf)
Allocate a hwaccel frame private data if the provided avctx uses a hwaccel method that needs it.
Definition: decode.c:1800
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
pthread_mutex_unlock
#define pthread_mutex_unlock(a)
Definition: ffprobe.c:79
av_video_enc_params_create_side_data
AVVideoEncParams * av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type, unsigned int nb_blocks)
Allocates memory for AVEncodeInfoFrame plus an array of.
Definition: video_enc_params.c:58
vp9.h
VP9Frame::uses_2pass
int uses_2pass
Definition: vp9shared.h:70
f
f
Definition: af_crystalizer.c:121
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:375
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:106
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:361
codec_internal.h
pix_fmt_rgb
static enum AVPixelFormat pix_fmt_rgb[3]
Definition: libdav1d.c:65
REF_INVALID_SCALE
#define REF_INVALID_SCALE
Definition: vp9dec.h:43
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
read_colorspace_details
static int read_colorspace_details(AVCodecContext *avctx)
Definition: vp9.c:452
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:473
size
int size
Definition: twinvq_data.h:10344
vp9_alloc_entries
static int vp9_alloc_entries(AVCodecContext *avctx, int n)
Definition: vp9.c:89
atomic_fetch_add_explicit
#define atomic_fetch_add_explicit(object, operand, order)
Definition: stdatomic.h:149
FF_CODEC_CAP_ALLOCATE_PROGRESS
#define FF_CODEC_CAP_ALLOCATE_PROGRESS
Definition: codec_internal.h:69
free_buffers
static void free_buffers(VP9Context *s)
Definition: vp9.c:1235
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:475
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1544
AVCodecHWConfigInternal
Definition: hwconfig.h:25
TX_4X4
@ TX_4X4
Definition: vp9.h:28
update_block_buffers
static int update_block_buffers(AVCodecContext *avctx)
Definition: vp9.c:317
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:373
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:114
HWACCEL_D3D11VA
#define HWACCEL_D3D11VA(codec)
Definition: hwconfig.h:78
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:333
inv_recenter_nonneg
static av_always_inline int inv_recenter_nonneg(int v, int m)
Definition: vp9.c:384
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:68
vpx_rac_is_end
static av_always_inline int vpx_rac_is_end(VPXRangeCoder *c)
returns 1 if the end of the stream has been reached, 0 otherwise.
Definition: vpx_rac.h:51
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:119
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1543
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:187
ff_slice_thread_execute_with_mainfunc
int ff_slice_thread_execute_with_mainfunc(AVCodecContext *avctx, action_func2 *func2, main_func *mainfunc, void *arg, int *ret, int job_count)
Definition: pthread_slice.c:126
AVCOL_SPC_SMPTE240M
@ AVCOL_SPC_SMPTE240M
derived from 170M primaries and D65 white point, 170M is derived from BT470 System M's primaries
Definition: pixfmt.h:607
assign
#define assign(var, type, n)
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:302
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:67
update_prob
static int update_prob(VPXRangeCoder *c, int p)
Definition: vp9.c:394
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:244
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:367
DEFINE_OFFSET_ARRAY
#define DEFINE_OFFSET_ARRAY(type, name, cnt_variable, mutexes, conds)
Definition: pthread_internal.h:61
AVCodecContext::properties
unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1902
AVCOL_SPC_BT2020_NCL
@ AVCOL_SPC_BT2020_NCL
ITU-R BT2020 non-constant luminance system.
Definition: pixfmt.h:610
vpx_rac.h
decode012
static int BS_FUNC() decode012(BSCTX *bc)
Return decoded truncated unary code for the values 0, 1, 2.
Definition: bitstream_template.h:436
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:485
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:599
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
vpx_rac_get_prob_branchy
static av_always_inline int vpx_rac_get_prob_branchy(VPXRangeCoder *c, int prob)
Definition: vpx_rac.h:99
AVVideoBlockParams
Data structure for storing block-level encoding information.
Definition: video_enc_params.h:120
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:254
get_sbits_inv
static av_always_inline int get_sbits_inv(GetBitContext *gb, int n)
Definition: vp9.c:378
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:194
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:602
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:658
ff_thread_get_ext_buffer
int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around ff_get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:981
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:656
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:262
HWACCEL_VIDEOTOOLBOX
#define HWACCEL_VIDEOTOOLBOX(codec)
Definition: hwconfig.h:74
avcodec.h
limit
static double limit(double x)
Definition: vf_pseudocolor.c:142
vp89_rac_get_tree
static av_always_inline int vp89_rac_get_tree(VPXRangeCoder *c, const int8_t(*tree)[2], const uint8_t *probs)
Definition: vp89_rac.h:54
BL_64X64
@ BL_64X64
Definition: vp9shared.h:77
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
vp9_decode_init
static av_cold int vp9_decode_init(AVCodecContext *avctx)
Definition: vp9.c:1807
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:561
ff_vpx_init_range_decoder
int ff_vpx_init_range_decoder(VPXRangeCoder *c, const uint8_t *buf, int buf_size)
Definition: vpx_rac.c:42
vp9_tile_data_free
static void vp9_tile_data_free(VP9TileData *td)
Definition: vp9.c:92
ff_thread_finish_setup
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards. If some code can 't be moved
VP9mvrefPair
Definition: vp9shared.h:60
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:472
pthread_cond_signal
static av_always_inline int pthread_cond_signal(pthread_cond_t *cond)
Definition: os2threads.h:152
AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
Decoding only.
Definition: avcodec.h:411
VP9TileData
Definition: vp9dec.h:168
vp89_rac_get
static av_always_inline int vp89_rac_get(VPXRangeCoder *c)
Definition: vp89_rac.h:36
AVCodecContext
main external API structure.
Definition: avcodec.h:441
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1551
VP9Filter::mask
uint8_t mask[2][2][8][4]
Definition: vp9dec.h:82
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
vp9_frame_unref
static void vp9_frame_unref(AVCodecContext *avctx, VP9Frame *f)
Definition: vp9.c:99
VP9Frame::mv
VP9mvrefPair * mv
Definition: vp9shared.h:69
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1596
ffhwaccel
static const FFHWAccel * ffhwaccel(const AVHWAccel *codec)
Definition: hwaccel_internal.h:165
FFHWAccel::decode_slice
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: hwaccel_internal.h:85
ff_vp9_decode_block
void ff_vp9_decode_block(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl, enum BlockPartition bp)
Definition: vp9block.c:1263
NEARESTMV
@ NEARESTMV
Definition: vp9shared.h:43
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:112
BlockLevel
BlockLevel
Definition: vp9shared.h:76
AVCodecContext::export_side_data
int export_side_data
Bit set of AV_CODEC_EXPORT_DATA_* flags, which affects the kind of metadata exported in frame,...
Definition: avcodec.h:2055
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
ff_pthread_init
av_cold int ff_pthread_init(void *obj, const unsigned offsets[])
Initialize/destroy a list of mutexes/conditions contained in a structure.
Definition: pthread.c:104
pthread_cond_wait
static av_always_inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
Definition: os2threads.h:192
vp9dec.h
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:280
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
ff_vp9_default_kf_partition_probs
const uint8_t ff_vp9_default_kf_partition_probs[4][4][3]
Definition: vp9data.c:41
AV_VIDEO_ENC_PARAMS_VP9
@ AV_VIDEO_ENC_PARAMS_VP9
VP9 stores:
Definition: video_enc_params.h:44
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
flush
void(* flush)(AVBSFContext *ctx)
Definition: dts2pts_bsf.c:367
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:338
ff_vp9_default_probs
const ProbContext ff_vp9_default_probs
Definition: vp9data.c:1435
CUR_FRAME
#define CUR_FRAME
Definition: vp9shared.h:169
vp9_export_enc_params
static int vp9_export_enc_params(VP9Context *s, VP9Frame *frame)
Definition: vp9.c:1509
AVPacket
This structure stores compressed data.
Definition: packet.h:351
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:468
PARTITION_H
@ PARTITION_H
Definition: vp9shared.h:37
ff_vp9_loopfilter_sb
void ff_vp9_loopfilter_sb(AVCodecContext *avctx, VP9Filter *lflvl, int row, int col, ptrdiff_t yoff, ptrdiff_t uvoff)
Definition: vp9lpf.c:178
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
videodsp.h
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:70
d
d
Definition: ffmpeg_filter.c:331
HWACCEL_MAX
#define HWACCEL_MAX
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_video_enc_params_block
static av_always_inline AVVideoBlockParams * av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
Definition: video_enc_params.h:143
AV_PIX_FMT_YUV440P12
#define AV_PIX_FMT_YUV440P12
Definition: pixfmt.h:474
h
h
Definition: vp9dsp_template.c:2038
atomic_init
#define atomic_init(obj, value)
Definition: stdatomic.h:33
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
Definition: pixfmt.h:601
ff_vp9_profiles
const AVProfile ff_vp9_profiles[]
Definition: profiles.c:152
video_enc_params.h
set_tile_offset
static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
Definition: vp9.c:1227
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2884
ff_vp9_dc_qlookup
const int16_t ff_vp9_dc_qlookup[3][256]
Definition: vp9data.c:231
pthread_mutex_lock
#define pthread_mutex_lock(a)
Definition: ffprobe.c:75
ff_vp9_default_coef_probs
const uint8_t ff_vp9_default_coef_probs[4][2][2][6][6][3]
Definition: vp9data.c:1540