FFmpeg
vp9.c
Go to the documentation of this file.
1 /*
2  * VP9 compatible video decoder
3  *
4  * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
5  * Copyright (C) 2013 Clément Bœsch <u pkh me>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include "config_components.h"
25 
26 #include "avcodec.h"
27 #include "codec_internal.h"
28 #include "get_bits.h"
29 #include "hwconfig.h"
30 #include "internal.h"
31 #include "profiles.h"
32 #include "thread.h"
33 #include "threadframe.h"
34 #include "pthread_internal.h"
35 
36 #include "videodsp.h"
37 #include "vp89_rac.h"
38 #include "vp9.h"
39 #include "vp9data.h"
40 #include "vp9dec.h"
41 #include "vpx_rac.h"
42 #include "libavutil/avassert.h"
43 #include "libavutil/pixdesc.h"
45 
46 #define VP9_SYNCCODE 0x498342
47 
48 #if HAVE_THREADS
/* Offset table for the mutex/cond pair embedded in VP9Context, presumably
 * consumed by the generic ff_pthread_* init/free helpers declared in
 * pthread_internal.h — confirm against that header. */
DEFINE_OFFSET_ARRAY(VP9Context, vp9_context, pthread_init_cnt,
                    (offsetof(VP9Context, progress_mutex)),
                    (offsetof(VP9Context, progress_cond)));
52 
53 static int vp9_alloc_entries(AVCodecContext *avctx, int n) {
54  VP9Context *s = avctx->priv_data;
55  int i;
56 
57  if (avctx->active_thread_type & FF_THREAD_SLICE) {
58  if (s->entries)
59  av_freep(&s->entries);
60 
61  s->entries = av_malloc_array(n, sizeof(atomic_int));
62  if (!s->entries)
63  return AVERROR(ENOMEM);
64 
65  for (i = 0; i < n; i++)
66  atomic_init(&s->entries[i], 0);
67  }
68  return 0;
69 }
70 
/* Publish decoding progress: add n to the counter selected by `field` and
 * wake any thread blocked in vp9_await_tile_progress().
 * The release store pairs with the acquire load on the waiter's fast path;
 * the signal is issued under the mutex so a waiter cannot miss it. */
static void vp9_report_tile_progress(VP9Context *s, int field, int n) {
    pthread_mutex_lock(&s->progress_mutex);
    atomic_fetch_add_explicit(&s->entries[field], n, memory_order_release);
    pthread_cond_signal(&s->progress_cond);
    pthread_mutex_unlock(&s->progress_mutex);
}
77 
/* Block until the progress counter selected by `field` reaches n.
 * Fast path: a lock-free acquire load lets callers skip the mutex entirely
 * once the target has already been reached or passed. */
static void vp9_await_tile_progress(VP9Context *s, int field, int n) {
    if (atomic_load_explicit(&s->entries[field], memory_order_acquire) >= n)
        return;

    pthread_mutex_lock(&s->progress_mutex);
    /* NOTE(review): the wait condition uses `!= n` while the fast path uses
     * `>= n`; this only terminates if the counter hits n exactly — confirm
     * that reported increments can never jump past the awaited value. */
    while (atomic_load_explicit(&s->entries[field], memory_order_relaxed) != n)
        pthread_cond_wait(&s->progress_cond, &s->progress_mutex);
    pthread_mutex_unlock(&s->progress_mutex);
}
87 #else
88 static int vp9_alloc_entries(AVCodecContext *avctx, int n) { return 0; }
89 #endif
90 
/* Free a tile's per-thread scratch buffers (block list, coefficient buffer,
 * block-structure export array); av_freep() NULL-safely clears each pointer.
 * NOTE(review): the signature line was lost in extraction — upstream this is
 * static void vp9_tile_data_free(VP9TileData *td); confirm against the
 * original file. */
{
    av_freep(&td->b_base);
    av_freep(&td->block_base);
    av_freep(&td->block_structure);
}
97 
/* Release everything owned by a VP9Frame: the threaded frame buffer, the
 * extradata buffer (segmentation map + motion vectors) and the hwaccel
 * private buffer.
 * NOTE(review): signature stripped by extraction — upstream this is
 * static void vp9_frame_unref(AVCodecContext *avctx, VP9Frame *f). */
{
    ff_thread_release_ext_buffer(avctx, &f->tf);
    av_buffer_unref(&f->extradata);
    av_buffer_unref(&f->hwaccel_priv_buf);
    /* these were raw aliases into the buffers released above */
    f->segmentation_map = NULL;
    f->hwaccel_picture_private = NULL;
}
106 
/* Allocate a frame plus its per-frame extradata (segmentation map followed
 * by per-block motion vector pairs) from a size-matched buffer pool, and
 * the hwaccel private buffer when a hwaccel is active.
 * Returns 0 on success, AVERROR(ENOMEM) (or the buffer-get error) on failure;
 * on failure the frame is fully unreferenced.
 * NOTE(review): signature stripped by extraction — upstream this is
 * static int vp9_frame_alloc(AVCodecContext *avctx, VP9Frame *f). */
{
    VP9Context *s = avctx->priv_data;
    int ret, sz;

    /* NOTE(review): the call that obtains the frame buffer and sets `ret`
     * (upstream: ff_thread_get_ext_buffer(avctx, &f->tf, ...)) was lost in
     * extraction. */
    if (ret < 0)
        return ret;

    /* 64 segmentation-map bytes per 64x64 superblock (one per 8x8 block) */
    sz = 64 * s->sb_cols * s->sb_rows;
    if (sz != s->frame_extradata_pool_size) {
        /* dimensions changed: rebuild the pool for the new entry size */
        av_buffer_pool_uninit(&s->frame_extradata_pool);
        s->frame_extradata_pool = av_buffer_pool_init(sz * (1 + sizeof(VP9mvrefPair)), NULL);
        if (!s->frame_extradata_pool) {
            s->frame_extradata_pool_size = 0;
            goto fail;
        }
        s->frame_extradata_pool_size = sz;
    }
    f->extradata = av_buffer_pool_get(s->frame_extradata_pool);
    if (!f->extradata) {
        goto fail;
    }
    /* pooled buffers may still hold data from a previous frame */
    memset(f->extradata->data, 0, f->extradata->size);

    f->segmentation_map = f->extradata->data;
    f->mv = (VP9mvrefPair *) (f->extradata->data + sz);

    if (avctx->hwaccel) {
        const AVHWAccel *hwaccel = avctx->hwaccel;
        av_assert0(!f->hwaccel_picture_private);
        if (hwaccel->frame_priv_data_size) {
            f->hwaccel_priv_buf = av_buffer_allocz(hwaccel->frame_priv_data_size);
            if (!f->hwaccel_priv_buf)
                goto fail;
            f->hwaccel_picture_private = f->hwaccel_priv_buf->data;
        }
    }

    return 0;

fail:
    vp9_frame_unref(avctx, f);
    return AVERROR(ENOMEM);
}
152 
/* Make dst a new reference to src: ref-count the frame buffer, the extradata
 * and the hwaccel private buffer, and copy the alias pointers/flags.
 * On any failure dst is fully unreferenced and an error is returned.
 * NOTE(review): signature stripped by extraction — upstream this is
 * static int vp9_frame_ref(AVCodecContext *avctx, VP9Frame *dst, VP9Frame *src). */
{
    int ret;

    ret = ff_thread_ref_frame(&dst->tf, &src->tf);
    if (ret < 0)
        return ret;

    dst->extradata = av_buffer_ref(src->extradata);
    if (!dst->extradata)
        goto fail;

    /* plain aliases into src->extradata, which dst now co-owns */
    dst->segmentation_map = src->segmentation_map;
    dst->mv = src->mv;
    dst->uses_2pass = src->uses_2pass;

    if (src->hwaccel_picture_private) {
        dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
        if (!dst->hwaccel_priv_buf)
            goto fail;
        /* NOTE(review): a line was lost in extraction here — upstream sets
         * dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data. */
    }

    return 0;

fail:
    vp9_frame_unref(avctx, dst);
    return AVERROR(ENOMEM);
}
182 
/* Handle a (possible) change of frame dimensions or pixel format: negotiate
 * the output format (including hwaccel formats), recompute block/superblock
 * grid sizes and reallocate the single slab that backs all of the "above"
 * context arrays, intra prediction rows and loop-filter levels.
 * Returns 0 on success or a negative AVERROR. */
static int update_size(AVCodecContext *avctx, int w, int h)
{
#define HWACCEL_MAX (CONFIG_VP9_DXVA2_HWACCEL + \
                     CONFIG_VP9_D3D11VA_HWACCEL * 2 + \
                     CONFIG_VP9_NVDEC_HWACCEL + \
                     CONFIG_VP9_VAAPI_HWACCEL + \
                     CONFIG_VP9_VDPAU_HWACCEL + \
                     CONFIG_VP9_VIDEOTOOLBOX_HWACCEL)
    enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;
    VP9Context *s = avctx->priv_data;
    uint8_t *p;
    int bytesperpixel = s->bytesperpixel, ret, cols, rows;
    int lflvl_len, i;

    av_assert0(w > 0 && h > 0);

    /* only renegotiate when format or dimensions actually changed */
    if (!(s->pix_fmt == s->gf_fmt && w == s->w && h == s->h)) {
        if ((ret = ff_set_dimensions(avctx, w, h)) < 0)
            return ret;

        /* build the hwaccel candidate list for the software format */
        switch (s->pix_fmt) {
        case AV_PIX_FMT_YUV420P:
        /* NOTE(review): an additional case label was lost in extraction here
         * (upstream: case AV_PIX_FMT_YUV420P10:). */
#if CONFIG_VP9_DXVA2_HWACCEL
            *fmtp++ = AV_PIX_FMT_DXVA2_VLD;
#endif
#if CONFIG_VP9_D3D11VA_HWACCEL
            *fmtp++ = AV_PIX_FMT_D3D11VA_VLD;
            *fmtp++ = AV_PIX_FMT_D3D11;
#endif
#if CONFIG_VP9_NVDEC_HWACCEL
            *fmtp++ = AV_PIX_FMT_CUDA;
#endif
#if CONFIG_VP9_VAAPI_HWACCEL
            *fmtp++ = AV_PIX_FMT_VAAPI;
#endif
#if CONFIG_VP9_VDPAU_HWACCEL
            *fmtp++ = AV_PIX_FMT_VDPAU;
#endif
#if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
            *fmtp++ = AV_PIX_FMT_VIDEOTOOLBOX;
#endif
            break;
        /* NOTE(review): the case label for this group was lost in extraction
         * (upstream: case AV_PIX_FMT_YUV420P12:). */
#if CONFIG_VP9_NVDEC_HWACCEL
            *fmtp++ = AV_PIX_FMT_CUDA;
#endif
#if CONFIG_VP9_VAAPI_HWACCEL
            *fmtp++ = AV_PIX_FMT_VAAPI;
#endif
#if CONFIG_VP9_VDPAU_HWACCEL
            *fmtp++ = AV_PIX_FMT_VDPAU;
#endif
            break;
        case AV_PIX_FMT_YUV444P:
#if CONFIG_VP9_VAAPI_HWACCEL
            *fmtp++ = AV_PIX_FMT_VAAPI;
#endif
            break;
        }

        /* software format is always the fallback, then the terminator */
        *fmtp++ = s->pix_fmt;
        *fmtp = AV_PIX_FMT_NONE;

        /* NOTE(review): the call that performs the format negotiation and
         * sets `ret` (upstream: ff_thread_get_format(avctx, pix_fmts)) was
         * lost in extraction. */
        if (ret < 0)
            return ret;

        avctx->pix_fmt = ret;
        s->gf_fmt = s->pix_fmt;
        s->w = w;
        s->h = h;
    }

    /* grid in 8x8-block units */
    cols = (w + 7) >> 3;
    rows = (h + 7) >> 3;

    if (s->intra_pred_data[0] && cols == s->cols && rows == s->rows && s->pix_fmt == s->last_fmt)
        return 0;

    s->last_fmt = s->pix_fmt;
    /* superblock grid in 64x64 units */
    s->sb_cols = (w + 63) >> 6;
    s->sb_rows = (h + 63) >> 6;
    s->cols = (w + 7) >> 3;
    s->rows = (h + 7) >> 3;
    /* slice threading needs one loop-filter level struct per sb row */
    lflvl_len = avctx->active_thread_type == FF_THREAD_SLICE ? s->sb_rows : 1;

/* carve typed arrays out of the single slab `p` */
#define assign(var, type, n) var = (type) p; p += s->sb_cols * (n) * sizeof(*var)
    av_freep(&s->intra_pred_data[0]);
    // FIXME we slightly over-allocate here for subsampled chroma, but a little
    // bit of padding shouldn't affect performance...
    p = av_malloc(s->sb_cols * (128 + 192 * bytesperpixel +
                                lflvl_len * sizeof(*s->lflvl) + 16 * sizeof(*s->above_mv_ctx)));
    if (!p)
        return AVERROR(ENOMEM);
    assign(s->intra_pred_data[0], uint8_t *, 64 * bytesperpixel);
    assign(s->intra_pred_data[1], uint8_t *, 64 * bytesperpixel);
    assign(s->intra_pred_data[2], uint8_t *, 64 * bytesperpixel);
    assign(s->above_y_nnz_ctx, uint8_t *, 16);
    assign(s->above_mode_ctx, uint8_t *, 16);
    assign(s->above_mv_ctx, VP9mv(*)[2], 16);
    assign(s->above_uv_nnz_ctx[0], uint8_t *, 16);
    assign(s->above_uv_nnz_ctx[1], uint8_t *, 16);
    assign(s->above_partition_ctx, uint8_t *, 8);
    assign(s->above_skip_ctx, uint8_t *, 8);
    assign(s->above_txfm_ctx, uint8_t *, 8);
    assign(s->above_segpred_ctx, uint8_t *, 8);
    assign(s->above_intra_ctx, uint8_t *, 8);
    assign(s->above_comp_ctx, uint8_t *, 8);
    assign(s->above_ref_ctx, uint8_t *, 8);
    assign(s->above_filter_ctx, uint8_t *, 8);
    assign(s->lflvl, VP9Filter *, lflvl_len);
#undef assign

    /* dimensions changed: per-tile buffers are stale, free them so they get
     * reallocated lazily */
    if (s->td) {
        for (i = 0; i < s->active_tile_cols; i++)
            vp9_tile_data_free(&s->td[i]);
    }

    /* reinit the DSP functions when the bit depth changed */
    if (s->s.h.bpp != s->last_bpp) {
        ff_vp9dsp_init(&s->dsp, s->s.h.bpp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
        ff_videodsp_init(&s->vdsp, s->s.h.bpp);
        s->last_bpp = s->s.h.bpp;
    }

    return 0;
}
310 
/* (Re)allocate the per-tile coefficient/eob/block buffers, sized differently
 * for the 2-pass ("frame-threaded invisible frame") case, where one set of
 * buffers covering every superblock is kept, versus the normal case, where
 * each active tile column gets a one-superblock scratch buffer.
 * NOTE(review): signature stripped by extraction — upstream this is
 * static int update_block_buffers(AVCodecContext *avctx). */
{
    int i;
    VP9Context *s = avctx->priv_data;
    int chroma_blocks, chroma_eobs, bytesperpixel = s->bytesperpixel;
    VP9TileData *td = &s->td[0];

    /* nothing to do if buffers exist and the 2-pass mode did not change */
    if (td->b_base && td->block_base && s->block_alloc_using_2pass == s->s.frames[CUR_FRAME].uses_2pass)
        return 0;

    /* NOTE(review): a line was lost in extraction here — upstream frees the
     * previous buffers via vp9_tile_data_free(td). */
    /* chroma buffer sizes shrink with subsampling */
    chroma_blocks = 64 * 64 >> (s->ss_h + s->ss_v);
    chroma_eobs = 16 * 16 >> (s->ss_h + s->ss_v);
    if (s->s.frames[CUR_FRAME].uses_2pass) {
        int sbs = s->sb_cols * s->sb_rows;

        /* 2-pass: buffers for the whole frame at once */
        td->b_base = av_malloc_array(s->cols * s->rows, sizeof(VP9Block));
        td->block_base = av_mallocz(((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
                                     16 * 16 + 2 * chroma_eobs) * sbs);
        if (!td->b_base || !td->block_base)
            return AVERROR(ENOMEM);
        /* partition the slab: luma coefs, 2x chroma coefs, then eob arrays */
        td->uvblock_base[0] = td->block_base + sbs * 64 * 64 * bytesperpixel;
        td->uvblock_base[1] = td->uvblock_base[0] + sbs * chroma_blocks * bytesperpixel;
        td->eob_base = (uint8_t *) (td->uvblock_base[1] + sbs * chroma_blocks * bytesperpixel);
        td->uveob_base[0] = td->eob_base + 16 * 16 * sbs;
        td->uveob_base[1] = td->uveob_base[0] + chroma_eobs * sbs;

        /* NOTE(review): the guard opening this scope was lost in extraction
         * (upstream: if (avctx->export_side_data &
         * AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS) {). */
            td->block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
            if (!td->block_structure)
                return AVERROR(ENOMEM);
        }
    } else {
        /* single-pass: tile 0 keeps its buffers, extra tiles are re-created */
        for (i = 1; i < s->active_tile_cols; i++)
            vp9_tile_data_free(&s->td[i]);

        for (i = 0; i < s->active_tile_cols; i++) {
            s->td[i].b_base = av_malloc(sizeof(VP9Block));
            s->td[i].block_base = av_mallocz((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
                                             16 * 16 + 2 * chroma_eobs);
            if (!s->td[i].b_base || !s->td[i].block_base)
                return AVERROR(ENOMEM);
            s->td[i].uvblock_base[0] = s->td[i].block_base + 64 * 64 * bytesperpixel;
            s->td[i].uvblock_base[1] = s->td[i].uvblock_base[0] + chroma_blocks * bytesperpixel;
            s->td[i].eob_base = (uint8_t *) (s->td[i].uvblock_base[1] + chroma_blocks * bytesperpixel);
            s->td[i].uveob_base[0] = s->td[i].eob_base + 16 * 16;
            s->td[i].uveob_base[1] = s->td[i].uveob_base[0] + chroma_eobs;

            /* NOTE(review): same stripped export_side_data guard as in the
             * 2-pass branch above. */
                s->td[i].block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
                if (!s->td[i].block_structure)
                    return AVERROR(ENOMEM);
            }
        }
    }
    s->block_alloc_using_2pass = s->s.frames[CUR_FRAME].uses_2pass;

    return 0;
}
370 
// The sign bit is at the end, not the start, of a bit sequence
/* Read an n-bit magnitude followed by one sign bit; returns the signed value.
 * NOTE(review): the signature line was lost in extraction — upstream this is
 * static av_always_inline int get_sbits_inv(GetBitContext *gb, int n). */
{
    int v = get_bits(gb, n);
    return get_bits1(gb) ? -v : v;
}
377 
378 static av_always_inline int inv_recenter_nonneg(int v, int m)
379 {
380  if (v > 2 * m)
381  return v;
382  if (v & 1)
383  return m - ((v + 1) >> 1);
384  return m + (v >> 1);
385 }
386 
387 // differential forward probability updates
// differential forward probability updates
static int update_prob(VPXRangeCoder *c, int p)
{
    /* Maps the decoded VLC index to the absolute probability delta. The
     * first 20 entries are the widely-spaced "cheap, rough" update values
     * described in the comment below; the rest enumerate, in order, every
     * delta not covered by them. */
    static const uint8_t inv_map_table[255] = {
          7,  20,  33,  46,  59,  72,  85,  98, 111, 124, 137, 150, 163, 176,
        189, 202, 215, 228, 241, 254,   1,   2,   3,   4,   5,   6,   8,   9,
         10,  11,  12,  13,  14,  15,  16,  17,  18,  19,  21,  22,  23,  24,
         25,  26,  27,  28,  29,  30,  31,  32,  34,  35,  36,  37,  38,  39,
         40,  41,  42,  43,  44,  45,  47,  48,  49,  50,  51,  52,  53,  54,
         55,  56,  57,  58,  60,  61,  62,  63,  64,  65,  66,  67,  68,  69,
         70,  71,  73,  74,  75,  76,  77,  78,  79,  80,  81,  82,  83,  84,
         86,  87,  88,  89,  90,  91,  92,  93,  94,  95,  96,  97,  99, 100,
        101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115,
        116, 117, 118, 119, 120, 121, 122, 123, 125, 126, 127, 128, 129, 130,
        131, 132, 133, 134, 135, 136, 138, 139, 140, 141, 142, 143, 144, 145,
        146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
        161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
        177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 191,
        192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205, 206,
        207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221,
        222, 223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236,
        237, 238, 239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
        252, 253, 253,
    };
    int d;

    /* This code is trying to do a differential probability update. For a
     * current probability A in the range [1, 255], the difference to a new
     * probability of any value can be expressed differentially as 1-A, 255-A
     * where some part of this (absolute range) exists both in positive as
     * well as the negative part, whereas another part only exists in one
     * half. We're trying to code this shared part differentially, i.e.
     * times two where the value of the lowest bit specifies the sign, and
     * the single part is then coded on top of this. This absolute difference
     * then again has a value of [0, 254], but a bigger value in this range
     * indicates that we're further away from the original value A, so we
     * can code this as a VLC code, since higher values are increasingly
     * unlikely. The first 20 values in inv_map_table[] allow 'cheap, rough'
     * updates vs. the 'fine, exact' updates further down the range, which
     * adds one extra dimension to this differential update model. */

    /* Four escalating VLC classes: 4 bits (base 0), 4 bits (base 16),
     * 5 bits (base 32), then 7 bits (base 64) where values >= 65 carry one
     * extra bit of precision. */
    if (!vp89_rac_get(c)) {
        d = vp89_rac_get_uint(c, 4) + 0;
    } else if (!vp89_rac_get(c)) {
        d = vp89_rac_get_uint(c, 4) + 16;
    } else if (!vp89_rac_get(c)) {
        d = vp89_rac_get_uint(c, 5) + 32;
    } else {
        d = vp89_rac_get_uint(c, 7);
        if (d >= 65)
            d = (d << 1) - 65 + vp89_rac_get(c);
        d += 64;
        av_assert2(d < FF_ARRAY_ELEMS(inv_map_table));
    }

    /* Recenter around p, mirrored so the result stays inside [1, 255]. */
    return p <= 128 ? 1 + inv_recenter_nonneg(inv_map_table[d], p - 1) :
                    255 - inv_recenter_nonneg(inv_map_table[d], 255 - p);
}
445 
/* Parse the bit-depth, colorspace, color range and chroma subsampling fields
 * of the uncompressed frame header and derive s->pix_fmt / s->bpp /
 * s->bytesperpixel from them. Returns 0 or AVERROR_INVALIDDATA on a
 * profile/format combination the bitstream forbids.
 * NOTE(review): the signature line was lost in extraction — upstream this is
 * static int read_colorspace_details(AVCodecContext *avctx). */
{
    /* NOTE(review): the eight initializers of this table were lost in
     * extraction (upstream maps the 3-bit colorspace field to AVCOL_SPC_*
     * values, with index 7 being RGB). */
    static const enum AVColorSpace colorspaces[8] = {
    };
    VP9Context *s = avctx->priv_data;
    /* profiles 0/1 are 8-bit only; profiles 2/3 signal 10 vs 12 bit */
    int bits = avctx->profile <= 1 ? 0 : 1 + get_bits1(&s->gb); // 0:8, 1:10, 2:12

    s->bpp_index = bits;
    s->s.h.bpp = 8 + bits * 2;
    s->bytesperpixel = (7 + s->s.h.bpp) >> 3;
    avctx->colorspace = colorspaces[get_bits(&s->gb, 3)];
    if (avctx->colorspace == AVCOL_SPC_RGB) { // RGB = profile 1
        /* NOTE(review): initializers lost in extraction (upstream: GBRP
         * variants per bit depth). */
        static const enum AVPixelFormat pix_fmt_rgb[3] = {
        };
        s->ss_h = s->ss_v = 0;
        avctx->color_range = AVCOL_RANGE_JPEG;
        s->pix_fmt = pix_fmt_rgb[bits];
        if (avctx->profile & 1) {
            if (get_bits1(&s->gb)) {
                av_log(avctx, AV_LOG_ERROR, "Reserved bit set in RGB\n");
                return AVERROR_INVALIDDATA;
            }
        } else {
            /* RGB is only valid in the odd (4:4:4-capable) profiles */
            av_log(avctx, AV_LOG_ERROR, "RGB not supported in profile %d\n",
                   avctx->profile);
            return AVERROR_INVALIDDATA;
        }
    } else {
        /* NOTE(review): initializers lost in extraction (upstream: YUV
         * pix_fmts indexed by [bit depth][subsampling v][subsampling h]). */
        static const enum AVPixelFormat pix_fmt_for_ss[3][2 /* v */][2 /* h */] = {
        };
        /* NOTE(review): a line was lost in extraction here — upstream reads
         * one bit into avctx->color_range (JPEG vs MPEG range). */
        if (avctx->profile & 1) {
            /* odd profiles signal subsampling explicitly */
            s->ss_h = get_bits1(&s->gb);
            s->ss_v = get_bits1(&s->gb);
            s->pix_fmt = pix_fmt_for_ss[bits][s->ss_v][s->ss_h];
            if (s->pix_fmt == AV_PIX_FMT_YUV420P) {
                av_log(avctx, AV_LOG_ERROR, "YUV 4:2:0 not supported in profile %d\n",
                       avctx->profile);
                return AVERROR_INVALIDDATA;
            } else if (get_bits1(&s->gb)) {
                av_log(avctx, AV_LOG_ERROR, "Profile %d color details reserved bit set\n",
                       avctx->profile);
                return AVERROR_INVALIDDATA;
            }
        } else {
            /* even profiles are always 4:2:0 */
            s->ss_h = s->ss_v = 1;
            s->pix_fmt = pix_fmt_for_ss[bits][1][1];
        }
    }

    return 0;
}
507 
509  const uint8_t *data, int size, int *ref)
510 {
511  VP9Context *s = avctx->priv_data;
512  int c, i, j, k, l, m, n, w, h, max, size2, ret, sharp;
513  int last_invisible;
514  const uint8_t *data2;
515 
516  /* general header */
517  if ((ret = init_get_bits8(&s->gb, data, size)) < 0) {
518  av_log(avctx, AV_LOG_ERROR, "Failed to initialize bitstream reader\n");
519  return ret;
520  }
521  if (get_bits(&s->gb, 2) != 0x2) { // frame marker
522  av_log(avctx, AV_LOG_ERROR, "Invalid frame marker\n");
523  return AVERROR_INVALIDDATA;
524  }
525  avctx->profile = get_bits1(&s->gb);
526  avctx->profile |= get_bits1(&s->gb) << 1;
527  if (avctx->profile == 3) avctx->profile += get_bits1(&s->gb);
528  if (avctx->profile > 3) {
529  av_log(avctx, AV_LOG_ERROR, "Profile %d is not yet supported\n", avctx->profile);
530  return AVERROR_INVALIDDATA;
531  }
532  s->s.h.profile = avctx->profile;
533  if (get_bits1(&s->gb)) {
534  *ref = get_bits(&s->gb, 3);
535  return 0;
536  }
537 
538  s->last_keyframe = s->s.h.keyframe;
539  s->s.h.keyframe = !get_bits1(&s->gb);
540 
541  last_invisible = s->s.h.invisible;
542  s->s.h.invisible = !get_bits1(&s->gb);
543  s->s.h.errorres = get_bits1(&s->gb);
544  s->s.h.use_last_frame_mvs = !s->s.h.errorres && !last_invisible;
545 
546  if (s->s.h.keyframe) {
547  if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
548  av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
549  return AVERROR_INVALIDDATA;
550  }
551  if ((ret = read_colorspace_details(avctx)) < 0)
552  return ret;
553  // for profile 1, here follows the subsampling bits
554  s->s.h.refreshrefmask = 0xff;
555  w = get_bits(&s->gb, 16) + 1;
556  h = get_bits(&s->gb, 16) + 1;
557  if (get_bits1(&s->gb)) // display size
558  skip_bits(&s->gb, 32);
559  } else {
560  s->s.h.intraonly = s->s.h.invisible ? get_bits1(&s->gb) : 0;
561  s->s.h.resetctx = s->s.h.errorres ? 0 : get_bits(&s->gb, 2);
562  if (s->s.h.intraonly) {
563  if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
564  av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
565  return AVERROR_INVALIDDATA;
566  }
567  if (avctx->profile >= 1) {
568  if ((ret = read_colorspace_details(avctx)) < 0)
569  return ret;
570  } else {
571  s->ss_h = s->ss_v = 1;
572  s->s.h.bpp = 8;
573  s->bpp_index = 0;
574  s->bytesperpixel = 1;
575  s->pix_fmt = AV_PIX_FMT_YUV420P;
576  avctx->colorspace = AVCOL_SPC_BT470BG;
577  avctx->color_range = AVCOL_RANGE_MPEG;
578  }
579  s->s.h.refreshrefmask = get_bits(&s->gb, 8);
580  w = get_bits(&s->gb, 16) + 1;
581  h = get_bits(&s->gb, 16) + 1;
582  if (get_bits1(&s->gb)) // display size
583  skip_bits(&s->gb, 32);
584  } else {
585  s->s.h.refreshrefmask = get_bits(&s->gb, 8);
586  s->s.h.refidx[0] = get_bits(&s->gb, 3);
587  s->s.h.signbias[0] = get_bits1(&s->gb) && !s->s.h.errorres;
588  s->s.h.refidx[1] = get_bits(&s->gb, 3);
589  s->s.h.signbias[1] = get_bits1(&s->gb) && !s->s.h.errorres;
590  s->s.h.refidx[2] = get_bits(&s->gb, 3);
591  s->s.h.signbias[2] = get_bits1(&s->gb) && !s->s.h.errorres;
592  if (!s->s.refs[s->s.h.refidx[0]].f->buf[0] ||
593  !s->s.refs[s->s.h.refidx[1]].f->buf[0] ||
594  !s->s.refs[s->s.h.refidx[2]].f->buf[0]) {
595  av_log(avctx, AV_LOG_ERROR, "Not all references are available\n");
596  return AVERROR_INVALIDDATA;
597  }
598  if (get_bits1(&s->gb)) {
599  w = s->s.refs[s->s.h.refidx[0]].f->width;
600  h = s->s.refs[s->s.h.refidx[0]].f->height;
601  } else if (get_bits1(&s->gb)) {
602  w = s->s.refs[s->s.h.refidx[1]].f->width;
603  h = s->s.refs[s->s.h.refidx[1]].f->height;
604  } else if (get_bits1(&s->gb)) {
605  w = s->s.refs[s->s.h.refidx[2]].f->width;
606  h = s->s.refs[s->s.h.refidx[2]].f->height;
607  } else {
608  w = get_bits(&s->gb, 16) + 1;
609  h = get_bits(&s->gb, 16) + 1;
610  }
611  // Note that in this code, "CUR_FRAME" is actually before we
612  // have formally allocated a frame, and thus actually represents
613  // the _last_ frame
614  s->s.h.use_last_frame_mvs &= s->s.frames[CUR_FRAME].tf.f->width == w &&
615  s->s.frames[CUR_FRAME].tf.f->height == h;
616  if (get_bits1(&s->gb)) // display size
617  skip_bits(&s->gb, 32);
618  s->s.h.highprecisionmvs = get_bits1(&s->gb);
619  s->s.h.filtermode = get_bits1(&s->gb) ? FILTER_SWITCHABLE :
620  get_bits(&s->gb, 2);
621  s->s.h.allowcompinter = s->s.h.signbias[0] != s->s.h.signbias[1] ||
622  s->s.h.signbias[0] != s->s.h.signbias[2];
623  if (s->s.h.allowcompinter) {
624  if (s->s.h.signbias[0] == s->s.h.signbias[1]) {
625  s->s.h.fixcompref = 2;
626  s->s.h.varcompref[0] = 0;
627  s->s.h.varcompref[1] = 1;
628  } else if (s->s.h.signbias[0] == s->s.h.signbias[2]) {
629  s->s.h.fixcompref = 1;
630  s->s.h.varcompref[0] = 0;
631  s->s.h.varcompref[1] = 2;
632  } else {
633  s->s.h.fixcompref = 0;
634  s->s.h.varcompref[0] = 1;
635  s->s.h.varcompref[1] = 2;
636  }
637  }
638  }
639  }
640  s->s.h.refreshctx = s->s.h.errorres ? 0 : get_bits1(&s->gb);
641  s->s.h.parallelmode = s->s.h.errorres ? 1 : get_bits1(&s->gb);
642  s->s.h.framectxid = c = get_bits(&s->gb, 2);
643  if (s->s.h.keyframe || s->s.h.intraonly)
644  s->s.h.framectxid = 0; // BUG: libvpx ignores this field in keyframes
645 
646  /* loopfilter header data */
647  if (s->s.h.keyframe || s->s.h.errorres || s->s.h.intraonly) {
648  // reset loopfilter defaults
649  s->s.h.lf_delta.ref[0] = 1;
650  s->s.h.lf_delta.ref[1] = 0;
651  s->s.h.lf_delta.ref[2] = -1;
652  s->s.h.lf_delta.ref[3] = -1;
653  s->s.h.lf_delta.mode[0] = 0;
654  s->s.h.lf_delta.mode[1] = 0;
655  memset(s->s.h.segmentation.feat, 0, sizeof(s->s.h.segmentation.feat));
656  }
657  s->s.h.filter.level = get_bits(&s->gb, 6);
658  sharp = get_bits(&s->gb, 3);
659  // if sharpness changed, reinit lim/mblim LUTs. if it didn't change, keep
660  // the old cache values since they are still valid
661  if (s->s.h.filter.sharpness != sharp) {
662  for (i = 1; i <= 63; i++) {
663  int limit = i;
664 
665  if (sharp > 0) {
666  limit >>= (sharp + 3) >> 2;
667  limit = FFMIN(limit, 9 - sharp);
668  }
669  limit = FFMAX(limit, 1);
670 
671  s->filter_lut.lim_lut[i] = limit;
672  s->filter_lut.mblim_lut[i] = 2 * (i + 2) + limit;
673  }
674  }
675  s->s.h.filter.sharpness = sharp;
676  if ((s->s.h.lf_delta.enabled = get_bits1(&s->gb))) {
677  if ((s->s.h.lf_delta.updated = get_bits1(&s->gb))) {
678  for (i = 0; i < 4; i++)
679  if (get_bits1(&s->gb))
680  s->s.h.lf_delta.ref[i] = get_sbits_inv(&s->gb, 6);
681  for (i = 0; i < 2; i++)
682  if (get_bits1(&s->gb))
683  s->s.h.lf_delta.mode[i] = get_sbits_inv(&s->gb, 6);
684  }
685  }
686 
687  /* quantization header data */
688  s->s.h.yac_qi = get_bits(&s->gb, 8);
689  s->s.h.ydc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
690  s->s.h.uvdc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
691  s->s.h.uvac_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
692  s->s.h.lossless = s->s.h.yac_qi == 0 && s->s.h.ydc_qdelta == 0 &&
693  s->s.h.uvdc_qdelta == 0 && s->s.h.uvac_qdelta == 0;
694  if (s->s.h.lossless)
696 
697  /* segmentation header info */
698  if ((s->s.h.segmentation.enabled = get_bits1(&s->gb))) {
699  if ((s->s.h.segmentation.update_map = get_bits1(&s->gb))) {
700  for (i = 0; i < 7; i++)
701  s->s.h.segmentation.prob[i] = get_bits1(&s->gb) ?
702  get_bits(&s->gb, 8) : 255;
703  if ((s->s.h.segmentation.temporal = get_bits1(&s->gb)))
704  for (i = 0; i < 3; i++)
705  s->s.h.segmentation.pred_prob[i] = get_bits1(&s->gb) ?
706  get_bits(&s->gb, 8) : 255;
707  }
708 
709  if (get_bits1(&s->gb)) {
710  s->s.h.segmentation.absolute_vals = get_bits1(&s->gb);
711  for (i = 0; i < 8; i++) {
712  if ((s->s.h.segmentation.feat[i].q_enabled = get_bits1(&s->gb)))
713  s->s.h.segmentation.feat[i].q_val = get_sbits_inv(&s->gb, 8);
714  if ((s->s.h.segmentation.feat[i].lf_enabled = get_bits1(&s->gb)))
715  s->s.h.segmentation.feat[i].lf_val = get_sbits_inv(&s->gb, 6);
716  if ((s->s.h.segmentation.feat[i].ref_enabled = get_bits1(&s->gb)))
717  s->s.h.segmentation.feat[i].ref_val = get_bits(&s->gb, 2);
718  s->s.h.segmentation.feat[i].skip_enabled = get_bits1(&s->gb);
719  }
720  }
721  }
722 
723  // set qmul[] based on Y/UV, AC/DC and segmentation Q idx deltas
724  for (i = 0; i < (s->s.h.segmentation.enabled ? 8 : 1); i++) {
725  int qyac, qydc, quvac, quvdc, lflvl, sh;
726 
727  if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].q_enabled) {
728  if (s->s.h.segmentation.absolute_vals)
729  qyac = av_clip_uintp2(s->s.h.segmentation.feat[i].q_val, 8);
730  else
731  qyac = av_clip_uintp2(s->s.h.yac_qi + s->s.h.segmentation.feat[i].q_val, 8);
732  } else {
733  qyac = s->s.h.yac_qi;
734  }
735  qydc = av_clip_uintp2(qyac + s->s.h.ydc_qdelta, 8);
736  quvdc = av_clip_uintp2(qyac + s->s.h.uvdc_qdelta, 8);
737  quvac = av_clip_uintp2(qyac + s->s.h.uvac_qdelta, 8);
738  qyac = av_clip_uintp2(qyac, 8);
739 
740  s->s.h.segmentation.feat[i].qmul[0][0] = ff_vp9_dc_qlookup[s->bpp_index][qydc];
741  s->s.h.segmentation.feat[i].qmul[0][1] = ff_vp9_ac_qlookup[s->bpp_index][qyac];
742  s->s.h.segmentation.feat[i].qmul[1][0] = ff_vp9_dc_qlookup[s->bpp_index][quvdc];
743  s->s.h.segmentation.feat[i].qmul[1][1] = ff_vp9_ac_qlookup[s->bpp_index][quvac];
744 
745  sh = s->s.h.filter.level >= 32;
746  if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].lf_enabled) {
747  if (s->s.h.segmentation.absolute_vals)
748  lflvl = av_clip_uintp2(s->s.h.segmentation.feat[i].lf_val, 6);
749  else
750  lflvl = av_clip_uintp2(s->s.h.filter.level + s->s.h.segmentation.feat[i].lf_val, 6);
751  } else {
752  lflvl = s->s.h.filter.level;
753  }
754  if (s->s.h.lf_delta.enabled) {
755  s->s.h.segmentation.feat[i].lflvl[0][0] =
756  s->s.h.segmentation.feat[i].lflvl[0][1] =
757  av_clip_uintp2(lflvl + (s->s.h.lf_delta.ref[0] * (1 << sh)), 6);
758  for (j = 1; j < 4; j++) {
759  s->s.h.segmentation.feat[i].lflvl[j][0] =
760  av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
761  s->s.h.lf_delta.mode[0]) * (1 << sh)), 6);
762  s->s.h.segmentation.feat[i].lflvl[j][1] =
763  av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
764  s->s.h.lf_delta.mode[1]) * (1 << sh)), 6);
765  }
766  } else {
767  memset(s->s.h.segmentation.feat[i].lflvl, lflvl,
768  sizeof(s->s.h.segmentation.feat[i].lflvl));
769  }
770  }
771 
772  /* tiling info */
773  if ((ret = update_size(avctx, w, h)) < 0) {
774  av_log(avctx, AV_LOG_ERROR, "Failed to initialize decoder for %dx%d @ %d\n",
775  w, h, s->pix_fmt);
776  return ret;
777  }
778  for (s->s.h.tiling.log2_tile_cols = 0;
779  s->sb_cols > (64 << s->s.h.tiling.log2_tile_cols);
780  s->s.h.tiling.log2_tile_cols++) ;
781  for (max = 0; (s->sb_cols >> max) >= 4; max++) ;
782  max = FFMAX(0, max - 1);
783  while (max > s->s.h.tiling.log2_tile_cols) {
784  if (get_bits1(&s->gb))
785  s->s.h.tiling.log2_tile_cols++;
786  else
787  break;
788  }
789  s->s.h.tiling.log2_tile_rows = decode012(&s->gb);
790  s->s.h.tiling.tile_rows = 1 << s->s.h.tiling.log2_tile_rows;
791  if (s->s.h.tiling.tile_cols != (1 << s->s.h.tiling.log2_tile_cols)) {
792  int n_range_coders;
793  VPXRangeCoder *rc;
794 
795  if (s->td) {
796  for (i = 0; i < s->active_tile_cols; i++)
797  vp9_tile_data_free(&s->td[i]);
798  av_freep(&s->td);
799  }
800 
801  s->s.h.tiling.tile_cols = 1 << s->s.h.tiling.log2_tile_cols;
802  s->active_tile_cols = avctx->active_thread_type == FF_THREAD_SLICE ?
803  s->s.h.tiling.tile_cols : 1;
804  vp9_alloc_entries(avctx, s->sb_rows);
805  if (avctx->active_thread_type == FF_THREAD_SLICE) {
806  n_range_coders = 4; // max_tile_rows
807  } else {
808  n_range_coders = s->s.h.tiling.tile_cols;
809  }
810  s->td = av_calloc(s->active_tile_cols, sizeof(VP9TileData) +
811  n_range_coders * sizeof(VPXRangeCoder));
812  if (!s->td)
813  return AVERROR(ENOMEM);
814  rc = (VPXRangeCoder *) &s->td[s->active_tile_cols];
815  for (i = 0; i < s->active_tile_cols; i++) {
816  s->td[i].s = s;
817  s->td[i].c_b = rc;
818  rc += n_range_coders;
819  }
820  }
821 
822  /* check reference frames */
823  if (!s->s.h.keyframe && !s->s.h.intraonly) {
824  int valid_ref_frame = 0;
825  for (i = 0; i < 3; i++) {
826  AVFrame *ref = s->s.refs[s->s.h.refidx[i]].f;
827  int refw = ref->width, refh = ref->height;
828 
829  if (ref->format != avctx->pix_fmt) {
830  av_log(avctx, AV_LOG_ERROR,
831  "Ref pixfmt (%s) did not match current frame (%s)",
832  av_get_pix_fmt_name(ref->format),
833  av_get_pix_fmt_name(avctx->pix_fmt));
834  return AVERROR_INVALIDDATA;
835  } else if (refw == w && refh == h) {
836  s->mvscale[i][0] = s->mvscale[i][1] = 0;
837  } else {
838  /* Check to make sure at least one of frames that */
839  /* this frame references has valid dimensions */
840  if (w * 2 < refw || h * 2 < refh || w > 16 * refw || h > 16 * refh) {
841  av_log(avctx, AV_LOG_WARNING,
842  "Invalid ref frame dimensions %dx%d for frame size %dx%d\n",
843  refw, refh, w, h);
844  s->mvscale[i][0] = s->mvscale[i][1] = REF_INVALID_SCALE;
845  continue;
846  }
847  s->mvscale[i][0] = (refw << 14) / w;
848  s->mvscale[i][1] = (refh << 14) / h;
849  s->mvstep[i][0] = 16 * s->mvscale[i][0] >> 14;
850  s->mvstep[i][1] = 16 * s->mvscale[i][1] >> 14;
851  }
852  valid_ref_frame++;
853  }
854  if (!valid_ref_frame) {
855  av_log(avctx, AV_LOG_ERROR, "No valid reference frame is found, bitstream not supported\n");
856  return AVERROR_INVALIDDATA;
857  }
858  }
859 
860  if (s->s.h.keyframe || s->s.h.errorres || (s->s.h.intraonly && s->s.h.resetctx == 3)) {
861  s->prob_ctx[0].p = s->prob_ctx[1].p = s->prob_ctx[2].p =
862  s->prob_ctx[3].p = ff_vp9_default_probs;
863  memcpy(s->prob_ctx[0].coef, ff_vp9_default_coef_probs,
864  sizeof(ff_vp9_default_coef_probs));
865  memcpy(s->prob_ctx[1].coef, ff_vp9_default_coef_probs,
866  sizeof(ff_vp9_default_coef_probs));
867  memcpy(s->prob_ctx[2].coef, ff_vp9_default_coef_probs,
868  sizeof(ff_vp9_default_coef_probs));
869  memcpy(s->prob_ctx[3].coef, ff_vp9_default_coef_probs,
870  sizeof(ff_vp9_default_coef_probs));
871  } else if (s->s.h.intraonly && s->s.h.resetctx == 2) {
872  s->prob_ctx[c].p = ff_vp9_default_probs;
873  memcpy(s->prob_ctx[c].coef, ff_vp9_default_coef_probs,
874  sizeof(ff_vp9_default_coef_probs));
875  }
876 
877  // next 16 bits is size of the rest of the header (arith-coded)
878  s->s.h.compressed_header_size = size2 = get_bits(&s->gb, 16);
879  s->s.h.uncompressed_header_size = (get_bits_count(&s->gb) + 7) / 8;
880 
881  data2 = align_get_bits(&s->gb);
882  if (size2 > size - (data2 - data)) {
883  av_log(avctx, AV_LOG_ERROR, "Invalid compressed header size\n");
884  return AVERROR_INVALIDDATA;
885  }
886  ret = ff_vpx_init_range_decoder(&s->c, data2, size2);
887  if (ret < 0)
888  return ret;
889 
890  if (vpx_rac_get_prob_branchy(&s->c, 128)) { // marker bit
891  av_log(avctx, AV_LOG_ERROR, "Marker bit was set\n");
892  return AVERROR_INVALIDDATA;
893  }
894 
895  for (i = 0; i < s->active_tile_cols; i++) {
896  if (s->s.h.keyframe || s->s.h.intraonly) {
897  memset(s->td[i].counts.coef, 0, sizeof(s->td[0].counts.coef));
898  memset(s->td[i].counts.eob, 0, sizeof(s->td[0].counts.eob));
899  } else {
900  memset(&s->td[i].counts, 0, sizeof(s->td[0].counts));
901  }
902  s->td[i].nb_block_structure = 0;
903  }
904 
905  /* FIXME is it faster to not copy here, but do it down in the fw updates
906  * as explicit copies if the fw update is missing (and skip the copy upon
907  * fw update)? */
908  s->prob.p = s->prob_ctx[c].p;
909 
910  // txfm updates
911  if (s->s.h.lossless) {
912  s->s.h.txfmmode = TX_4X4;
913  } else {
914  s->s.h.txfmmode = vp89_rac_get_uint(&s->c, 2);
915  if (s->s.h.txfmmode == 3)
916  s->s.h.txfmmode += vp89_rac_get(&s->c);
917 
918  if (s->s.h.txfmmode == TX_SWITCHABLE) {
919  for (i = 0; i < 2; i++)
920  if (vpx_rac_get_prob_branchy(&s->c, 252))
921  s->prob.p.tx8p[i] = update_prob(&s->c, s->prob.p.tx8p[i]);
922  for (i = 0; i < 2; i++)
923  for (j = 0; j < 2; j++)
924  if (vpx_rac_get_prob_branchy(&s->c, 252))
925  s->prob.p.tx16p[i][j] =
926  update_prob(&s->c, s->prob.p.tx16p[i][j]);
927  for (i = 0; i < 2; i++)
928  for (j = 0; j < 3; j++)
929  if (vpx_rac_get_prob_branchy(&s->c, 252))
930  s->prob.p.tx32p[i][j] =
931  update_prob(&s->c, s->prob.p.tx32p[i][j]);
932  }
933  }
934 
935  // coef updates
936  for (i = 0; i < 4; i++) {
937  uint8_t (*ref)[2][6][6][3] = s->prob_ctx[c].coef[i];
938  if (vp89_rac_get(&s->c)) {
939  for (j = 0; j < 2; j++)
940  for (k = 0; k < 2; k++)
941  for (l = 0; l < 6; l++)
942  for (m = 0; m < 6; m++) {
943  uint8_t *p = s->prob.coef[i][j][k][l][m];
944  uint8_t *r = ref[j][k][l][m];
945  if (m >= 3 && l == 0) // dc only has 3 pt
946  break;
947  for (n = 0; n < 3; n++) {
948  if (vpx_rac_get_prob_branchy(&s->c, 252))
949  p[n] = update_prob(&s->c, r[n]);
950  else
951  p[n] = r[n];
952  }
953  memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
954  }
955  } else {
956  for (j = 0; j < 2; j++)
957  for (k = 0; k < 2; k++)
958  for (l = 0; l < 6; l++)
959  for (m = 0; m < 6; m++) {
960  uint8_t *p = s->prob.coef[i][j][k][l][m];
961  uint8_t *r = ref[j][k][l][m];
962  if (m > 3 && l == 0) // dc only has 3 pt
963  break;
964  memcpy(p, r, 3);
965  memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
966  }
967  }
968  if (s->s.h.txfmmode == i)
969  break;
970  }
971 
972  // mode updates
973  for (i = 0; i < 3; i++)
974  if (vpx_rac_get_prob_branchy(&s->c, 252))
975  s->prob.p.skip[i] = update_prob(&s->c, s->prob.p.skip[i]);
976  if (!s->s.h.keyframe && !s->s.h.intraonly) {
977  for (i = 0; i < 7; i++)
978  for (j = 0; j < 3; j++)
979  if (vpx_rac_get_prob_branchy(&s->c, 252))
980  s->prob.p.mv_mode[i][j] =
981  update_prob(&s->c, s->prob.p.mv_mode[i][j]);
982 
983  if (s->s.h.filtermode == FILTER_SWITCHABLE)
984  for (i = 0; i < 4; i++)
985  for (j = 0; j < 2; j++)
986  if (vpx_rac_get_prob_branchy(&s->c, 252))
987  s->prob.p.filter[i][j] =
988  update_prob(&s->c, s->prob.p.filter[i][j]);
989 
990  for (i = 0; i < 4; i++)
991  if (vpx_rac_get_prob_branchy(&s->c, 252))
992  s->prob.p.intra[i] = update_prob(&s->c, s->prob.p.intra[i]);
993 
994  if (s->s.h.allowcompinter) {
995  s->s.h.comppredmode = vp89_rac_get(&s->c);
996  if (s->s.h.comppredmode)
997  s->s.h.comppredmode += vp89_rac_get(&s->c);
998  if (s->s.h.comppredmode == PRED_SWITCHABLE)
999  for (i = 0; i < 5; i++)
1000  if (vpx_rac_get_prob_branchy(&s->c, 252))
1001  s->prob.p.comp[i] =
1002  update_prob(&s->c, s->prob.p.comp[i]);
1003  } else {
1004  s->s.h.comppredmode = PRED_SINGLEREF;
1005  }
1006 
1007  if (s->s.h.comppredmode != PRED_COMPREF) {
1008  for (i = 0; i < 5; i++) {
1009  if (vpx_rac_get_prob_branchy(&s->c, 252))
1010  s->prob.p.single_ref[i][0] =
1011  update_prob(&s->c, s->prob.p.single_ref[i][0]);
1012  if (vpx_rac_get_prob_branchy(&s->c, 252))
1013  s->prob.p.single_ref[i][1] =
1014  update_prob(&s->c, s->prob.p.single_ref[i][1]);
1015  }
1016  }
1017 
1018  if (s->s.h.comppredmode != PRED_SINGLEREF) {
1019  for (i = 0; i < 5; i++)
1020  if (vpx_rac_get_prob_branchy(&s->c, 252))
1021  s->prob.p.comp_ref[i] =
1022  update_prob(&s->c, s->prob.p.comp_ref[i]);
1023  }
1024 
1025  for (i = 0; i < 4; i++)
1026  for (j = 0; j < 9; j++)
1027  if (vpx_rac_get_prob_branchy(&s->c, 252))
1028  s->prob.p.y_mode[i][j] =
1029  update_prob(&s->c, s->prob.p.y_mode[i][j]);
1030 
1031  for (i = 0; i < 4; i++)
1032  for (j = 0; j < 4; j++)
1033  for (k = 0; k < 3; k++)
1034  if (vpx_rac_get_prob_branchy(&s->c, 252))
1035  s->prob.p.partition[3 - i][j][k] =
1036  update_prob(&s->c,
1037  s->prob.p.partition[3 - i][j][k]);
1038 
1039  // mv fields don't use the update_prob subexp model for some reason
1040  for (i = 0; i < 3; i++)
1041  if (vpx_rac_get_prob_branchy(&s->c, 252))
1042  s->prob.p.mv_joint[i] = (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1043 
1044  for (i = 0; i < 2; i++) {
1045  if (vpx_rac_get_prob_branchy(&s->c, 252))
1046  s->prob.p.mv_comp[i].sign =
1047  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1048 
1049  for (j = 0; j < 10; j++)
1050  if (vpx_rac_get_prob_branchy(&s->c, 252))
1051  s->prob.p.mv_comp[i].classes[j] =
1052  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1053 
1054  if (vpx_rac_get_prob_branchy(&s->c, 252))
1055  s->prob.p.mv_comp[i].class0 =
1056  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1057 
1058  for (j = 0; j < 10; j++)
1059  if (vpx_rac_get_prob_branchy(&s->c, 252))
1060  s->prob.p.mv_comp[i].bits[j] =
1061  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1062  }
1063 
1064  for (i = 0; i < 2; i++) {
1065  for (j = 0; j < 2; j++)
1066  for (k = 0; k < 3; k++)
1067  if (vpx_rac_get_prob_branchy(&s->c, 252))
1068  s->prob.p.mv_comp[i].class0_fp[j][k] =
1069  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1070 
1071  for (j = 0; j < 3; j++)
1072  if (vpx_rac_get_prob_branchy(&s->c, 252))
1073  s->prob.p.mv_comp[i].fp[j] =
1074  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1075  }
1076 
1077  if (s->s.h.highprecisionmvs) {
1078  for (i = 0; i < 2; i++) {
1079  if (vpx_rac_get_prob_branchy(&s->c, 252))
1080  s->prob.p.mv_comp[i].class0_hp =
1081  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1082 
1083  if (vpx_rac_get_prob_branchy(&s->c, 252))
1084  s->prob.p.mv_comp[i].hp =
1085  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1086  }
1087  }
1088  }
1089 
1090  return (data2 - data) + size2;
1091 }
1092 
1093 static void decode_sb(VP9TileData *td, int row, int col, VP9Filter *lflvl,
1094  ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
1095 {
1096  const VP9Context *s = td->s;
1097  int c = ((s->above_partition_ctx[col] >> (3 - bl)) & 1) |
1098  (((td->left_partition_ctx[row & 0x7] >> (3 - bl)) & 1) << 1);
1099  const uint8_t *p = s->s.h.keyframe || s->s.h.intraonly ? ff_vp9_default_kf_partition_probs[bl][c] :
1100  s->prob.p.partition[bl][c];
1101  enum BlockPartition bp;
1102  ptrdiff_t hbs = 4 >> bl;
1103  AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
1104  ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
1105  int bytesperpixel = s->bytesperpixel;
1106 
1107  if (bl == BL_8X8) {
1109  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1110  } else if (col + hbs < s->cols) { // FIXME why not <=?
1111  if (row + hbs < s->rows) { // FIXME why not <=?
1113  switch (bp) {
1114  case PARTITION_NONE:
1115  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1116  break;
1117  case PARTITION_H:
1118  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1119  yoff += hbs * 8 * y_stride;
1120  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1121  ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, bl, bp);
1122  break;
1123  case PARTITION_V:
1124  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1125  yoff += hbs * 8 * bytesperpixel;
1126  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1127  ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, bl, bp);
1128  break;
1129  case PARTITION_SPLIT:
1130  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1131  decode_sb(td, row, col + hbs, lflvl,
1132  yoff + 8 * hbs * bytesperpixel,
1133  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1134  yoff += hbs * 8 * y_stride;
1135  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1136  decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1137  decode_sb(td, row + hbs, col + hbs, lflvl,
1138  yoff + 8 * hbs * bytesperpixel,
1139  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1140  break;
1141  default:
1142  av_assert0(0);
1143  }
1144  } else if (vpx_rac_get_prob_branchy(td->c, p[1])) {
1145  bp = PARTITION_SPLIT;
1146  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1147  decode_sb(td, row, col + hbs, lflvl,
1148  yoff + 8 * hbs * bytesperpixel,
1149  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1150  } else {
1151  bp = PARTITION_H;
1152  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1153  }
1154  } else if (row + hbs < s->rows) { // FIXME why not <=?
1155  if (vpx_rac_get_prob_branchy(td->c, p[2])) {
1156  bp = PARTITION_SPLIT;
1157  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1158  yoff += hbs * 8 * y_stride;
1159  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1160  decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1161  } else {
1162  bp = PARTITION_V;
1163  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1164  }
1165  } else {
1166  bp = PARTITION_SPLIT;
1167  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1168  }
1169  td->counts.partition[bl][c][bp]++;
1170 }
1171 
/**
 * Pass-2 counterpart of decode_sb(): replays the partition structure that was
 * recorded in pass 1 (b->bl / b->bp, advanced by ff_vp9_decode_block) instead
 * of reading partition symbols from the bitstream, and re-runs block decoding
 * for reconstruction. Parameters are identical to decode_sb().
 */
static void decode_sb_mem(VP9TileData *td, int row, int col, VP9Filter *lflvl,
                          ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
{
    const VP9Context *s = td->s;
    VP9Block *b = td->b;
    ptrdiff_t hbs = 4 >> bl; // half the block size, in 8x8-block units
    AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
    ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
    int bytesperpixel = s->bytesperpixel;

    if (bl == BL_8X8) {
        av_assert2(b->bl == BL_8X8);
        ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
    } else if (td->b->bl == bl) {
        // the recorded block lives at exactly this level
        ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
        if (b->bp == PARTITION_H && row + hbs < s->rows) {
            yoff += hbs * 8 * y_stride;
            uvoff += hbs * 8 * uv_stride >> s->ss_v;
            ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, b->bl, b->bp);
        } else if (b->bp == PARTITION_V && col + hbs < s->cols) {
            yoff += hbs * 8 * bytesperpixel;
            uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
            ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, b->bl, b->bp);
        }
    } else {
        // recorded level is deeper: recurse into the (up to four) quadrants,
        // skipping the ones that fall outside the visible frame
        decode_sb_mem(td, row, col, lflvl, yoff, uvoff, bl + 1);
        if (col + hbs < s->cols) { // FIXME why not <=?
            if (row + hbs < s->rows) {
                decode_sb_mem(td, row, col + hbs, lflvl, yoff + 8 * hbs * bytesperpixel,
                              uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
                yoff += hbs * 8 * y_stride;
                uvoff += hbs * 8 * uv_stride >> s->ss_v;
                decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
                decode_sb_mem(td, row + hbs, col + hbs, lflvl,
                              yoff + 8 * hbs * bytesperpixel,
                              uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
            } else {
                yoff += hbs * 8 * bytesperpixel;
                uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
                decode_sb_mem(td, row, col + hbs, lflvl, yoff, uvoff, bl + 1);
            }
        } else if (row + hbs < s->rows) {
            yoff += hbs * 8 * y_stride;
            uvoff += hbs * 8 * uv_stride >> s->ss_v;
            decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
        }
    }
}
1220 
/**
 * Compute the half-open range [*start, *end) covered by tile number idx,
 * in 8x8-block units (the << 3 converts from 64x64-superblock counts).
 *
 * @param start  receives the first 8x8-block index of the tile
 * @param end    receives one past the last 8x8-block index of the tile
 * @param idx    tile index
 * @param log2_n log2 of the tile count along this dimension
 * @param n      total number of superblocks along this dimension
 */
static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
{
    int first_sb = (idx * n) >> log2_n;
    int last_sb  = ((idx + 1) * n) >> log2_n;

    // clamp to the frame's superblock count before converting units
    if (first_sb > n)
        first_sb = n;
    if (last_sb > n)
        last_sb = n;

    *start = first_sb << 3;
    *end   = last_sb << 3;
}
1228 
{
    /* Release per-frame scratch storage: the intra-prediction line buffers
     * (intra_pred_data[0] appears to own the allocation for all planes —
     * NOTE(review): confirm against the allocation site, which is outside
     * this view) plus every active tile's per-tile buffers. */
    int i;

    av_freep(&s->intra_pred_data[0]);
    for (i = 0; i < s->active_tile_cols; i++)
        vp9_tile_data_free(&s->td[i]);
}
1237 
{
    /* Decoder teardown: drop the three internal frames, the extradata buffer
     * pool, all 8 reference slots (current and staged "next" set), the shared
     * scratch buffers, and — for slice threading — the progress entries and
     * mutex/cond pair. Always returns 0. */
    VP9Context *s = avctx->priv_data;
    int i;

    for (i = 0; i < 3; i++) {
        vp9_frame_unref(avctx, &s->s.frames[i]);
        av_frame_free(&s->s.frames[i].tf.f);
    }
    av_buffer_pool_uninit(&s->frame_extradata_pool);
    for (i = 0; i < 8; i++) {
        ff_thread_release_ext_buffer(avctx, &s->s.refs[i]);
        av_frame_free(&s->s.refs[i].f);
        ff_thread_release_ext_buffer(avctx, &s->next_refs[i]);
        av_frame_free(&s->next_refs[i].f);
    }

    free_buffers(s);
#if HAVE_THREADS
    av_freep(&s->entries);
    ff_pthread_free(s, vp9_context_offsets);
#endif
    av_freep(&s->td);
    return 0;
}
1263 
/**
 * Single-threaded tile decoding loop (also used for both passes of 2-pass
 * frame-threaded decoding, selected by s->pass).
 *
 * Walks tile rows; per tile row it first sets up one range decoder per tile
 * column from the tile-size-prefixed payload, then decodes superblock rows
 * across all tile columns, backs up the last pixel rows for next-row intra
 * prediction, runs the loopfilter, and reports row progress.
 *
 * @param data start of the tile data payload (after the headers)
 * @param size remaining payload size in bytes
 * @return 0 on success, a negative AVERROR code on failure
 */
static int decode_tiles(AVCodecContext *avctx,
                        const uint8_t *data, int size)
{
    VP9Context *s = avctx->priv_data;
    VP9TileData *td = &s->td[0];
    int row, col, tile_row, tile_col, ret;
    int bytesperpixel;
    int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
    AVFrame *f;
    ptrdiff_t yoff, uvoff, ls_y, ls_uv;

    f = s->s.frames[CUR_FRAME].tf.f;
    ls_y = f->linesize[0];
    ls_uv =f->linesize[1];
    bytesperpixel = s->bytesperpixel;

    yoff = uvoff = 0;
    for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
        set_tile_offset(&tile_row_start, &tile_row_end,
                        tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);

        // initialize one range decoder per tile column; every tile except
        // the very last one is preceded by a 32-bit big-endian size field
        for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
            int64_t tile_size;

            if (tile_col == s->s.h.tiling.tile_cols - 1 &&
                tile_row == s->s.h.tiling.tile_rows - 1) {
                tile_size = size;
            } else {
                tile_size = AV_RB32(data);
                data += 4;
                size -= 4;
            }
            if (tile_size > size) {
                // unblock any frame-thread consumers before bailing out
                ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
                return AVERROR_INVALIDDATA;
            }
            ret = ff_vpx_init_range_decoder(&td->c_b[tile_col], data, tile_size);
            if (ret < 0)
                return ret;
            if (vpx_rac_get_prob_branchy(&td->c_b[tile_col], 128)) { // marker bit
                ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
                return AVERROR_INVALIDDATA;
            }
            data += tile_size;
            size -= tile_size;
        }

        // decode superblock rows across all tile columns of this tile row
        for (row = tile_row_start; row < tile_row_end;
             row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
            VP9Filter *lflvl_ptr = s->lflvl;
            ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;

            for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
                set_tile_offset(&tile_col_start, &tile_col_end,
                                tile_col, s->s.h.tiling.log2_tile_cols, s->sb_cols);
                td->tile_col_start = tile_col_start;
                if (s->pass != 2) {
                    // reset the left-edge contexts at each tile boundary
                    memset(td->left_partition_ctx, 0, 8);
                    memset(td->left_skip_ctx, 0, 8);
                    if (s->s.h.keyframe || s->s.h.intraonly) {
                        memset(td->left_mode_ctx, DC_PRED, 16);
                    } else {
                        memset(td->left_mode_ctx, NEARESTMV, 8);
                    }
                    memset(td->left_y_nnz_ctx, 0, 16);
                    memset(td->left_uv_nnz_ctx, 0, 32);
                    memset(td->left_segpred_ctx, 0, 8);

                    td->c = &td->c_b[tile_col];
                }

                for (col = tile_col_start;
                     col < tile_col_end;
                     col += 8, yoff2 += 64 * bytesperpixel,
                     uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                    // FIXME integrate with lf code (i.e. zero after each
                    // use, similar to invtxfm coefficients, or similar)
                    if (s->pass != 1) {
                        memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
                    }

                    if (s->pass == 2) {
                        // pass 2 replays the recorded block structure
                        decode_sb_mem(td, row, col, lflvl_ptr,
                                      yoff2, uvoff2, BL_64X64);
                    } else {
                        if (vpx_rac_is_end(td->c)) {
                            return AVERROR_INVALIDDATA;
                        }
                        decode_sb(td, row, col, lflvl_ptr,
                                  yoff2, uvoff2, BL_64X64);
                    }
                }
            }

            if (s->pass == 1)
                continue;

            // backup pre-loopfilter reconstruction data for intra
            // prediction of next row of sb64s
            if (row + 8 < s->rows) {
                memcpy(s->intra_pred_data[0],
                       f->data[0] + yoff + 63 * ls_y,
                       8 * s->cols * bytesperpixel);
                memcpy(s->intra_pred_data[1],
                       f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * s->cols * bytesperpixel >> s->ss_h);
                memcpy(s->intra_pred_data[2],
                       f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * s->cols * bytesperpixel >> s->ss_h);
            }

            // loopfilter one row
            if (s->s.h.filter.level) {
                yoff2 = yoff;
                uvoff2 = uvoff;
                lflvl_ptr = s->lflvl;
                for (col = 0; col < s->cols;
                     col += 8, yoff2 += 64 * bytesperpixel,
                     uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                    ff_vp9_loopfilter_sb(avctx, lflvl_ptr, row, col,
                                         yoff2, uvoff2);
                }
            }

            // FIXME maybe we can make this more finegrained by running the
            // loopfilter per-block instead of after each sbrow
            // In fact that would also make intra pred left preparation easier?
            ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, row >> 3, 0);
        }
    }
    return 0;
}
1396 
1397 #if HAVE_THREADS
/**
 * Slice-threading worker: decodes one tile column (selected by jobnr) for
 * all tile rows. Each worker owns its VP9TileData and reports per-sb-row
 * progress so the loopfilter thread (loopfilter_proc) can follow behind.
 *
 * @param jobnr index of the tile column this worker decodes
 * @return 0 (errors are recorded elsewhere; td->error_info is checked by
 *         the caller — NOTE(review): not set in this view, confirm)
 */
static av_always_inline
int decode_tiles_mt(AVCodecContext *avctx, void *tdata, int jobnr,
                    int threadnr)
{
    VP9Context *s = avctx->priv_data;
    VP9TileData *td = &s->td[jobnr];
    ptrdiff_t uvoff, yoff, ls_y, ls_uv;
    int bytesperpixel = s->bytesperpixel, row, col, tile_row;
    unsigned tile_cols_len;
    int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
    VP9Filter *lflvl_ptr_base;
    AVFrame *f;

    f = s->s.frames[CUR_FRAME].tf.f;
    ls_y = f->linesize[0];
    ls_uv =f->linesize[1];

    // horizontal position of this worker's tile column
    set_tile_offset(&tile_col_start, &tile_col_end,
                    jobnr, s->s.h.tiling.log2_tile_cols, s->sb_cols);
    td->tile_col_start = tile_col_start;
    uvoff = (64 * bytesperpixel >> s->ss_h)*(tile_col_start >> 3);
    yoff = (64 * bytesperpixel)*(tile_col_start >> 3);
    lflvl_ptr_base = s->lflvl+(tile_col_start >> 3);

    for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
        set_tile_offset(&tile_row_start, &tile_row_end,
                        tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);

        // range decoders were initialized by the caller, one per tile row
        td->c = &td->c_b[tile_row];
        for (row = tile_row_start; row < tile_row_end;
             row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
            ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
            VP9Filter *lflvl_ptr = lflvl_ptr_base+s->sb_cols*(row >> 3);

            // reset the left-edge contexts at the tile boundary
            memset(td->left_partition_ctx, 0, 8);
            memset(td->left_skip_ctx, 0, 8);
            if (s->s.h.keyframe || s->s.h.intraonly) {
                memset(td->left_mode_ctx, DC_PRED, 16);
            } else {
                memset(td->left_mode_ctx, NEARESTMV, 8);
            }
            memset(td->left_y_nnz_ctx, 0, 16);
            memset(td->left_uv_nnz_ctx, 0, 32);
            memset(td->left_segpred_ctx, 0, 8);

            for (col = tile_col_start;
                 col < tile_col_end;
                 col += 8, yoff2 += 64 * bytesperpixel,
                 uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                // FIXME integrate with lf code (i.e. zero after each
                // use, similar to invtxfm coefficients, or similar)
                memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
                decode_sb(td, row, col, lflvl_ptr,
                          yoff2, uvoff2, BL_64X64);
            }

            // backup pre-loopfilter reconstruction data for intra
            // prediction of next row of sb64s
            tile_cols_len = tile_col_end - tile_col_start;
            if (row + 8 < s->rows) {
                memcpy(s->intra_pred_data[0] + (tile_col_start * 8 * bytesperpixel),
                       f->data[0] + yoff + 63 * ls_y,
                       8 * tile_cols_len * bytesperpixel);
                memcpy(s->intra_pred_data[1] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
                       f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * tile_cols_len * bytesperpixel >> s->ss_h);
                memcpy(s->intra_pred_data[2] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
                       f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * tile_cols_len * bytesperpixel >> s->ss_h);
            }

            vp9_report_tile_progress(s, row >> 3, 1);
        }
    }
    return 0;
}
1474 
1475 static av_always_inline
1476 int loopfilter_proc(AVCodecContext *avctx)
1477 {
1478  VP9Context *s = avctx->priv_data;
1479  ptrdiff_t uvoff, yoff, ls_y, ls_uv;
1480  VP9Filter *lflvl_ptr;
1481  int bytesperpixel = s->bytesperpixel, col, i;
1482  AVFrame *f;
1483 
1484  f = s->s.frames[CUR_FRAME].tf.f;
1485  ls_y = f->linesize[0];
1486  ls_uv =f->linesize[1];
1487 
1488  for (i = 0; i < s->sb_rows; i++) {
1489  vp9_await_tile_progress(s, i, s->s.h.tiling.tile_cols);
1490 
1491  if (s->s.h.filter.level) {
1492  yoff = (ls_y * 64)*i;
1493  uvoff = (ls_uv * 64 >> s->ss_v)*i;
1494  lflvl_ptr = s->lflvl+s->sb_cols*i;
1495  for (col = 0; col < s->cols;
1496  col += 8, yoff += 64 * bytesperpixel,
1497  uvoff += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1498  ff_vp9_loopfilter_sb(avctx, lflvl_ptr, i << 3, col,
1499  yoff, uvoff);
1500  }
1501  }
1502  }
1503  return 0;
1504 }
1505 #endif
1506 
1508 {
1509  AVVideoEncParams *par;
1510  unsigned int tile, nb_blocks = 0;
1511 
1512  if (s->s.h.segmentation.enabled) {
1513  for (tile = 0; tile < s->active_tile_cols; tile++)
1514  nb_blocks += s->td[tile].nb_block_structure;
1515  }
1516 
1518  AV_VIDEO_ENC_PARAMS_VP9, nb_blocks);
1519  if (!par)
1520  return AVERROR(ENOMEM);
1521 
1522  par->qp = s->s.h.yac_qi;
1523  par->delta_qp[0][0] = s->s.h.ydc_qdelta;
1524  par->delta_qp[1][0] = s->s.h.uvdc_qdelta;
1525  par->delta_qp[2][0] = s->s.h.uvdc_qdelta;
1526  par->delta_qp[1][1] = s->s.h.uvac_qdelta;
1527  par->delta_qp[2][1] = s->s.h.uvac_qdelta;
1528 
1529  if (nb_blocks) {
1530  unsigned int block = 0;
1531  unsigned int tile, block_tile;
1532 
1533  for (tile = 0; tile < s->active_tile_cols; tile++) {
1534  VP9TileData *td = &s->td[tile];
1535 
1536  for (block_tile = 0; block_tile < td->nb_block_structure; block_tile++) {
1538  unsigned int row = td->block_structure[block_tile].row;
1539  unsigned int col = td->block_structure[block_tile].col;
1540  uint8_t seg_id = frame->segmentation_map[row * 8 * s->sb_cols + col];
1541 
1542  b->src_x = col * 8;
1543  b->src_y = row * 8;
1544  b->w = 1 << (3 + td->block_structure[block_tile].block_size_idx_x);
1545  b->h = 1 << (3 + td->block_structure[block_tile].block_size_idx_y);
1546 
1547  if (s->s.h.segmentation.feat[seg_id].q_enabled) {
1548  b->delta_qp = s->s.h.segmentation.feat[seg_id].q_val;
1549  if (s->s.h.segmentation.absolute_vals)
1550  b->delta_qp -= par->qp;
1551  }
1552  }
1553  }
1554  }
1555 
1556  return 0;
1557 }
1558 
                            int *got_frame, AVPacket *pkt)
{
    const uint8_t *data = pkt->data;
    int size = pkt->size;
    VP9Context *s = avctx->priv_data;
    int ret, i, j, ref;
    /* the previous segmentation map can be kept when segmentation is off or
     * the map is not being updated this frame */
    int retain_segmap_ref = s->s.frames[REF_FRAME_SEGMAP].segmentation_map &&
                            (!s->s.h.segmentation.enabled || !s->s.h.segmentation.update_map);
    AVFrame *f;

    if ((ret = decode_frame_header(avctx, data, size, &ref)) < 0) {
        return ret;
    } else if (ret == 0) {
        /* header signalled "show existing frame": output reference `ref`
         * directly, refresh the ref list, and consume the whole packet */
        if (!s->s.refs[ref].f->buf[0]) {
            av_log(avctx, AV_LOG_ERROR, "Requested reference %d not available\n", ref);
            return AVERROR_INVALIDDATA;
        }
        if ((ret = av_frame_ref(frame, s->s.refs[ref].f)) < 0)
            return ret;
        frame->pts = pkt->pts;
        frame->pkt_dts = pkt->dts;
        for (i = 0; i < 8; i++) {
            if (s->next_refs[i].f->buf[0])
                ff_thread_release_ext_buffer(avctx, &s->next_refs[i]);
            if (s->s.refs[i].f->buf[0] &&
                (ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i])) < 0)
                return ret;
        }
        *got_frame = 1;
        return pkt->size;
    }
    data += ret;
    size -= ret;

    /* rotate the internal frames: current frame becomes the segmentation-map
     * and mv-pair reference for the next frame, then allocate a new current */
    if (!retain_segmap_ref || s->s.h.keyframe || s->s.h.intraonly) {
        if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0])
            vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_SEGMAP]);
        if (!s->s.h.keyframe && !s->s.h.intraonly && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] &&
            (ret = vp9_frame_ref(avctx, &s->s.frames[REF_FRAME_SEGMAP], &s->s.frames[CUR_FRAME])) < 0)
            return ret;
    }
    if (s->s.frames[REF_FRAME_MVPAIR].tf.f->buf[0])
        vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_MVPAIR]);
    if (!s->s.h.intraonly && !s->s.h.keyframe && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] &&
        (ret = vp9_frame_ref(avctx, &s->s.frames[REF_FRAME_MVPAIR], &s->s.frames[CUR_FRAME])) < 0)
        return ret;
    if (s->s.frames[CUR_FRAME].tf.f->buf[0])
        vp9_frame_unref(avctx, &s->s.frames[CUR_FRAME]);
    if ((ret = vp9_frame_alloc(avctx, &s->s.frames[CUR_FRAME])) < 0)
        return ret;
    f = s->s.frames[CUR_FRAME].tf.f;
    f->key_frame = s->s.h.keyframe;
    f->pict_type = (s->s.h.keyframe || s->s.h.intraonly) ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;

    /* drop a stale segmentation map whose dimensions no longer match */
    if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0] &&
        (s->s.frames[REF_FRAME_MVPAIR].tf.f->width != s->s.frames[CUR_FRAME].tf.f->width ||
         s->s.frames[REF_FRAME_MVPAIR].tf.f->height != s->s.frames[CUR_FRAME].tf.f->height)) {
        vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_SEGMAP]);
    }

    // ref frame setup
    for (i = 0; i < 8; i++) {
        if (s->next_refs[i].f->buf[0])
            ff_thread_release_ext_buffer(avctx, &s->next_refs[i]);
        if (s->s.h.refreshrefmask & (1 << i)) {
            ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.frames[CUR_FRAME].tf);
        } else if (s->s.refs[i].f->buf[0]) {
            ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i]);
        }
        if (ret < 0)
            return ret;
    }

    /* hardware decoding path: hand the full packet to the hwaccel */
    if (avctx->hwaccel) {
        ret = avctx->hwaccel->start_frame(avctx, NULL, 0);
        if (ret < 0)
            return ret;
        ret = avctx->hwaccel->decode_slice(avctx, pkt->data, pkt->size);
        if (ret < 0)
            return ret;
        ret = avctx->hwaccel->end_frame(avctx);
        if (ret < 0)
            return ret;
        goto finish;
    }

    // main tile decode loop
    memset(s->above_partition_ctx, 0, s->cols);
    memset(s->above_skip_ctx, 0, s->cols);
    if (s->s.h.keyframe || s->s.h.intraonly) {
        memset(s->above_mode_ctx, DC_PRED, s->cols * 2);
    } else {
        memset(s->above_mode_ctx, NEARESTMV, s->cols);
    }
    memset(s->above_y_nnz_ctx, 0, s->sb_cols * 16);
    memset(s->above_uv_nnz_ctx[0], 0, s->sb_cols * 16 >> s->ss_h);
    memset(s->above_uv_nnz_ctx[1], 0, s->sb_cols * 16 >> s->ss_h);
    memset(s->above_segpred_ctx, 0, s->cols);
    /* 2-pass decoding (pass = 1 then 2) is used with frame threading when
     * the frame adapts its probabilities and is not in parallel mode */
    s->pass = s->s.frames[CUR_FRAME].uses_2pass =
        avctx->active_thread_type == FF_THREAD_FRAME && s->s.h.refreshctx && !s->s.h.parallelmode;
    if ((ret = update_block_buffers(avctx)) < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "Failed to allocate block buffers\n");
        return ret;
    }
    if (s->s.h.refreshctx && s->s.h.parallelmode) {
        /* parallel mode: store the (unadapted) probabilities up front so
         * dependent frame threads can start early */
        int j, k, l, m;

        for (i = 0; i < 4; i++) {
            for (j = 0; j < 2; j++)
                for (k = 0; k < 2; k++)
                    for (l = 0; l < 6; l++)
                        for (m = 0; m < 6; m++)
                            memcpy(s->prob_ctx[s->s.h.framectxid].coef[i][j][k][l][m],
                                   s->prob.coef[i][j][k][l][m], 3);
            if (s->s.h.txfmmode == i)
                break;
        }
        s->prob_ctx[s->s.h.framectxid].p = s->prob.p;
        ff_thread_finish_setup(avctx);
    } else if (!s->s.h.refreshctx) {
        ff_thread_finish_setup(avctx);
    }

#if HAVE_THREADS
    if (avctx->active_thread_type & FF_THREAD_SLICE) {
        for (i = 0; i < s->sb_rows; i++)
            atomic_store(&s->entries[i], 0);
    }
#endif

    do {
        // rewind all per-tile scratch pointers for this pass
        for (i = 0; i < s->active_tile_cols; i++) {
            s->td[i].b = s->td[i].b_base;
            s->td[i].block = s->td[i].block_base;
            s->td[i].uvblock[0] = s->td[i].uvblock_base[0];
            s->td[i].uvblock[1] = s->td[i].uvblock_base[1];
            s->td[i].eob = s->td[i].eob_base;
            s->td[i].uveob[0] = s->td[i].uveob_base[0];
            s->td[i].uveob[1] = s->td[i].uveob_base[1];
            s->td[i].error_info = 0;
        }

#if HAVE_THREADS
        if (avctx->active_thread_type == FF_THREAD_SLICE) {
            int tile_row, tile_col;

            av_assert1(!s->pass);

            /* set up one range decoder per (tile_col, tile_row); each tile
             * except the very last is preceded by a 32-bit size field */
            for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
                for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
                    int64_t tile_size;

                    if (tile_col == s->s.h.tiling.tile_cols - 1 &&
                        tile_row == s->s.h.tiling.tile_rows - 1) {
                        tile_size = size;
                    } else {
                        tile_size = AV_RB32(data);
                        data += 4;
                        size -= 4;
                    }
                    if (tile_size > size)
                        return AVERROR_INVALIDDATA;
                    ret = ff_vpx_init_range_decoder(&s->td[tile_col].c_b[tile_row], data, tile_size);
                    if (ret < 0)
                        return ret;
                    if (vpx_rac_get_prob_branchy(&s->td[tile_col].c_b[tile_row], 128)) // marker bit
                        return AVERROR_INVALIDDATA;
                    data += tile_size;
                    size -= tile_size;
                }
            }

            ff_slice_thread_execute_with_mainfunc(avctx, decode_tiles_mt, loopfilter_proc, s->td, NULL, s->s.h.tiling.tile_cols);
        } else
#endif
        {
            ret = decode_tiles(avctx, data, size);
            if (ret < 0) {
                ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
                return ret;
            }
        }

        // Sum all counts fields into td[0].counts for tile threading
        if (avctx->active_thread_type == FF_THREAD_SLICE)
            for (i = 1; i < s->s.h.tiling.tile_cols; i++)
                for (j = 0; j < sizeof(s->td[i].counts) / sizeof(unsigned); j++)
                    ((unsigned *)&s->td[0].counts)[j] += ((unsigned *)&s->td[i].counts)[j];

        if (s->pass < 2 && s->s.h.refreshctx && !s->s.h.parallelmode) {
            /* probability adaptation happens here (call not visible in this
             * view) before dependent threads are released */
            ff_thread_finish_setup(avctx);
        }
    } while (s->pass++ == 1);
    ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);

    if (s->td->error_info < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to decode tile data\n");
        s->td->error_info = 0;
        return AVERROR_INVALIDDATA;
    }
        ret = vp9_export_enc_params(s, &s->s.frames[CUR_FRAME]);
        if (ret < 0)
            return ret;
    }

finish:
    // ref frame setup
    for (i = 0; i < 8; i++) {
        if (s->s.refs[i].f->buf[0])
            ff_thread_release_ext_buffer(avctx, &s->s.refs[i]);
        if (s->next_refs[i].f->buf[0] &&
            (ret = ff_thread_ref_frame(&s->s.refs[i], &s->next_refs[i])) < 0)
            return ret;
    }

    if (!s->s.h.invisible) {
        if ((ret = av_frame_ref(frame, s->s.frames[CUR_FRAME].tf.f)) < 0)
            return ret;
        *got_frame = 1;
    }

    return pkt->size;
}
1786 
1788 {
1789  VP9Context *s = avctx->priv_data;
1790  int i;
1791 
1792  for (i = 0; i < 3; i++)
1793  vp9_frame_unref(avctx, &s->s.frames[i]);
1794  for (i = 0; i < 8; i++)
1795  ff_thread_release_ext_buffer(avctx, &s->s.refs[i]);
1796 }
1797 
1799 {
1800  VP9Context *s = avctx->priv_data;
1801  int ret;
1802 
1803  s->last_bpp = 0;
1804  s->s.h.filter.sharpness = -1;
1805 
1806 #if HAVE_THREADS
1807  if (avctx->active_thread_type & FF_THREAD_SLICE) {
1808  ret = ff_pthread_init(s, vp9_context_offsets);
1809  if (ret < 0)
1810  return ret;
1811  }
1812 #endif
1813 
1814  for (int i = 0; i < 3; i++) {
1815  s->s.frames[i].tf.f = av_frame_alloc();
1816  if (!s->s.frames[i].tf.f)
1817  return AVERROR(ENOMEM);
1818  }
1819  for (int i = 0; i < 8; i++) {
1820  s->s.refs[i].f = av_frame_alloc();
1821  s->next_refs[i].f = av_frame_alloc();
1822  if (!s->s.refs[i].f || !s->next_refs[i].f)
1823  return AVERROR(ENOMEM);
1824  }
1825  return 0;
1826 }
1827 
1828 #if HAVE_THREADS
1829 static int vp9_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
1830 {
1831  int i, ret;
1832  VP9Context *s = dst->priv_data, *ssrc = src->priv_data;
1833 
1834  for (i = 0; i < 3; i++) {
1835  if (s->s.frames[i].tf.f->buf[0])
1836  vp9_frame_unref(dst, &s->s.frames[i]);
1837  if (ssrc->s.frames[i].tf.f->buf[0]) {
1838  if ((ret = vp9_frame_ref(dst, &s->s.frames[i], &ssrc->s.frames[i])) < 0)
1839  return ret;
1840  }
1841  }
1842  for (i = 0; i < 8; i++) {
1843  if (s->s.refs[i].f->buf[0])
1844  ff_thread_release_ext_buffer(dst, &s->s.refs[i]);
1845  if (ssrc->next_refs[i].f->buf[0]) {
1846  if ((ret = ff_thread_ref_frame(&s->s.refs[i], &ssrc->next_refs[i])) < 0)
1847  return ret;
1848  }
1849  }
1850 
1851  s->s.h.invisible = ssrc->s.h.invisible;
1852  s->s.h.keyframe = ssrc->s.h.keyframe;
1853  s->s.h.intraonly = ssrc->s.h.intraonly;
1854  s->ss_v = ssrc->ss_v;
1855  s->ss_h = ssrc->ss_h;
1856  s->s.h.segmentation.enabled = ssrc->s.h.segmentation.enabled;
1857  s->s.h.segmentation.update_map = ssrc->s.h.segmentation.update_map;
1858  s->s.h.segmentation.absolute_vals = ssrc->s.h.segmentation.absolute_vals;
1859  s->bytesperpixel = ssrc->bytesperpixel;
1860  s->gf_fmt = ssrc->gf_fmt;
1861  s->w = ssrc->w;
1862  s->h = ssrc->h;
1863  s->s.h.bpp = ssrc->s.h.bpp;
1864  s->bpp_index = ssrc->bpp_index;
1865  s->pix_fmt = ssrc->pix_fmt;
1866  memcpy(&s->prob_ctx, &ssrc->prob_ctx, sizeof(s->prob_ctx));
1867  memcpy(&s->s.h.lf_delta, &ssrc->s.h.lf_delta, sizeof(s->s.h.lf_delta));
1868  memcpy(&s->s.h.segmentation.feat, &ssrc->s.h.segmentation.feat,
1869  sizeof(s->s.h.segmentation.feat));
1870 
1871  return 0;
1872 }
1873 #endif
1874 
1876  .p.name = "vp9",
1877  .p.long_name = NULL_IF_CONFIG_SMALL("Google VP9"),
1878  .p.type = AVMEDIA_TYPE_VIDEO,
1879  .p.id = AV_CODEC_ID_VP9,
1880  .priv_data_size = sizeof(VP9Context),
1881  .init = vp9_decode_init,
1882  .close = vp9_decode_free,
1885  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
1888  .flush = vp9_decode_flush,
1889  .update_thread_context = ONLY_IF_THREADS_ENABLED(vp9_decode_update_thread_context),
1890  .p.profiles = NULL_IF_CONFIG_SMALL(ff_vp9_profiles),
1891  .bsfs = "vp9_superframe_split",
1892  .hw_configs = (const AVCodecHWConfigInternal *const []) {
1893 #if CONFIG_VP9_DXVA2_HWACCEL
1894  HWACCEL_DXVA2(vp9),
1895 #endif
1896 #if CONFIG_VP9_D3D11VA_HWACCEL
1897  HWACCEL_D3D11VA(vp9),
1898 #endif
1899 #if CONFIG_VP9_D3D11VA2_HWACCEL
1900  HWACCEL_D3D11VA2(vp9),
1901 #endif
1902 #if CONFIG_VP9_NVDEC_HWACCEL
1903  HWACCEL_NVDEC(vp9),
1904 #endif
1905 #if CONFIG_VP9_VAAPI_HWACCEL
1906  HWACCEL_VAAPI(vp9),
1907 #endif
1908 #if CONFIG_VP9_VDPAU_HWACCEL
1909  HWACCEL_VDPAU(vp9),
1910 #endif
1911 #if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
1912  HWACCEL_VIDEOTOOLBOX(vp9),
1913 #endif
1914  NULL
1915  },
1916 };
AVVideoEncParams::qp
int32_t qp
Base quantisation parameter for the frame.
Definition: video_enc_params.h:103
hwconfig.h
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1388
av_buffer_pool_init
AVBufferPool * av_buffer_pool_init(size_t size, AVBufferRef *(*alloc)(size_t size))
Allocate and initialize a buffer pool.
Definition: buffer.c:280
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:225
FF_CODEC_CAP_SLICE_THREAD_HAS_MF
#define FF_CODEC_CAP_SLICE_THREAD_HAS_MF
Codec initializes slice-based threading with a main function.
Definition: codec_internal.h:63
decode_tiles
static int decode_tiles(AVCodecContext *avctx, const uint8_t *data, int size)
Definition: vp9.c:1264
td
#define td
Definition: regdef.h:70
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
vp9_frame_alloc
static int vp9_frame_alloc(AVCodecContext *avctx, VP9Frame *f)
Definition: vp9.c:107
atomic_store
#define atomic_store(object, desired)
Definition: stdatomic.h:85
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:41
r
const char * r
Definition: vf_curves.c:116
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
PRED_SWITCHABLE
@ PRED_SWITCHABLE
Definition: vp9shared.h:52
PRED_SINGLEREF
@ PRED_SINGLEREF
Definition: vp9shared.h:50
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:975
ff_thread_release_ext_buffer
void ff_thread_release_ext_buffer(AVCodecContext *avctx, ThreadFrame *f)
Unref a ThreadFrame.
Definition: pthread_frame.c:1141
VP9Frame::segmentation_map
uint8_t * segmentation_map
Definition: vp9shared.h:68
VP9Frame
Definition: vp9shared.h:65
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
av_clip_uintp2
#define av_clip_uintp2
Definition: common.h:119
ff_vp9_decoder
const FFCodec ff_vp9_decoder
Definition: vp9.c:1875
decode_sb
static void decode_sb(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
Definition: vp9.c:1093
ff_vp9_adapt_probs
void ff_vp9_adapt_probs(VP9Context *s)
Definition: vp9prob.c:44
vp9_decode_flush
static void vp9_decode_flush(AVCodecContext *avctx)
Definition: vp9.c:1787
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:116
PRED_COMPREF
@ PRED_COMPREF
Definition: vp9shared.h:51
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:325
pixdesc.h
w
uint8_t w
Definition: llviddspenc.c:38
HWACCEL_DXVA2
#define HWACCEL_DXVA2(codec)
Definition: hwconfig.h:67
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:601
BlockPartition
BlockPartition
Definition: vp9shared.h:35
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:374
DC_PRED
@ DC_PRED
Definition: vp9.h:48
HWACCEL_D3D11VA2
#define HWACCEL_D3D11VA2(codec)
Definition: hwconfig.h:69
b
#define b
Definition: input.c:34
data
const char data[16]
Definition: mxf.c:146
update_size
static int update_size(AVCodecContext *avctx, int w, int h)
Definition: vp9.c:183
decode_sb_mem
static void decode_sb_mem(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
Definition: vp9.c:1172
REF_FRAME_SEGMAP
#define REF_FRAME_SEGMAP
Definition: vp9shared.h:171
decode_frame_header
static int decode_frame_header(AVCodecContext *avctx, const uint8_t *data, int size, int *ref)
Definition: vp9.c:508
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:408
atomic_int
intptr_t atomic_int
Definition: stdatomic.h:55
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:219
FFCodec
Definition: codec_internal.h:118
AVCOL_SPC_RGB
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB), YZX and ST 428-1
Definition: pixfmt.h:528
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
max
#define max(a, b)
Definition: cuda_runtime.h:33
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
VP9_SYNCCODE
#define VP9_SYNCCODE
Definition: vp9.c:46
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
vp89_rac.h
VP9Frame::hwaccel_priv_buf
AVBufferRef * hwaccel_priv_buf
Definition: vp9shared.h:72
VP9Filter
Definition: vp9dec.h:79
VPXRangeCoder
Definition: vpx_rac.h:35
thread.h
ff_pthread_free
av_cold void ff_pthread_free(void *obj, const unsigned offsets[])
Definition: pthread.c:94
FILTER_SWITCHABLE
@ FILTER_SWITCHABLE
Definition: vp9.h:70
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
VP9Block
Definition: vp9dec.h:85
init
static int init
Definition: av_tx.c:47
VP9Frame::tf
ThreadFrame tf
Definition: vp9shared.h:66
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:533
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
AVCOL_SPC_RESERVED
@ AVCOL_SPC_RESERVED
reserved for future use by ITU-T and ISO/IEC just like 15-255 are
Definition: pixfmt.h:531
AVHWAccel
Definition: avcodec.h:2076
VP9Frame::extradata
AVBufferRef * extradata
Definition: vp9shared.h:67
TX_SWITCHABLE
@ TX_SWITCHABLE
Definition: vp9.h:33
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:122
finish
static void finish(void)
Definition: movenc.c:342
ff_vp9_ac_qlookup
const int16_t ff_vp9_ac_qlookup[3][256]
Definition: vp9data.c:334
AVVideoEncParams::delta_qp
int32_t delta_qp[4][2]
Quantisation parameter offset from the base (per-frame) qp for a given plane (first index) and AC/DC ...
Definition: video_enc_params.h:109
fail
#define fail()
Definition: checkasm.h:131
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:424
GetBitContext
Definition: get_bits.h:61
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:478
HWACCEL_VDPAU
#define HWACCEL_VDPAU(codec)
Definition: hwconfig.h:75
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:39
PARTITION_NONE
@ PARTITION_NONE
Definition: vp9shared.h:36
VP9Frame::hwaccel_picture_private
void * hwaccel_picture_private
Definition: vp9shared.h:73
AVVideoEncParams
Video encoding parameters for a given frame.
Definition: video_enc_params.h:73
vp9_decode_free
static av_cold int vp9_decode_free(AVCodecContext *avctx)
Definition: vp9.c:1238
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:104
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:411
avassert.h
ff_vp9_model_pareto8
const uint8_t ff_vp9_model_pareto8[256][8]
Definition: vp9data.c:1176
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:667
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1854
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:595
BL_8X8
@ BL_8X8
Definition: vp9shared.h:80
av_buffer_pool_get
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:387
PARTITION_V
@ PARTITION_V
Definition: vp9shared.h:38
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:260
vp9_frame_ref
static int vp9_frame_ref(AVCodecContext *avctx, VP9Frame *dst, VP9Frame *src)
Definition: vp9.c:153
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:127
s
#define s(width, name)
Definition: cbs_vp9.c:256
AVCOL_SPC_SMPTE170M
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
Definition: pixfmt.h:534
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:376
AV_CODEC_ID_VP9
@ AV_CODEC_ID_VP9
Definition: codec_id.h:218
vp9data.h
bits
uint8_t bits
Definition: vp3data.h:141
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
get_bits.h
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
ff_vp9dsp_init
av_cold void ff_vp9dsp_init(VP9DSPContext *dsp, int bpp, int bitexact)
Definition: vp9dsp.c:88
decode012
static int decode012(GetBitContext *gb)
Definition: get_bits.h:821
ff_vp9_partition_tree
const int8_t ff_vp9_partition_tree[3][2]
Definition: vp9data.c:35
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
vp9_decode_frame
static int vp9_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
Definition: vp9.c:1559
ff_thread_ref_frame
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
Definition: utils.c:891
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:113
threadframe.h
NULL
#define NULL
Definition: coverity.c:32
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:982
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
VP9Context
Definition: vp9dec.h:97
REF_FRAME_MVPAIR
#define REF_FRAME_MVPAIR
Definition: vp9shared.h:170
AVHWAccel::end_frame
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
Definition: avcodec.h:2176
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
vp89_rac_get_uint
static av_unused int vp89_rac_get_uint(VPXRangeCoder *c, int bits)
Definition: vp89_rac.h:41
profiles.h
av_buffer_pool_uninit
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:322
AV_PIX_FMT_YUV440P10
#define AV_PIX_FMT_YUV440P10
Definition: pixfmt.h:410
pthread_internal.h
ONLY_IF_THREADS_ENABLED
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:156
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:409
VP9mv
Definition: vp9shared.h:55
PARTITION_SPLIT
@ PARTITION_SPLIT
Definition: vp9shared.h:39
atomic_load_explicit
#define atomic_load_explicit(object, order)
Definition: stdatomic.h:96
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
pthread_mutex_unlock
#define pthread_mutex_unlock(a)
Definition: ffprobe.c:77
av_video_enc_params_create_side_data
AVVideoEncParams * av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type, unsigned int nb_blocks)
Allocates memory for AVEncodeInfoFrame plus an array of.
Definition: video_enc_params.c:58
vp9.h
VP9Frame::uses_2pass
int uses_2pass
Definition: vp9shared.h:70
f
f
Definition: af_crystalizer.c:122
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:375
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:353
codec_internal.h
pix_fmt_rgb
static enum AVPixelFormat pix_fmt_rgb[3]
Definition: libdav1d.c:65
REF_INVALID_SCALE
#define REF_INVALID_SCALE
Definition: vp9dec.h:43
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
read_colorspace_details
static int read_colorspace_details(AVCodecContext *avctx)
Definition: vp9.c:446
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:413
size
int size
Definition: twinvq_data.h:10344
vp9_alloc_entries
static int vp9_alloc_entries(AVCodecContext *avctx, int n)
Definition: vp9.c:88
atomic_fetch_add_explicit
#define atomic_fetch_add_explicit(object, operand, order)
Definition: stdatomic.h:149
FF_CODEC_CAP_ALLOCATE_PROGRESS
#define FF_CODEC_CAP_ALLOCATE_PROGRESS
Definition: codec_internal.h:68
free_buffers
static void free_buffers(VP9Context *s)
Definition: vp9.c:1229
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:415
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1484
AVCodecHWConfigInternal
Definition: hwconfig.h:29
TX_4X4
@ TX_4X4
Definition: vp9.h:28
update_block_buffers
static int update_block_buffers(AVCodecContext *avctx)
Definition: vp9.c:311
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:373
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:117
HWACCEL_D3D11VA
#define HWACCEL_D3D11VA(codec)
Definition: hwconfig.h:79
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:305
inv_recenter_nonneg
static av_always_inline int inv_recenter_nonneg(int v, int m)
Definition: vp9.c:378
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:71
vpx_rac_is_end
static av_always_inline int vpx_rac_is_end(VPXRangeCoder *c)
returns 1 if the end of the stream has been reached, 0 otherwise.
Definition: vpx_rac.h:51
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:119
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1483
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:187
ff_slice_thread_execute_with_mainfunc
int ff_slice_thread_execute_with_mainfunc(AVCodecContext *avctx, action_func2 *func2, main_func *mainfunc, void *arg, int *ret, int job_count)
Definition: pthread_slice.c:126
AVCOL_SPC_SMPTE240M
@ AVCOL_SPC_SMPTE240M
derived from 170M primaries and D65 white point, 170M is derived from BT470 System M's primaries
Definition: pixfmt.h:535
assign
#define assign(var, type, n)
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:274
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
update_prob
static int update_prob(VPXRangeCoder *c, int p)
Definition: vp9.c:388
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:367
DEFINE_OFFSET_ARRAY
#define DEFINE_OFFSET_ARRAY(type, name, cnt_variable, mutexes, conds)
Definition: pthread_internal.h:61
AVCodecContext::properties
unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1853
AVCOL_SPC_BT2020_NCL
@ AVCOL_SPC_BT2020_NCL
ITU-R BT2020 non-constant luminance system.
Definition: pixfmt.h:538
AVHWAccel::decode_slice
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:2165
vpx_rac.h
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:425
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:527
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
vpx_rac_get_prob_branchy
static av_always_inline int vpx_rac_get_prob_branchy(VPXRangeCoder *c, int prob)
Definition: vpx_rac.h:99
AVVideoBlockParams
Data structure for storing block-level encoding information.
Definition: video_enc_params.h:120
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:264
get_sbits_inv
static av_always_inline int get_sbits_inv(GetBitContext *gb, int n)
Definition: vp9.c:372
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:211
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:530
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:608
ff_thread_get_ext_buffer
int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around ff_get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:1043
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:584
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:272
HWACCEL_VIDEOTOOLBOX
#define HWACCEL_VIDEOTOOLBOX(codec)
Definition: hwconfig.h:77
avcodec.h
limit
static double limit(double x)
Definition: vf_pseudocolor.c:128
vp89_rac_get_tree
static av_always_inline int vp89_rac_get_tree(VPXRangeCoder *c, const int8_t(*tree)[2], const uint8_t *probs)
Definition: vp89_rac.h:54
BL_64X64
@ BL_64X64
Definition: vp9shared.h:77
av_buffer_allocz
AVBufferRef * av_buffer_allocz(size_t size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:93
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
vp9_decode_init
static av_cold int vp9_decode_init(AVCodecContext *avctx)
Definition: vp9.c:1798
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:683
ff_vpx_init_range_decoder
int ff_vpx_init_range_decoder(VPXRangeCoder *c, const uint8_t *buf, int buf_size)
Definition: vpx_rac.c:42
vp9_tile_data_free
static void vp9_tile_data_free(VP9TileData *td)
Definition: vp9.c:91
ff_thread_finish_setup
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards. If some code can 't be moved
VP9mvrefPair
Definition: vp9shared.h:60
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:412
pthread_cond_signal
static av_always_inline int pthread_cond_signal(pthread_cond_t *cond)
Definition: os2threads.h:152
AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
Decoding only.
Definition: avcodec.h:366
VP9TileData
Definition: vp9dec.h:168
vp89_rac_get
static av_always_inline int vp89_rac_get(VPXRangeCoder *c)
Definition: vp89_rac.h:36
AVCodecContext
main external API structure.
Definition: avcodec.h:398
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1491
VP9Filter::mask
uint8_t mask[2][2][8][4]
Definition: vp9dec.h:82
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AVHWAccel::frame_priv_data_size
int frame_priv_data_size
Size of per-frame hardware accelerator private data.
Definition: avcodec.h:2185
vp9_frame_unref
static void vp9_frame_unref(AVCodecContext *avctx, VP9Frame *f)
Definition: vp9.c:98
VP9Frame::mv
VP9mvrefPair * mv
Definition: vp9shared.h:69
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1556
ff_vp9_decode_block
void ff_vp9_decode_block(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl, enum BlockPartition bp)
Definition: vp9block.c:1263
NEARESTMV
@ NEARESTMV
Definition: vp9shared.h:43
ff_thread_get_format
FF_DISABLE_DEPRECATION_WARNINGS enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Wrapper around get_format() for frame-multithreaded codecs.
Definition: pthread_frame.c:1005
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:112
BlockLevel
BlockLevel
Definition: vp9shared.h:76
AVCodecContext::export_side_data
int export_side_data
Bit set of AV_CODEC_EXPORT_DATA_* flags, which affects the kind of metadata exported in frame,...
Definition: avcodec.h:2015
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
ff_pthread_init
av_cold int ff_pthread_init(void *obj, const unsigned offsets[])
Initialize/destroy a list of mutexes/conditions contained in a structure.
Definition: pthread.c:107
pthread_cond_wait
static av_always_inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
Definition: os2threads.h:192
vp9dec.h
AVHWAccel::start_frame
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: avcodec.h:2138
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
ff_vp9_default_kf_partition_probs
const uint8_t ff_vp9_default_kf_partition_probs[4][4][3]
Definition: vp9data.c:41
AV_VIDEO_ENC_PARAMS_VP9
@ AV_VIDEO_ENC_PARAMS_VP9
VP9 stores:
Definition: video_enc_params.h:44
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:90
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:288
ff_vp9_default_probs
const ProbContext ff_vp9_default_probs
Definition: vp9data.c:1435
CUR_FRAME
#define CUR_FRAME
Definition: vp9shared.h:169
vp9_export_enc_params
static int vp9_export_enc_params(VP9Context *s, VP9Frame *frame)
Definition: vp9.c:1507
AVPacket
This structure stores compressed data.
Definition: packet.h:351
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:425
PARTITION_H
@ PARTITION_H
Definition: vp9shared.h:37
ff_vp9_loopfilter_sb
void ff_vp9_loopfilter_sb(AVCodecContext *avctx, VP9Filter *lflvl, int row, int col, ptrdiff_t yoff, ptrdiff_t uvoff)
Definition: vp9lpf.c:178
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
videodsp.h
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:73
d
d
Definition: ffmpeg_filter.c:155
HWACCEL_MAX
#define HWACCEL_MAX
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_video_enc_params_block
static av_always_inline AVVideoBlockParams * av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
Definition: video_enc_params.h:143
AV_PIX_FMT_YUV440P12
#define AV_PIX_FMT_YUV440P12
Definition: pixfmt.h:414
h
h
Definition: vp9dsp_template.c:2038
atomic_init
#define atomic_init(obj, value)
Definition: stdatomic.h:33
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
Definition: pixfmt.h:529
ff_vp9_profiles
const AVProfile ff_vp9_profiles[]
Definition: profiles.c:139
video_enc_params.h
set_tile_offset
static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
Definition: vp9.c:1221
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2595
ff_vp9_dc_qlookup
const int16_t ff_vp9_dc_qlookup[3][256]
Definition: vp9data.c:231
pthread_mutex_lock
#define pthread_mutex_lock(a)
Definition: ffprobe.c:73
ff_vp9_default_coef_probs
const uint8_t ff_vp9_default_coef_probs[4][2][2][6][6][3]
Definition: vp9data.c:1540