FFmpeg
vp9.c
Go to the documentation of this file.
1 /*
2  * VP9 compatible video decoder
3  *
4  * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
5  * Copyright (C) 2013 Clément Bœsch <u pkh me>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include "avcodec.h"
25 #include "get_bits.h"
26 #include "hwconfig.h"
27 #include "internal.h"
28 #include "profiles.h"
29 #include "thread.h"
30 #include "pthread_internal.h"
31 
32 #include "videodsp.h"
33 #include "vp56.h"
34 #include "vp9.h"
35 #include "vp9data.h"
36 #include "vp9dec.h"
37 #include "libavutil/avassert.h"
38 #include "libavutil/pixdesc.h"
40 
41 #define VP9_SYNCCODE 0x498342
42 
43 #if HAVE_THREADS
/* Offset table for VP9Context's pthread objects (progress_mutex/cond),
 * presumably consumed by the generic helpers from pthread_internal.h to
 * init/destroy them; pthread_init_cnt tracks how many were successfully
 * initialized so partial failures can be unwound. */
DEFINE_OFFSET_ARRAY(VP9Context, vp9_context, pthread_init_cnt,
                    (offsetof(VP9Context, progress_mutex)),
                    (offsetof(VP9Context, progress_cond)));
47 
48 static int vp9_alloc_entries(AVCodecContext *avctx, int n) {
49  VP9Context *s = avctx->priv_data;
50  int i;
51 
52  if (avctx->active_thread_type & FF_THREAD_SLICE) {
53  if (s->entries)
54  av_freep(&s->entries);
55 
56  s->entries = av_malloc_array(n, sizeof(atomic_int));
57  if (!s->entries)
58  return AVERROR(ENOMEM);
59 
60  for (i = 0; i < n; i++)
61  atomic_init(&s->entries[i], 0);
62  }
63  return 0;
64 }
65 
static void vp9_report_tile_progress(VP9Context *s, int field, int n) {
    /* Publish n more completed units for 'field' and wake a waiter.
     * The counter is bumped while holding progress_mutex so a waiter
     * cannot observe the old value and then miss the signal between its
     * re-check and pthread_cond_wait(). Release order pairs with the
     * acquire load in vp9_await_tile_progress(). */
    pthread_mutex_lock(&s->progress_mutex);
    atomic_fetch_add_explicit(&s->entries[field], n, memory_order_release);
    pthread_cond_signal(&s->progress_cond);
    pthread_mutex_unlock(&s->progress_mutex);
}
72 
73 static void vp9_await_tile_progress(VP9Context *s, int field, int n) {
74  if (atomic_load_explicit(&s->entries[field], memory_order_acquire) >= n)
75  return;
76 
77  pthread_mutex_lock(&s->progress_mutex);
78  while (atomic_load_explicit(&s->entries[field], memory_order_relaxed) != n)
79  pthread_cond_wait(&s->progress_cond, &s->progress_mutex);
80  pthread_mutex_unlock(&s->progress_mutex);
81 }
82 #else
83 static int vp9_alloc_entries(AVCodecContext *avctx, int n) { return 0; }
84 #endif
85 
{
    /* Free the per-tile scratch buffers: block descriptors, coefficient
     * storage (uvblock/eob pointers alias into block_base, so freeing
     * the base releases them all) and the block-structure export list.
     * av_freep() is NULL-safe and resets each pointer. */
    av_freep(&td->b_base);
    av_freep(&td->block_base);
    av_freep(&td->block_structure);
}
92 
{
    /* Release the threaded frame buffer, the pooled extradata
     * (segmentation map + motion-vector pairs) and any hwaccel private
     * buffer. segmentation_map and hwaccel_picture_private are borrowed
     * pointers aliasing into those buffers, so they are only cleared. */
    ff_thread_release_buffer(avctx, &f->tf);
    av_buffer_unref(&f->extradata);
    av_buffer_unref(&f->hwaccel_priv_buf);
    f->segmentation_map = NULL;
    f->hwaccel_picture_private = NULL;
}
101 
{
    VP9Context *s = avctx->priv_data;
    int ret, sz;

    /* NOTE(review): the call that initializes 'ret' (the actual frame
     * buffer allocation, lost in this extract) belongs here — confirm
     * against the upstream source before relying on this listing. */
    if (ret < 0)
        return ret;

    /* One segmentation-map byte per 8x8 block: 64 per 64x64 superblock.
     * The pooled buffer holds sz map bytes followed by sz VP9mvrefPair
     * entries (hence the (1 + sizeof(VP9mvrefPair)) factor below). */
    sz = 64 * s->sb_cols * s->sb_rows;
    if (sz != s->frame_extradata_pool_size) {
        /* Frame geometry changed: rebuild the pool at the new size. */
        av_buffer_pool_uninit(&s->frame_extradata_pool);
        s->frame_extradata_pool = av_buffer_pool_init(sz * (1 + sizeof(VP9mvrefPair)), NULL);
        if (!s->frame_extradata_pool) {
            s->frame_extradata_pool_size = 0;
            goto fail;
        }
        s->frame_extradata_pool_size = sz;
    }
    f->extradata = av_buffer_pool_get(s->frame_extradata_pool);
    if (!f->extradata) {
        goto fail;
    }
    /* Pool buffers are recycled, so clear stale contents. */
    memset(f->extradata->data, 0, f->extradata->size);

    /* Both pointers alias into the single pooled allocation. */
    f->segmentation_map = f->extradata->data;
    f->mv = (VP9mvrefPair *) (f->extradata->data + sz);

    if (avctx->hwaccel) {
        const AVHWAccel *hwaccel = avctx->hwaccel;
        av_assert0(!f->hwaccel_picture_private);
        if (hwaccel->frame_priv_data_size) {
            f->hwaccel_priv_buf = av_buffer_allocz(hwaccel->frame_priv_data_size);
            if (!f->hwaccel_priv_buf)
                goto fail;
            f->hwaccel_picture_private = f->hwaccel_priv_buf->data;
        }
    }

    return 0;

fail:
    /* Unwind every partially-acquired resource in one place. */
    vp9_frame_unref(avctx, f);
    return AVERROR(ENOMEM);
}
147 
{
    int ret;

    /* Share the underlying (threaded) frame buffer with the source. */
    ret = ff_thread_ref_frame(&dst->tf, &src->tf);
    if (ret < 0)
        return ret;

    /* Take a new reference on the shared extradata; segmentation_map
     * and mv alias into it, so plain pointer copies suffice below. */
    dst->extradata = av_buffer_ref(src->extradata);
    if (!dst->extradata)
        goto fail;

    dst->segmentation_map = src->segmentation_map;
    dst->mv = src->mv;
    dst->uses_2pass = src->uses_2pass;

    if (src->hwaccel_picture_private) {
        dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
        if (!dst->hwaccel_priv_buf)
            goto fail;
        /* NOTE(review): the line that points dst->hwaccel_picture_private
         * at the newly referenced buffer appears to have been lost in
         * extraction — confirm against upstream. */
    }

    return 0;

fail:
    /* Safe even when only some references were taken. */
    vp9_frame_unref(avctx, dst);
    return AVERROR(ENOMEM);
}
177 
static int update_size(AVCodecContext *avctx, int w, int h)
{
/* Upper bound on the number of hwaccel formats offered below. */
#define HWACCEL_MAX (CONFIG_VP9_DXVA2_HWACCEL + \
 CONFIG_VP9_D3D11VA_HWACCEL * 2 + \
 CONFIG_VP9_NVDEC_HWACCEL + \
 CONFIG_VP9_VAAPI_HWACCEL + \
 CONFIG_VP9_VDPAU_HWACCEL)
    /* +2: software format and the AV_PIX_FMT_NONE terminator. */
    enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;
    VP9Context *s = avctx->priv_data;
    uint8_t *p;
    int bytesperpixel = s->bytesperpixel, ret, cols, rows;
    int lflvl_len, i;

    av_assert0(w > 0 && h > 0);

    /* Re-run format negotiation only when dimensions or pixel format
     * actually changed since the last successful negotiation. */
    if (!(s->pix_fmt == s->gf_fmt && w == s->w && h == s->h)) {
        if ((ret = ff_set_dimensions(avctx, w, h)) < 0)
            return ret;

        switch (s->pix_fmt) {
        case AV_PIX_FMT_YUV420P:
        /* NOTE(review): at least one further case label appears to have
         * been lost in extraction here — confirm against upstream. */
#if CONFIG_VP9_DXVA2_HWACCEL
            *fmtp++ = AV_PIX_FMT_DXVA2_VLD;
#endif
#if CONFIG_VP9_D3D11VA_HWACCEL
            *fmtp++ = AV_PIX_FMT_D3D11VA_VLD;
            *fmtp++ = AV_PIX_FMT_D3D11;
#endif
#if CONFIG_VP9_NVDEC_HWACCEL
            *fmtp++ = AV_PIX_FMT_CUDA;
#endif
#if CONFIG_VP9_VAAPI_HWACCEL
            *fmtp++ = AV_PIX_FMT_VAAPI;
#endif
#if CONFIG_VP9_VDPAU_HWACCEL
            *fmtp++ = AV_PIX_FMT_VDPAU;
#endif
            break;
        /* NOTE(review): the case label(s) introducing this second group
         * were lost in extraction; as shown it would be unreachable. */
#if CONFIG_VP9_NVDEC_HWACCEL
            *fmtp++ = AV_PIX_FMT_CUDA;
#endif
#if CONFIG_VP9_VAAPI_HWACCEL
            *fmtp++ = AV_PIX_FMT_VAAPI;
#endif
#if CONFIG_VP9_VDPAU_HWACCEL
            *fmtp++ = AV_PIX_FMT_VDPAU;
#endif
            break;
        }

        /* Software format last, then the list terminator. */
        *fmtp++ = s->pix_fmt;
        *fmtp = AV_PIX_FMT_NONE;

        /* NOTE(review): the format-negotiation call that sets 'ret' from
         * pix_fmts was lost in extraction — confirm against upstream. */
        if (ret < 0)
            return ret;

        avctx->pix_fmt = ret;
        s->gf_fmt = s->pix_fmt;
        s->w = w;
        s->h = h;
    }

    /* Frame size in 8x8-block units. */
    cols = (w + 7) >> 3;
    rows = (h + 7) >> 3;

    /* Context buffers below are still valid when geometry and pixel
     * format are unchanged. */
    if (s->intra_pred_data[0] && cols == s->cols && rows == s->rows && s->pix_fmt == s->last_fmt)
        return 0;

    s->last_fmt = s->pix_fmt;
    s->sb_cols = (w + 63) >> 6;  /* 64x64 superblock units */
    s->sb_rows = (h + 63) >> 6;
    s->cols = (w + 7) >> 3;
    s->rows = (h + 7) >> 3;
    lflvl_len = avctx->active_thread_type == FF_THREAD_SLICE ? s->sb_rows : 1;

/* Carve each "above" context array out of one shared allocation; p is
 * the running cursor, n is the per-superblock-column element count. */
#define assign(var, type, n) var = (type) p; p += s->sb_cols * (n) * sizeof(*var)
    av_freep(&s->intra_pred_data[0]);
    // FIXME we slightly over-allocate here for subsampled chroma, but a little
    // bit of padding shouldn't affect performance...
    p = av_malloc(s->sb_cols * (128 + 192 * bytesperpixel +
                  lflvl_len * sizeof(*s->lflvl) + 16 * sizeof(*s->above_mv_ctx)));
    if (!p)
        return AVERROR(ENOMEM);
    assign(s->intra_pred_data[0], uint8_t *, 64 * bytesperpixel);
    assign(s->intra_pred_data[1], uint8_t *, 64 * bytesperpixel);
    assign(s->intra_pred_data[2], uint8_t *, 64 * bytesperpixel);
    assign(s->above_y_nnz_ctx, uint8_t *, 16);
    assign(s->above_mode_ctx, uint8_t *, 16);
    assign(s->above_mv_ctx, VP56mv(*)[2], 16);
    assign(s->above_uv_nnz_ctx[0], uint8_t *, 16);
    assign(s->above_uv_nnz_ctx[1], uint8_t *, 16);
    assign(s->above_partition_ctx, uint8_t *, 8);
    assign(s->above_skip_ctx, uint8_t *, 8);
    assign(s->above_txfm_ctx, uint8_t *, 8);
    assign(s->above_segpred_ctx, uint8_t *, 8);
    assign(s->above_intra_ctx, uint8_t *, 8);
    assign(s->above_comp_ctx, uint8_t *, 8);
    assign(s->above_ref_ctx, uint8_t *, 8);
    assign(s->above_filter_ctx, uint8_t *, 8);
    assign(s->lflvl, VP9Filter *, lflvl_len);
#undef assign

    /* Geometry changed, so per-tile scratch buffers must be rebuilt by
     * the next update_block_buffers() call. */
    if (s->td) {
        for (i = 0; i < s->active_tile_cols; i++)
            vp9_tile_data_free(&s->td[i]);
    }

    /* Re-init DSP function tables when the bit depth changes. */
    if (s->s.h.bpp != s->last_bpp) {
        ff_vp9dsp_init(&s->dsp, s->s.h.bpp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
        ff_videodsp_init(&s->vdsp, s->s.h.bpp);
        s->last_bpp = s->s.h.bpp;
    }

    return 0;
}
296 
{
    int i;
    VP9Context *s = avctx->priv_data;
    int chroma_blocks, chroma_eobs, bytesperpixel = s->bytesperpixel;
    VP9TileData *td = &s->td[0];

    /* Nothing to do when buffers exist and the 1-pass/2-pass layout
     * matches the current frame's mode. */
    if (td->b_base && td->block_base && s->block_alloc_using_2pass == s->s.frames[CUR_FRAME].uses_2pass)
        return 0;

    /* NOTE(review): a line freeing the old tile-0 buffers appears to
     * have been lost in extraction here — confirm against upstream. */
    /* Chroma plane sizes shrink with each subsampling direction. */
    chroma_blocks = 64 * 64 >> (s->ss_h + s->ss_v);
    chroma_eobs = 16 * 16 >> (s->ss_h + s->ss_v);
    if (s->s.frames[CUR_FRAME].uses_2pass) {
        int sbs = s->sb_cols * s->sb_rows;

        /* 2-pass: whole-frame storage — one VP9Block per 8x8 block and
         * coefficient/EOB space for every superblock. */
        td->b_base = av_malloc_array(s->cols * s->rows, sizeof(VP9Block));
        td->block_base = av_mallocz(((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
                                     16 * 16 + 2 * chroma_eobs) * sbs);
        if (!td->b_base || !td->block_base)
            return AVERROR(ENOMEM);
        /* uvblock/eob/uveob all alias into the single block_base
         * allocation, laid out luma, then both chroma planes. */
        td->uvblock_base[0] = td->block_base + sbs * 64 * 64 * bytesperpixel;
        td->uvblock_base[1] = td->uvblock_base[0] + sbs * chroma_blocks * bytesperpixel;
        td->eob_base = (uint8_t *) (td->uvblock_base[1] + sbs * chroma_blocks * bytesperpixel);
        td->uveob_base[0] = td->eob_base + 16 * 16 * sbs;
        td->uveob_base[1] = td->uveob_base[0] + chroma_eobs * sbs;

        /* NOTE(review): the conditional that guards this allocation
         * (block-structure export) was lost in extraction. */
            td->block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
            if (!td->block_structure)
                return AVERROR(ENOMEM);
        }
    } else {
        /* 1-pass: only tile 0 keeps its buffers; extra tile columns are
         * freed first, then each active tile gets per-superblock-sized
         * scratch space. */
        for (i = 1; i < s->active_tile_cols; i++)
            vp9_tile_data_free(&s->td[i]);

        for (i = 0; i < s->active_tile_cols; i++) {
            s->td[i].b_base = av_malloc(sizeof(VP9Block));
            s->td[i].block_base = av_mallocz((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
                                             16 * 16 + 2 * chroma_eobs);
            if (!s->td[i].b_base || !s->td[i].block_base)
                return AVERROR(ENOMEM);
            s->td[i].uvblock_base[0] = s->td[i].block_base + 64 * 64 * bytesperpixel;
            s->td[i].uvblock_base[1] = s->td[i].uvblock_base[0] + chroma_blocks * bytesperpixel;
            s->td[i].eob_base = (uint8_t *) (s->td[i].uvblock_base[1] + chroma_blocks * bytesperpixel);
            s->td[i].uveob_base[0] = s->td[i].eob_base + 16 * 16;
            s->td[i].uveob_base[1] = s->td[i].uveob_base[0] + chroma_eobs;

            /* NOTE(review): the conditional guarding this allocation was
             * lost in extraction. */
                s->td[i].block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
                if (!s->td[i].block_structure)
                    return AVERROR(ENOMEM);
            }
        }
    }
    s->block_alloc_using_2pass = s->s.frames[CUR_FRAME].uses_2pass;

    return 0;
}
356 
// The sign bit is at the end, not the start, of a bit sequence
{
    /* Read an n-bit magnitude, then one sign bit: 1 -> negative.
     * (Standard get_sbits() puts the sign first, hence this helper.) */
    int v = get_bits(gb, n);
    return get_bits1(gb) ? -v : v;
}
363 
364 static av_always_inline int inv_recenter_nonneg(int v, int m)
365 {
366  if (v > 2 * m)
367  return v;
368  if (v & 1)
369  return m - ((v + 1) >> 1);
370  return m + (v >> 1);
371 }
372 
373 // differential forward probability updates
/**
 * Read one differentially coded probability update from the range coder
 * and apply it to the current probability p (both in [1, 255]).
 * The VLC below selects ever-larger delta buckets (4/4/5/7-bit tails),
 * inv_map_table[] undoes the encoder's remapping, and
 * inv_recenter_nonneg() folds the absolute delta back around p.
 */
static int update_prob(VP56RangeCoder *c, int p)
{
    static const uint8_t inv_map_table[255] = {
          7,  20,  33,  46,  59,  72,  85,  98, 111, 124, 137, 150, 163, 176,
        189, 202, 215, 228, 241, 254,   1,   2,   3,   4,   5,   6,   8,   9,
         10,  11,  12,  13,  14,  15,  16,  17,  18,  19,  21,  22,  23,  24,
         25,  26,  27,  28,  29,  30,  31,  32,  34,  35,  36,  37,  38,  39,
         40,  41,  42,  43,  44,  45,  47,  48,  49,  50,  51,  52,  53,  54,
         55,  56,  57,  58,  60,  61,  62,  63,  64,  65,  66,  67,  68,  69,
         70,  71,  73,  74,  75,  76,  77,  78,  79,  80,  81,  82,  83,  84,
         86,  87,  88,  89,  90,  91,  92,  93,  94,  95,  96,  97,  99, 100,
        101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115,
        116, 117, 118, 119, 120, 121, 122, 123, 125, 126, 127, 128, 129, 130,
        131, 132, 133, 134, 135, 136, 138, 139, 140, 141, 142, 143, 144, 145,
        146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
        161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
        177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 191,
        192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205, 206,
        207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221,
        222, 223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236,
        237, 238, 239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
        252, 253, 253,
    };
    int d;

    /* This code is trying to do a differential probability update. For a
     * current probability A in the range [1, 255], the difference to a new
     * probability of any value can be expressed differentially as 1-A, 255-A
     * where some part of this (absolute range) exists both in positive as
     * well as the negative part, whereas another part only exists in one
     * half. We're trying to code this shared part differentially, i.e.
     * times two where the value of the lowest bit specifies the sign, and
     * the single part is then coded on top of this. This absolute difference
     * then again has a value of [0, 254], but a bigger value in this range
     * indicates that we're further away from the original value A, so we
     * can code this as a VLC code, since higher values are increasingly
     * unlikely. The first 20 values in inv_map_table[] allow 'cheap, rough'
     * updates vs. the 'fine, exact' updates further down the range, which
     * adds one extra dimension to this differential update model. */

    if (!vp8_rac_get(c)) {
        d = vp8_rac_get_uint(c, 4) + 0;
    } else if (!vp8_rac_get(c)) {
        d = vp8_rac_get_uint(c, 4) + 16;
    } else if (!vp8_rac_get(c)) {
        d = vp8_rac_get_uint(c, 5) + 32;
    } else {
        d = vp8_rac_get_uint(c, 7);
        if (d >= 65)
            d = (d << 1) - 65 + vp8_rac_get(c);
        d += 64;
        av_assert2(d < FF_ARRAY_ELEMS(inv_map_table));
    }

    /* Mirror around the midpoint so the result stays inside [1, 255]. */
    return p <= 128 ? 1 + inv_recenter_nonneg(inv_map_table[d], p - 1) :
                    255 - inv_recenter_nonneg(inv_map_table[d], 255 - p);
}
431 
{
    /* Parse bit depth, colorspace and subsampling from the bitstream and
     * derive s->pix_fmt / s->ss_h / s->ss_v accordingly. */
    static const enum AVColorSpace colorspaces[8] = {
        /* NOTE(review): the 8 initializer values (3-bit bitstream code ->
         * AVColorSpace mapping) were lost in extraction — confirm against
         * upstream before relying on this listing. */
    };
    VP9Context *s = avctx->priv_data;
    /* Profiles 0/1 are always 8-bit; 2/3 signal 10 vs 12 bit here. */
    int bits = avctx->profile <= 1 ? 0 : 1 + get_bits1(&s->gb); // 0:8, 1:10, 2:12

    s->bpp_index = bits;
    s->s.h.bpp = 8 + bits * 2;
    s->bytesperpixel = (7 + s->s.h.bpp) >> 3;
    avctx->colorspace = colorspaces[get_bits(&s->gb, 3)];
    if (avctx->colorspace == AVCOL_SPC_RGB) { // RGB = profile 1
        static const enum AVPixelFormat pix_fmt_rgb[3] = {
            /* NOTE(review): initializer values (one per bit depth) lost
             * in extraction. */
        };
        s->ss_h = s->ss_v = 0;
        avctx->color_range = AVCOL_RANGE_JPEG;
        s->pix_fmt = pix_fmt_rgb[bits];
        if (avctx->profile & 1) {
            if (get_bits1(&s->gb)) {
                av_log(avctx, AV_LOG_ERROR, "Reserved bit set in RGB\n");
                return AVERROR_INVALIDDATA;
            }
        } else {
            av_log(avctx, AV_LOG_ERROR, "RGB not supported in profile %d\n",
                   avctx->profile);
            return AVERROR_INVALIDDATA;
        }
    } else {
        static const enum AVPixelFormat pix_fmt_for_ss[3][2 /* v */][2 /* h */] = {
            /* NOTE(review): initializer values ([bit depth][vertical ss]
             * [horizontal ss] -> pixel format) lost in extraction. */
        };
        if (avctx->profile & 1) {
            /* Odd profiles (1/3) carry explicit subsampling bits. */
            s->ss_h = get_bits1(&s->gb);
            s->ss_v = get_bits1(&s->gb);
            s->pix_fmt = pix_fmt_for_ss[bits][s->ss_v][s->ss_h];
            if (s->pix_fmt == AV_PIX_FMT_YUV420P) {
                av_log(avctx, AV_LOG_ERROR, "YUV 4:2:0 not supported in profile %d\n",
                       avctx->profile);
                return AVERROR_INVALIDDATA;
            } else if (get_bits1(&s->gb)) {
                av_log(avctx, AV_LOG_ERROR, "Profile %d color details reserved bit set\n",
                       avctx->profile);
                return AVERROR_INVALIDDATA;
            }
        } else {
            /* Even profiles (0/2) are always 4:2:0. */
            s->ss_h = s->ss_v = 1;
            s->pix_fmt = pix_fmt_for_ss[bits][1][1];
        }
    }

    return 0;
}
493 
495  const uint8_t *data, int size, int *ref)
496 {
497  VP9Context *s = avctx->priv_data;
498  int c, i, j, k, l, m, n, w, h, max, size2, ret, sharp;
499  int last_invisible;
500  const uint8_t *data2;
501 
502  /* general header */
503  if ((ret = init_get_bits8(&s->gb, data, size)) < 0) {
504  av_log(avctx, AV_LOG_ERROR, "Failed to initialize bitstream reader\n");
505  return ret;
506  }
507  if (get_bits(&s->gb, 2) != 0x2) { // frame marker
508  av_log(avctx, AV_LOG_ERROR, "Invalid frame marker\n");
509  return AVERROR_INVALIDDATA;
510  }
511  avctx->profile = get_bits1(&s->gb);
512  avctx->profile |= get_bits1(&s->gb) << 1;
513  if (avctx->profile == 3) avctx->profile += get_bits1(&s->gb);
514  if (avctx->profile > 3) {
515  av_log(avctx, AV_LOG_ERROR, "Profile %d is not yet supported\n", avctx->profile);
516  return AVERROR_INVALIDDATA;
517  }
518  s->s.h.profile = avctx->profile;
519  if (get_bits1(&s->gb)) {
520  *ref = get_bits(&s->gb, 3);
521  return 0;
522  }
523 
524  s->last_keyframe = s->s.h.keyframe;
525  s->s.h.keyframe = !get_bits1(&s->gb);
526 
527  last_invisible = s->s.h.invisible;
528  s->s.h.invisible = !get_bits1(&s->gb);
529  s->s.h.errorres = get_bits1(&s->gb);
530  s->s.h.use_last_frame_mvs = !s->s.h.errorres && !last_invisible;
531 
532  if (s->s.h.keyframe) {
533  if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
534  av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
535  return AVERROR_INVALIDDATA;
536  }
537  if ((ret = read_colorspace_details(avctx)) < 0)
538  return ret;
539  // for profile 1, here follows the subsampling bits
540  s->s.h.refreshrefmask = 0xff;
541  w = get_bits(&s->gb, 16) + 1;
542  h = get_bits(&s->gb, 16) + 1;
543  if (get_bits1(&s->gb)) // display size
544  skip_bits(&s->gb, 32);
545  } else {
546  s->s.h.intraonly = s->s.h.invisible ? get_bits1(&s->gb) : 0;
547  s->s.h.resetctx = s->s.h.errorres ? 0 : get_bits(&s->gb, 2);
548  if (s->s.h.intraonly) {
549  if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
550  av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
551  return AVERROR_INVALIDDATA;
552  }
553  if (avctx->profile >= 1) {
554  if ((ret = read_colorspace_details(avctx)) < 0)
555  return ret;
556  } else {
557  s->ss_h = s->ss_v = 1;
558  s->s.h.bpp = 8;
559  s->bpp_index = 0;
560  s->bytesperpixel = 1;
561  s->pix_fmt = AV_PIX_FMT_YUV420P;
562  avctx->colorspace = AVCOL_SPC_BT470BG;
563  avctx->color_range = AVCOL_RANGE_MPEG;
564  }
565  s->s.h.refreshrefmask = get_bits(&s->gb, 8);
566  w = get_bits(&s->gb, 16) + 1;
567  h = get_bits(&s->gb, 16) + 1;
568  if (get_bits1(&s->gb)) // display size
569  skip_bits(&s->gb, 32);
570  } else {
571  s->s.h.refreshrefmask = get_bits(&s->gb, 8);
572  s->s.h.refidx[0] = get_bits(&s->gb, 3);
573  s->s.h.signbias[0] = get_bits1(&s->gb) && !s->s.h.errorres;
574  s->s.h.refidx[1] = get_bits(&s->gb, 3);
575  s->s.h.signbias[1] = get_bits1(&s->gb) && !s->s.h.errorres;
576  s->s.h.refidx[2] = get_bits(&s->gb, 3);
577  s->s.h.signbias[2] = get_bits1(&s->gb) && !s->s.h.errorres;
578  if (!s->s.refs[s->s.h.refidx[0]].f->buf[0] ||
579  !s->s.refs[s->s.h.refidx[1]].f->buf[0] ||
580  !s->s.refs[s->s.h.refidx[2]].f->buf[0]) {
581  av_log(avctx, AV_LOG_ERROR, "Not all references are available\n");
582  return AVERROR_INVALIDDATA;
583  }
584  if (get_bits1(&s->gb)) {
585  w = s->s.refs[s->s.h.refidx[0]].f->width;
586  h = s->s.refs[s->s.h.refidx[0]].f->height;
587  } else if (get_bits1(&s->gb)) {
588  w = s->s.refs[s->s.h.refidx[1]].f->width;
589  h = s->s.refs[s->s.h.refidx[1]].f->height;
590  } else if (get_bits1(&s->gb)) {
591  w = s->s.refs[s->s.h.refidx[2]].f->width;
592  h = s->s.refs[s->s.h.refidx[2]].f->height;
593  } else {
594  w = get_bits(&s->gb, 16) + 1;
595  h = get_bits(&s->gb, 16) + 1;
596  }
597  // Note that in this code, "CUR_FRAME" is actually before we
598  // have formally allocated a frame, and thus actually represents
599  // the _last_ frame
600  s->s.h.use_last_frame_mvs &= s->s.frames[CUR_FRAME].tf.f->width == w &&
601  s->s.frames[CUR_FRAME].tf.f->height == h;
602  if (get_bits1(&s->gb)) // display size
603  skip_bits(&s->gb, 32);
604  s->s.h.highprecisionmvs = get_bits1(&s->gb);
605  s->s.h.filtermode = get_bits1(&s->gb) ? FILTER_SWITCHABLE :
606  get_bits(&s->gb, 2);
607  s->s.h.allowcompinter = s->s.h.signbias[0] != s->s.h.signbias[1] ||
608  s->s.h.signbias[0] != s->s.h.signbias[2];
609  if (s->s.h.allowcompinter) {
610  if (s->s.h.signbias[0] == s->s.h.signbias[1]) {
611  s->s.h.fixcompref = 2;
612  s->s.h.varcompref[0] = 0;
613  s->s.h.varcompref[1] = 1;
614  } else if (s->s.h.signbias[0] == s->s.h.signbias[2]) {
615  s->s.h.fixcompref = 1;
616  s->s.h.varcompref[0] = 0;
617  s->s.h.varcompref[1] = 2;
618  } else {
619  s->s.h.fixcompref = 0;
620  s->s.h.varcompref[0] = 1;
621  s->s.h.varcompref[1] = 2;
622  }
623  }
624  }
625  }
626  s->s.h.refreshctx = s->s.h.errorres ? 0 : get_bits1(&s->gb);
627  s->s.h.parallelmode = s->s.h.errorres ? 1 : get_bits1(&s->gb);
628  s->s.h.framectxid = c = get_bits(&s->gb, 2);
629  if (s->s.h.keyframe || s->s.h.intraonly)
630  s->s.h.framectxid = 0; // BUG: libvpx ignores this field in keyframes
631 
632  /* loopfilter header data */
633  if (s->s.h.keyframe || s->s.h.errorres || s->s.h.intraonly) {
634  // reset loopfilter defaults
635  s->s.h.lf_delta.ref[0] = 1;
636  s->s.h.lf_delta.ref[1] = 0;
637  s->s.h.lf_delta.ref[2] = -1;
638  s->s.h.lf_delta.ref[3] = -1;
639  s->s.h.lf_delta.mode[0] = 0;
640  s->s.h.lf_delta.mode[1] = 0;
641  memset(s->s.h.segmentation.feat, 0, sizeof(s->s.h.segmentation.feat));
642  }
643  s->s.h.filter.level = get_bits(&s->gb, 6);
644  sharp = get_bits(&s->gb, 3);
645  // if sharpness changed, reinit lim/mblim LUTs. if it didn't change, keep
646  // the old cache values since they are still valid
647  if (s->s.h.filter.sharpness != sharp) {
648  for (i = 1; i <= 63; i++) {
649  int limit = i;
650 
651  if (sharp > 0) {
652  limit >>= (sharp + 3) >> 2;
653  limit = FFMIN(limit, 9 - sharp);
654  }
655  limit = FFMAX(limit, 1);
656 
657  s->filter_lut.lim_lut[i] = limit;
658  s->filter_lut.mblim_lut[i] = 2 * (i + 2) + limit;
659  }
660  }
661  s->s.h.filter.sharpness = sharp;
662  if ((s->s.h.lf_delta.enabled = get_bits1(&s->gb))) {
663  if ((s->s.h.lf_delta.updated = get_bits1(&s->gb))) {
664  for (i = 0; i < 4; i++)
665  if (get_bits1(&s->gb))
666  s->s.h.lf_delta.ref[i] = get_sbits_inv(&s->gb, 6);
667  for (i = 0; i < 2; i++)
668  if (get_bits1(&s->gb))
669  s->s.h.lf_delta.mode[i] = get_sbits_inv(&s->gb, 6);
670  }
671  }
672 
673  /* quantization header data */
674  s->s.h.yac_qi = get_bits(&s->gb, 8);
675  s->s.h.ydc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
676  s->s.h.uvdc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
677  s->s.h.uvac_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
678  s->s.h.lossless = s->s.h.yac_qi == 0 && s->s.h.ydc_qdelta == 0 &&
679  s->s.h.uvdc_qdelta == 0 && s->s.h.uvac_qdelta == 0;
680  if (s->s.h.lossless)
682 
683  /* segmentation header info */
684  if ((s->s.h.segmentation.enabled = get_bits1(&s->gb))) {
685  if ((s->s.h.segmentation.update_map = get_bits1(&s->gb))) {
686  for (i = 0; i < 7; i++)
687  s->s.h.segmentation.prob[i] = get_bits1(&s->gb) ?
688  get_bits(&s->gb, 8) : 255;
689  if ((s->s.h.segmentation.temporal = get_bits1(&s->gb)))
690  for (i = 0; i < 3; i++)
691  s->s.h.segmentation.pred_prob[i] = get_bits1(&s->gb) ?
692  get_bits(&s->gb, 8) : 255;
693  }
694 
695  if (get_bits1(&s->gb)) {
696  s->s.h.segmentation.absolute_vals = get_bits1(&s->gb);
697  for (i = 0; i < 8; i++) {
698  if ((s->s.h.segmentation.feat[i].q_enabled = get_bits1(&s->gb)))
699  s->s.h.segmentation.feat[i].q_val = get_sbits_inv(&s->gb, 8);
700  if ((s->s.h.segmentation.feat[i].lf_enabled = get_bits1(&s->gb)))
701  s->s.h.segmentation.feat[i].lf_val = get_sbits_inv(&s->gb, 6);
702  if ((s->s.h.segmentation.feat[i].ref_enabled = get_bits1(&s->gb)))
703  s->s.h.segmentation.feat[i].ref_val = get_bits(&s->gb, 2);
704  s->s.h.segmentation.feat[i].skip_enabled = get_bits1(&s->gb);
705  }
706  }
707  }
708 
709  // set qmul[] based on Y/UV, AC/DC and segmentation Q idx deltas
710  for (i = 0; i < (s->s.h.segmentation.enabled ? 8 : 1); i++) {
711  int qyac, qydc, quvac, quvdc, lflvl, sh;
712 
713  if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].q_enabled) {
714  if (s->s.h.segmentation.absolute_vals)
715  qyac = av_clip_uintp2(s->s.h.segmentation.feat[i].q_val, 8);
716  else
717  qyac = av_clip_uintp2(s->s.h.yac_qi + s->s.h.segmentation.feat[i].q_val, 8);
718  } else {
719  qyac = s->s.h.yac_qi;
720  }
721  qydc = av_clip_uintp2(qyac + s->s.h.ydc_qdelta, 8);
722  quvdc = av_clip_uintp2(qyac + s->s.h.uvdc_qdelta, 8);
723  quvac = av_clip_uintp2(qyac + s->s.h.uvac_qdelta, 8);
724  qyac = av_clip_uintp2(qyac, 8);
725 
726  s->s.h.segmentation.feat[i].qmul[0][0] = ff_vp9_dc_qlookup[s->bpp_index][qydc];
727  s->s.h.segmentation.feat[i].qmul[0][1] = ff_vp9_ac_qlookup[s->bpp_index][qyac];
728  s->s.h.segmentation.feat[i].qmul[1][0] = ff_vp9_dc_qlookup[s->bpp_index][quvdc];
729  s->s.h.segmentation.feat[i].qmul[1][1] = ff_vp9_ac_qlookup[s->bpp_index][quvac];
730 
731  sh = s->s.h.filter.level >= 32;
732  if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].lf_enabled) {
733  if (s->s.h.segmentation.absolute_vals)
734  lflvl = av_clip_uintp2(s->s.h.segmentation.feat[i].lf_val, 6);
735  else
736  lflvl = av_clip_uintp2(s->s.h.filter.level + s->s.h.segmentation.feat[i].lf_val, 6);
737  } else {
738  lflvl = s->s.h.filter.level;
739  }
740  if (s->s.h.lf_delta.enabled) {
741  s->s.h.segmentation.feat[i].lflvl[0][0] =
742  s->s.h.segmentation.feat[i].lflvl[0][1] =
743  av_clip_uintp2(lflvl + (s->s.h.lf_delta.ref[0] * (1 << sh)), 6);
744  for (j = 1; j < 4; j++) {
745  s->s.h.segmentation.feat[i].lflvl[j][0] =
746  av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
747  s->s.h.lf_delta.mode[0]) * (1 << sh)), 6);
748  s->s.h.segmentation.feat[i].lflvl[j][1] =
749  av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
750  s->s.h.lf_delta.mode[1]) * (1 << sh)), 6);
751  }
752  } else {
753  memset(s->s.h.segmentation.feat[i].lflvl, lflvl,
754  sizeof(s->s.h.segmentation.feat[i].lflvl));
755  }
756  }
757 
758  /* tiling info */
759  if ((ret = update_size(avctx, w, h)) < 0) {
760  av_log(avctx, AV_LOG_ERROR, "Failed to initialize decoder for %dx%d @ %d\n",
761  w, h, s->pix_fmt);
762  return ret;
763  }
764  for (s->s.h.tiling.log2_tile_cols = 0;
765  s->sb_cols > (64 << s->s.h.tiling.log2_tile_cols);
766  s->s.h.tiling.log2_tile_cols++) ;
767  for (max = 0; (s->sb_cols >> max) >= 4; max++) ;
768  max = FFMAX(0, max - 1);
769  while (max > s->s.h.tiling.log2_tile_cols) {
770  if (get_bits1(&s->gb))
771  s->s.h.tiling.log2_tile_cols++;
772  else
773  break;
774  }
775  s->s.h.tiling.log2_tile_rows = decode012(&s->gb);
776  s->s.h.tiling.tile_rows = 1 << s->s.h.tiling.log2_tile_rows;
777  if (s->s.h.tiling.tile_cols != (1 << s->s.h.tiling.log2_tile_cols)) {
778  int n_range_coders;
779  VP56RangeCoder *rc;
780 
781  if (s->td) {
782  for (i = 0; i < s->active_tile_cols; i++)
783  vp9_tile_data_free(&s->td[i]);
784  av_freep(&s->td);
785  }
786 
787  s->s.h.tiling.tile_cols = 1 << s->s.h.tiling.log2_tile_cols;
788  s->active_tile_cols = avctx->active_thread_type == FF_THREAD_SLICE ?
789  s->s.h.tiling.tile_cols : 1;
790  vp9_alloc_entries(avctx, s->sb_rows);
791  if (avctx->active_thread_type == FF_THREAD_SLICE) {
792  n_range_coders = 4; // max_tile_rows
793  } else {
794  n_range_coders = s->s.h.tiling.tile_cols;
795  }
796  s->td = av_calloc(s->active_tile_cols, sizeof(VP9TileData) +
797  n_range_coders * sizeof(VP56RangeCoder));
798  if (!s->td)
799  return AVERROR(ENOMEM);
800  rc = (VP56RangeCoder *) &s->td[s->active_tile_cols];
801  for (i = 0; i < s->active_tile_cols; i++) {
802  s->td[i].s = s;
803  s->td[i].c_b = rc;
804  rc += n_range_coders;
805  }
806  }
807 
808  /* check reference frames */
809  if (!s->s.h.keyframe && !s->s.h.intraonly) {
810  int valid_ref_frame = 0;
811  for (i = 0; i < 3; i++) {
812  AVFrame *ref = s->s.refs[s->s.h.refidx[i]].f;
813  int refw = ref->width, refh = ref->height;
814 
815  if (ref->format != avctx->pix_fmt) {
816  av_log(avctx, AV_LOG_ERROR,
817  "Ref pixfmt (%s) did not match current frame (%s)",
818  av_get_pix_fmt_name(ref->format),
819  av_get_pix_fmt_name(avctx->pix_fmt));
820  return AVERROR_INVALIDDATA;
821  } else if (refw == w && refh == h) {
822  s->mvscale[i][0] = s->mvscale[i][1] = 0;
823  } else {
824  /* Check to make sure at least one of frames that */
825  /* this frame references has valid dimensions */
826  if (w * 2 < refw || h * 2 < refh || w > 16 * refw || h > 16 * refh) {
827  av_log(avctx, AV_LOG_WARNING,
828  "Invalid ref frame dimensions %dx%d for frame size %dx%d\n",
829  refw, refh, w, h);
830  s->mvscale[i][0] = s->mvscale[i][1] = REF_INVALID_SCALE;
831  continue;
832  }
833  s->mvscale[i][0] = (refw << 14) / w;
834  s->mvscale[i][1] = (refh << 14) / h;
835  s->mvstep[i][0] = 16 * s->mvscale[i][0] >> 14;
836  s->mvstep[i][1] = 16 * s->mvscale[i][1] >> 14;
837  }
838  valid_ref_frame++;
839  }
840  if (!valid_ref_frame) {
841  av_log(avctx, AV_LOG_ERROR, "No valid reference frame is found, bitstream not supported\n");
842  return AVERROR_INVALIDDATA;
843  }
844  }
845 
846  if (s->s.h.keyframe || s->s.h.errorres || (s->s.h.intraonly && s->s.h.resetctx == 3)) {
847  s->prob_ctx[0].p = s->prob_ctx[1].p = s->prob_ctx[2].p =
848  s->prob_ctx[3].p = ff_vp9_default_probs;
849  memcpy(s->prob_ctx[0].coef, ff_vp9_default_coef_probs,
850  sizeof(ff_vp9_default_coef_probs));
851  memcpy(s->prob_ctx[1].coef, ff_vp9_default_coef_probs,
852  sizeof(ff_vp9_default_coef_probs));
853  memcpy(s->prob_ctx[2].coef, ff_vp9_default_coef_probs,
854  sizeof(ff_vp9_default_coef_probs));
855  memcpy(s->prob_ctx[3].coef, ff_vp9_default_coef_probs,
856  sizeof(ff_vp9_default_coef_probs));
857  } else if (s->s.h.intraonly && s->s.h.resetctx == 2) {
858  s->prob_ctx[c].p = ff_vp9_default_probs;
859  memcpy(s->prob_ctx[c].coef, ff_vp9_default_coef_probs,
860  sizeof(ff_vp9_default_coef_probs));
861  }
862 
863  // next 16 bits is size of the rest of the header (arith-coded)
864  s->s.h.compressed_header_size = size2 = get_bits(&s->gb, 16);
865  s->s.h.uncompressed_header_size = (get_bits_count(&s->gb) + 7) / 8;
866 
867  data2 = align_get_bits(&s->gb);
868  if (size2 > size - (data2 - data)) {
869  av_log(avctx, AV_LOG_ERROR, "Invalid compressed header size\n");
870  return AVERROR_INVALIDDATA;
871  }
872  ret = ff_vp56_init_range_decoder(&s->c, data2, size2);
873  if (ret < 0)
874  return ret;
875 
876  if (vp56_rac_get_prob_branchy(&s->c, 128)) { // marker bit
877  av_log(avctx, AV_LOG_ERROR, "Marker bit was set\n");
878  return AVERROR_INVALIDDATA;
879  }
880 
881  for (i = 0; i < s->active_tile_cols; i++) {
882  if (s->s.h.keyframe || s->s.h.intraonly) {
883  memset(s->td[i].counts.coef, 0, sizeof(s->td[0].counts.coef));
884  memset(s->td[i].counts.eob, 0, sizeof(s->td[0].counts.eob));
885  } else {
886  memset(&s->td[i].counts, 0, sizeof(s->td[0].counts));
887  }
888  s->td[i].nb_block_structure = 0;
889  }
890 
891  /* FIXME is it faster to not copy here, but do it down in the fw updates
892  * as explicit copies if the fw update is missing (and skip the copy upon
893  * fw update)? */
894  s->prob.p = s->prob_ctx[c].p;
895 
896  // txfm updates
897  if (s->s.h.lossless) {
898  s->s.h.txfmmode = TX_4X4;
899  } else {
900  s->s.h.txfmmode = vp8_rac_get_uint(&s->c, 2);
901  if (s->s.h.txfmmode == 3)
902  s->s.h.txfmmode += vp8_rac_get(&s->c);
903 
904  if (s->s.h.txfmmode == TX_SWITCHABLE) {
905  for (i = 0; i < 2; i++)
906  if (vp56_rac_get_prob_branchy(&s->c, 252))
907  s->prob.p.tx8p[i] = update_prob(&s->c, s->prob.p.tx8p[i]);
908  for (i = 0; i < 2; i++)
909  for (j = 0; j < 2; j++)
910  if (vp56_rac_get_prob_branchy(&s->c, 252))
911  s->prob.p.tx16p[i][j] =
912  update_prob(&s->c, s->prob.p.tx16p[i][j]);
913  for (i = 0; i < 2; i++)
914  for (j = 0; j < 3; j++)
915  if (vp56_rac_get_prob_branchy(&s->c, 252))
916  s->prob.p.tx32p[i][j] =
917  update_prob(&s->c, s->prob.p.tx32p[i][j]);
918  }
919  }
920 
921  // coef updates
922  for (i = 0; i < 4; i++) {
923  uint8_t (*ref)[2][6][6][3] = s->prob_ctx[c].coef[i];
924  if (vp8_rac_get(&s->c)) {
925  for (j = 0; j < 2; j++)
926  for (k = 0; k < 2; k++)
927  for (l = 0; l < 6; l++)
928  for (m = 0; m < 6; m++) {
929  uint8_t *p = s->prob.coef[i][j][k][l][m];
930  uint8_t *r = ref[j][k][l][m];
931  if (m >= 3 && l == 0) // dc only has 3 pt
932  break;
933  for (n = 0; n < 3; n++) {
934  if (vp56_rac_get_prob_branchy(&s->c, 252))
935  p[n] = update_prob(&s->c, r[n]);
936  else
937  p[n] = r[n];
938  }
939  memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
940  }
941  } else {
942  for (j = 0; j < 2; j++)
943  for (k = 0; k < 2; k++)
944  for (l = 0; l < 6; l++)
945  for (m = 0; m < 6; m++) {
946  uint8_t *p = s->prob.coef[i][j][k][l][m];
947  uint8_t *r = ref[j][k][l][m];
948  if (m > 3 && l == 0) // dc only has 3 pt
949  break;
950  memcpy(p, r, 3);
951  memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
952  }
953  }
954  if (s->s.h.txfmmode == i)
955  break;
956  }
957 
958  // mode updates
959  for (i = 0; i < 3; i++)
960  if (vp56_rac_get_prob_branchy(&s->c, 252))
961  s->prob.p.skip[i] = update_prob(&s->c, s->prob.p.skip[i]);
962  if (!s->s.h.keyframe && !s->s.h.intraonly) {
963  for (i = 0; i < 7; i++)
964  for (j = 0; j < 3; j++)
965  if (vp56_rac_get_prob_branchy(&s->c, 252))
966  s->prob.p.mv_mode[i][j] =
967  update_prob(&s->c, s->prob.p.mv_mode[i][j]);
968 
969  if (s->s.h.filtermode == FILTER_SWITCHABLE)
970  for (i = 0; i < 4; i++)
971  for (j = 0; j < 2; j++)
972  if (vp56_rac_get_prob_branchy(&s->c, 252))
973  s->prob.p.filter[i][j] =
974  update_prob(&s->c, s->prob.p.filter[i][j]);
975 
976  for (i = 0; i < 4; i++)
977  if (vp56_rac_get_prob_branchy(&s->c, 252))
978  s->prob.p.intra[i] = update_prob(&s->c, s->prob.p.intra[i]);
979 
980  if (s->s.h.allowcompinter) {
981  s->s.h.comppredmode = vp8_rac_get(&s->c);
982  if (s->s.h.comppredmode)
983  s->s.h.comppredmode += vp8_rac_get(&s->c);
984  if (s->s.h.comppredmode == PRED_SWITCHABLE)
985  for (i = 0; i < 5; i++)
986  if (vp56_rac_get_prob_branchy(&s->c, 252))
987  s->prob.p.comp[i] =
988  update_prob(&s->c, s->prob.p.comp[i]);
989  } else {
990  s->s.h.comppredmode = PRED_SINGLEREF;
991  }
992 
993  if (s->s.h.comppredmode != PRED_COMPREF) {
994  for (i = 0; i < 5; i++) {
995  if (vp56_rac_get_prob_branchy(&s->c, 252))
996  s->prob.p.single_ref[i][0] =
997  update_prob(&s->c, s->prob.p.single_ref[i][0]);
998  if (vp56_rac_get_prob_branchy(&s->c, 252))
999  s->prob.p.single_ref[i][1] =
1000  update_prob(&s->c, s->prob.p.single_ref[i][1]);
1001  }
1002  }
1003 
1004  if (s->s.h.comppredmode != PRED_SINGLEREF) {
1005  for (i = 0; i < 5; i++)
1006  if (vp56_rac_get_prob_branchy(&s->c, 252))
1007  s->prob.p.comp_ref[i] =
1008  update_prob(&s->c, s->prob.p.comp_ref[i]);
1009  }
1010 
1011  for (i = 0; i < 4; i++)
1012  for (j = 0; j < 9; j++)
1013  if (vp56_rac_get_prob_branchy(&s->c, 252))
1014  s->prob.p.y_mode[i][j] =
1015  update_prob(&s->c, s->prob.p.y_mode[i][j]);
1016 
1017  for (i = 0; i < 4; i++)
1018  for (j = 0; j < 4; j++)
1019  for (k = 0; k < 3; k++)
1020  if (vp56_rac_get_prob_branchy(&s->c, 252))
1021  s->prob.p.partition[3 - i][j][k] =
1022  update_prob(&s->c,
1023  s->prob.p.partition[3 - i][j][k]);
1024 
1025  // mv fields don't use the update_prob subexp model for some reason
1026  for (i = 0; i < 3; i++)
1027  if (vp56_rac_get_prob_branchy(&s->c, 252))
1028  s->prob.p.mv_joint[i] = (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
1029 
1030  for (i = 0; i < 2; i++) {
1031  if (vp56_rac_get_prob_branchy(&s->c, 252))
1032  s->prob.p.mv_comp[i].sign =
1033  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
1034 
1035  for (j = 0; j < 10; j++)
1036  if (vp56_rac_get_prob_branchy(&s->c, 252))
1037  s->prob.p.mv_comp[i].classes[j] =
1038  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
1039 
1040  if (vp56_rac_get_prob_branchy(&s->c, 252))
1041  s->prob.p.mv_comp[i].class0 =
1042  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
1043 
1044  for (j = 0; j < 10; j++)
1045  if (vp56_rac_get_prob_branchy(&s->c, 252))
1046  s->prob.p.mv_comp[i].bits[j] =
1047  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
1048  }
1049 
1050  for (i = 0; i < 2; i++) {
1051  for (j = 0; j < 2; j++)
1052  for (k = 0; k < 3; k++)
1053  if (vp56_rac_get_prob_branchy(&s->c, 252))
1054  s->prob.p.mv_comp[i].class0_fp[j][k] =
1055  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
1056 
1057  for (j = 0; j < 3; j++)
1058  if (vp56_rac_get_prob_branchy(&s->c, 252))
1059  s->prob.p.mv_comp[i].fp[j] =
1060  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
1061  }
1062 
1063  if (s->s.h.highprecisionmvs) {
1064  for (i = 0; i < 2; i++) {
1065  if (vp56_rac_get_prob_branchy(&s->c, 252))
1066  s->prob.p.mv_comp[i].class0_hp =
1067  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
1068 
1069  if (vp56_rac_get_prob_branchy(&s->c, 252))
1070  s->prob.p.mv_comp[i].hp =
1071  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
1072  }
1073  }
1074  }
1075 
1076  return (data2 - data) + size2;
1077 }
1078 
1079 static void decode_sb(VP9TileData *td, int row, int col, VP9Filter *lflvl,
1080  ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
1081 {
1082  const VP9Context *s = td->s;
1083  int c = ((s->above_partition_ctx[col] >> (3 - bl)) & 1) |
1084  (((td->left_partition_ctx[row & 0x7] >> (3 - bl)) & 1) << 1);
1085  const uint8_t *p = s->s.h.keyframe || s->s.h.intraonly ? ff_vp9_default_kf_partition_probs[bl][c] :
1086  s->prob.p.partition[bl][c];
1087  enum BlockPartition bp;
1088  ptrdiff_t hbs = 4 >> bl;
1089  AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
1090  ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
1091  int bytesperpixel = s->bytesperpixel;
1092 
1093  if (bl == BL_8X8) {
1095  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1096  } else if (col + hbs < s->cols) { // FIXME why not <=?
1097  if (row + hbs < s->rows) { // FIXME why not <=?
1099  switch (bp) {
1100  case PARTITION_NONE:
1101  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1102  break;
1103  case PARTITION_H:
1104  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1105  yoff += hbs * 8 * y_stride;
1106  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1107  ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, bl, bp);
1108  break;
1109  case PARTITION_V:
1110  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1111  yoff += hbs * 8 * bytesperpixel;
1112  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1113  ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, bl, bp);
1114  break;
1115  case PARTITION_SPLIT:
1116  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1117  decode_sb(td, row, col + hbs, lflvl,
1118  yoff + 8 * hbs * bytesperpixel,
1119  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1120  yoff += hbs * 8 * y_stride;
1121  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1122  decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1123  decode_sb(td, row + hbs, col + hbs, lflvl,
1124  yoff + 8 * hbs * bytesperpixel,
1125  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1126  break;
1127  default:
1128  av_assert0(0);
1129  }
1130  } else if (vp56_rac_get_prob_branchy(td->c, p[1])) {
1131  bp = PARTITION_SPLIT;
1132  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1133  decode_sb(td, row, col + hbs, lflvl,
1134  yoff + 8 * hbs * bytesperpixel,
1135  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1136  } else {
1137  bp = PARTITION_H;
1138  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1139  }
1140  } else if (row + hbs < s->rows) { // FIXME why not <=?
1141  if (vp56_rac_get_prob_branchy(td->c, p[2])) {
1142  bp = PARTITION_SPLIT;
1143  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1144  yoff += hbs * 8 * y_stride;
1145  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1146  decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1147  } else {
1148  bp = PARTITION_V;
1149  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1150  }
1151  } else {
1152  bp = PARTITION_SPLIT;
1153  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1154  }
1155  td->counts.partition[bl][c][bp]++;
1156 }
1157 
/* Replay the block structure recorded during pass 1 (presumably the 2-pass
 * mode used with frame threading — TODO confirm against decode_tiles, which
 * calls this only when s->pass == 2): instead of reading partition symbols
 * from the bitstream, recurse until the stored block level (b->bl) is
 * reached and decode with the stored partition (b->bp). */
static void decode_sb_mem(VP9TileData *td, int row, int col, VP9Filter *lflvl,
                          ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
{
    const VP9Context *s = td->s;
    VP9Block *b = td->b;
    ptrdiff_t hbs = 4 >> bl;   // half block size, in units of 8 pixels
    AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
    ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
    int bytesperpixel = s->bytesperpixel;

    if (bl == BL_8X8) {
        av_assert2(b->bl == BL_8X8);
        ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
    } else if (td->b->bl == bl) {
        // stored block lives at this level: decode it, then its H/V sibling
        // if that sibling is inside the frame
        ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
        if (b->bp == PARTITION_H && row + hbs < s->rows) {
            yoff += hbs * 8 * y_stride;
            uvoff += hbs * 8 * uv_stride >> s->ss_v;
            ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, b->bl, b->bp);
        } else if (b->bp == PARTITION_V && col + hbs < s->cols) {
            yoff += hbs * 8 * bytesperpixel;
            uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
            ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, b->bl, b->bp);
        }
    } else {
        // split: recurse into the (up to four) quadrants inside the frame
        decode_sb_mem(td, row, col, lflvl, yoff, uvoff, bl + 1);
        if (col + hbs < s->cols) { // FIXME why not <=?
            if (row + hbs < s->rows) {
                decode_sb_mem(td, row, col + hbs, lflvl, yoff + 8 * hbs * bytesperpixel,
                              uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
                yoff += hbs * 8 * y_stride;
                uvoff += hbs * 8 * uv_stride >> s->ss_v;
                decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
                decode_sb_mem(td, row + hbs, col + hbs, lflvl,
                              yoff + 8 * hbs * bytesperpixel,
                              uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
            } else {
                yoff += hbs * 8 * bytesperpixel;
                uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
                decode_sb_mem(td, row, col + hbs, lflvl, yoff, uvoff, bl + 1);
            }
        } else if (row + hbs < s->rows) {
            yoff += hbs * 8 * y_stride;
            uvoff += hbs * 8 * uv_stride >> s->ss_v;
            decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
        }
    }
}
1206 
/* Compute the [start, end) bounds of tile number idx, in 8x8-block units
 * (hence the << 3), when n superblocks are divided into 1 << log2_n tiles.
 * Both bounds are clamped so they never run past the frame. */
static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
{
    int first_sb = (idx * n) >> log2_n;
    int last_sb  = ((idx + 1) * n) >> log2_n;

    if (first_sb > n)
        first_sb = n;
    if (last_sb > n)
        last_sb = n;
    *start = first_sb << 3;
    *end   = last_sb << 3;
}
1214 
1216 {
1217  int i;
1218 
1219  av_freep(&s->intra_pred_data[0]);
1220  for (i = 0; i < s->active_tile_cols; i++)
1221  vp9_tile_data_free(&s->td[i]);
1222 }
1223 
1225 {
1226  VP9Context *s = avctx->priv_data;
1227  int i;
1228 
1229  for (i = 0; i < 3; i++) {
1230  vp9_frame_unref(avctx, &s->s.frames[i]);
1231  av_frame_free(&s->s.frames[i].tf.f);
1232  }
1233  av_buffer_pool_uninit(&s->frame_extradata_pool);
1234  for (i = 0; i < 8; i++) {
1235  ff_thread_release_buffer(avctx, &s->s.refs[i]);
1236  av_frame_free(&s->s.refs[i].f);
1237  ff_thread_release_buffer(avctx, &s->next_refs[i]);
1238  av_frame_free(&s->next_refs[i].f);
1239  }
1240 
1241  free_buffers(s);
1242 #if HAVE_THREADS
1243  av_freep(&s->entries);
1244  ff_pthread_free(s, vp9_context_offsets);
1245 #endif
1246  av_freep(&s->td);
1247  return 0;
1248 }
1249 
/* Single-threaded tile decoding. Per tile row: initialize one range coder
 * per tile column from the tile-size prefixes in the bitstream, decode every
 * superblock row across all tile columns, back up the bottom pixel rows for
 * intra prediction of the next superblock row, loop-filter the row, and
 * report row progress for frame threading. Returns 0 or a negative error. */
static int decode_tiles(AVCodecContext *avctx,
                        const uint8_t *data, int size)
{
    VP9Context *s = avctx->priv_data;
    VP9TileData *td = &s->td[0];
    int row, col, tile_row, tile_col, ret;
    int bytesperpixel;
    int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
    AVFrame *f;
    ptrdiff_t yoff, uvoff, ls_y, ls_uv;

    f = s->s.frames[CUR_FRAME].tf.f;
    ls_y = f->linesize[0];
    ls_uv =f->linesize[1];
    bytesperpixel = s->bytesperpixel;

    yoff = uvoff = 0;
    for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
        set_tile_offset(&tile_row_start, &tile_row_end,
                        tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);

        // set up one range decoder per tile column; each tile except the
        // very last is preceded by a 32-bit size field
        for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
            int64_t tile_size;

            if (tile_col == s->s.h.tiling.tile_cols - 1 &&
                tile_row == s->s.h.tiling.tile_rows - 1) {
                tile_size = size;
            } else {
                tile_size = AV_RB32(data);
                data += 4;
                size -= 4;
            }
            if (tile_size > size) {
                // signal completion so waiting frame threads don't hang
                ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
                return AVERROR_INVALIDDATA;
            }
            ret = ff_vp56_init_range_decoder(&td->c_b[tile_col], data, tile_size);
            if (ret < 0)
                return ret;
            if (vp56_rac_get_prob_branchy(&td->c_b[tile_col], 128)) { // marker bit
                ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
                return AVERROR_INVALIDDATA;
            }
            data += tile_size;
            size -= tile_size;
        }

        for (row = tile_row_start; row < tile_row_end;
             row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
            VP9Filter *lflvl_ptr = s->lflvl;
            ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;

            for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
                set_tile_offset(&tile_col_start, &tile_col_end,
                                tile_col, s->s.h.tiling.log2_tile_cols, s->sb_cols);
                td->tile_col_start = tile_col_start;
                if (s->pass != 2) {
                    // reset per-row left-edge contexts before each tile
                    memset(td->left_partition_ctx, 0, 8);
                    memset(td->left_skip_ctx, 0, 8);
                    if (s->s.h.keyframe || s->s.h.intraonly) {
                        memset(td->left_mode_ctx, DC_PRED, 16);
                    } else {
                        memset(td->left_mode_ctx, NEARESTMV, 8);
                    }
                    memset(td->left_y_nnz_ctx, 0, 16);
                    memset(td->left_uv_nnz_ctx, 0, 32);
                    memset(td->left_segpred_ctx, 0, 8);

                    td->c = &td->c_b[tile_col];
                }

                for (col = tile_col_start;
                     col < tile_col_end;
                     col += 8, yoff2 += 64 * bytesperpixel,
                     uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                    // FIXME integrate with lf code (i.e. zero after each
                    // use, similar to invtxfm coefficients, or similar)
                    if (s->pass != 1) {
                        memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
                    }

                    if (s->pass == 2) {
                        // pass 2: replay block structure recorded in pass 1
                        decode_sb_mem(td, row, col, lflvl_ptr,
                                      yoff2, uvoff2, BL_64X64);
                    } else {
                        if (vpX_rac_is_end(td->c)) {
                            return AVERROR_INVALIDDATA;
                        }
                        decode_sb(td, row, col, lflvl_ptr,
                                  yoff2, uvoff2, BL_64X64);
                    }
                }
            }

            if (s->pass == 1)
                continue;

            // backup pre-loopfilter reconstruction data for intra
            // prediction of next row of sb64s
            if (row + 8 < s->rows) {
                memcpy(s->intra_pred_data[0],
                       f->data[0] + yoff + 63 * ls_y,
                       8 * s->cols * bytesperpixel);
                memcpy(s->intra_pred_data[1],
                       f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * s->cols * bytesperpixel >> s->ss_h);
                memcpy(s->intra_pred_data[2],
                       f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * s->cols * bytesperpixel >> s->ss_h);
            }

            // loopfilter one row
            if (s->s.h.filter.level) {
                yoff2 = yoff;
                uvoff2 = uvoff;
                lflvl_ptr = s->lflvl;
                for (col = 0; col < s->cols;
                     col += 8, yoff2 += 64 * bytesperpixel,
                     uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                    ff_vp9_loopfilter_sb(avctx, lflvl_ptr, row, col,
                                         yoff2, uvoff2);
                }
            }

            // FIXME maybe we can make this more finegrained by running the
            // loopfilter per-block instead of after each sbrow
            // In fact that would also make intra pred left preparation easier?
            ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, row >> 3, 0);
        }
    }
    return 0;
}
1382 
1383 #if HAVE_THREADS
/* Slice-threaded worker: each job (jobnr) decodes one tile column across all
 * tile rows. The range coders (td->c_b[]) were initialized by the caller;
 * after each superblock row the worker saves the bottom pixel rows of its
 * column for next-row intra prediction and signals row progress so the main
 * thread (loopfilter_proc) can filter. Always returns 0. */
static av_always_inline
int decode_tiles_mt(AVCodecContext *avctx, void *tdata, int jobnr,
                    int threadnr)
{
    VP9Context *s = avctx->priv_data;
    VP9TileData *td = &s->td[jobnr];
    ptrdiff_t uvoff, yoff, ls_y, ls_uv;
    int bytesperpixel = s->bytesperpixel, row, col, tile_row;
    unsigned tile_cols_len;
    int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
    VP9Filter *lflvl_ptr_base;
    AVFrame *f;

    f = s->s.frames[CUR_FRAME].tf.f;
    ls_y = f->linesize[0];
    ls_uv =f->linesize[1];

    // this job's horizontal extent and the pixel offsets where it starts
    set_tile_offset(&tile_col_start, &tile_col_end,
                    jobnr, s->s.h.tiling.log2_tile_cols, s->sb_cols);
    td->tile_col_start = tile_col_start;
    uvoff = (64 * bytesperpixel >> s->ss_h)*(tile_col_start >> 3);
    yoff = (64 * bytesperpixel)*(tile_col_start >> 3);
    lflvl_ptr_base = s->lflvl+(tile_col_start >> 3);

    for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
        set_tile_offset(&tile_row_start, &tile_row_end,
                        tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);

        td->c = &td->c_b[tile_row];
        for (row = tile_row_start; row < tile_row_end;
             row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
            ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
            VP9Filter *lflvl_ptr = lflvl_ptr_base+s->sb_cols*(row >> 3);

            // reset left-edge contexts at the start of each superblock row
            memset(td->left_partition_ctx, 0, 8);
            memset(td->left_skip_ctx, 0, 8);
            if (s->s.h.keyframe || s->s.h.intraonly) {
                memset(td->left_mode_ctx, DC_PRED, 16);
            } else {
                memset(td->left_mode_ctx, NEARESTMV, 8);
            }
            memset(td->left_y_nnz_ctx, 0, 16);
            memset(td->left_uv_nnz_ctx, 0, 32);
            memset(td->left_segpred_ctx, 0, 8);

            for (col = tile_col_start;
                 col < tile_col_end;
                 col += 8, yoff2 += 64 * bytesperpixel,
                 uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                // FIXME integrate with lf code (i.e. zero after each
                // use, similar to invtxfm coefficients, or similar)
                memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
                decode_sb(td, row, col, lflvl_ptr,
                          yoff2, uvoff2, BL_64X64);
            }

            // backup pre-loopfilter reconstruction data for intra
            // prediction of next row of sb64s
            tile_cols_len = tile_col_end - tile_col_start;
            if (row + 8 < s->rows) {
                memcpy(s->intra_pred_data[0] + (tile_col_start * 8 * bytesperpixel),
                       f->data[0] + yoff + 63 * ls_y,
                       8 * tile_cols_len * bytesperpixel);
                memcpy(s->intra_pred_data[1] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
                       f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * tile_cols_len * bytesperpixel >> s->ss_h);
                memcpy(s->intra_pred_data[2] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
                       f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * tile_cols_len * bytesperpixel >> s->ss_h);
            }

            // tell the loopfilter thread this tile finished one sb row
            vp9_report_tile_progress(s, row >> 3, 1);
        }
    }
    return 0;
}
1460 
1461 static av_always_inline
1462 int loopfilter_proc(AVCodecContext *avctx)
1463 {
1464  VP9Context *s = avctx->priv_data;
1465  ptrdiff_t uvoff, yoff, ls_y, ls_uv;
1466  VP9Filter *lflvl_ptr;
1467  int bytesperpixel = s->bytesperpixel, col, i;
1468  AVFrame *f;
1469 
1470  f = s->s.frames[CUR_FRAME].tf.f;
1471  ls_y = f->linesize[0];
1472  ls_uv =f->linesize[1];
1473 
1474  for (i = 0; i < s->sb_rows; i++) {
1475  vp9_await_tile_progress(s, i, s->s.h.tiling.tile_cols);
1476 
1477  if (s->s.h.filter.level) {
1478  yoff = (ls_y * 64)*i;
1479  uvoff = (ls_uv * 64 >> s->ss_v)*i;
1480  lflvl_ptr = s->lflvl+s->sb_cols*i;
1481  for (col = 0; col < s->cols;
1482  col += 8, yoff += 64 * bytesperpixel,
1483  uvoff += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1484  ff_vp9_loopfilter_sb(avctx, lflvl_ptr, i << 3, col,
1485  yoff, uvoff);
1486  }
1487  }
1488  }
1489  return 0;
1490 }
1491 #endif
1492 
1494 {
1495  AVVideoEncParams *par;
1496  unsigned int tile, nb_blocks = 0;
1497 
1498  if (s->s.h.segmentation.enabled) {
1499  for (tile = 0; tile < s->active_tile_cols; tile++)
1500  nb_blocks += s->td[tile].nb_block_structure;
1501  }
1502 
1504  AV_VIDEO_ENC_PARAMS_VP9, nb_blocks);
1505  if (!par)
1506  return AVERROR(ENOMEM);
1507 
1508  par->qp = s->s.h.yac_qi;
1509  par->delta_qp[0][0] = s->s.h.ydc_qdelta;
1510  par->delta_qp[1][0] = s->s.h.uvdc_qdelta;
1511  par->delta_qp[2][0] = s->s.h.uvdc_qdelta;
1512  par->delta_qp[1][1] = s->s.h.uvac_qdelta;
1513  par->delta_qp[2][1] = s->s.h.uvac_qdelta;
1514 
1515  if (nb_blocks) {
1516  unsigned int block = 0;
1517  unsigned int tile, block_tile;
1518 
1519  for (tile = 0; tile < s->active_tile_cols; tile++) {
1520  VP9TileData *td = &s->td[tile];
1521 
1522  for (block_tile = 0; block_tile < td->nb_block_structure; block_tile++) {
1524  unsigned int row = td->block_structure[block_tile].row;
1525  unsigned int col = td->block_structure[block_tile].col;
1526  uint8_t seg_id = frame->segmentation_map[row * 8 * s->sb_cols + col];
1527 
1528  b->src_x = col * 8;
1529  b->src_y = row * 8;
1530  b->w = 1 << (3 + td->block_structure[block_tile].block_size_idx_x);
1531  b->h = 1 << (3 + td->block_structure[block_tile].block_size_idx_y);
1532 
1533  if (s->s.h.segmentation.feat[seg_id].q_enabled) {
1534  b->delta_qp = s->s.h.segmentation.feat[seg_id].q_val;
1535  if (s->s.h.segmentation.absolute_vals)
1536  b->delta_qp -= par->qp;
1537  }
1538  }
1539  }
1540  }
1541 
1542  return 0;
1543 }
1544 
1545 static int vp9_decode_frame(AVCodecContext *avctx, void *frame,
1546  int *got_frame, AVPacket *pkt)
1547 {
1548  const uint8_t *data = pkt->data;
1549  int size = pkt->size;
1550  VP9Context *s = avctx->priv_data;
1551  int ret, i, j, ref;
1552  int retain_segmap_ref = s->s.frames[REF_FRAME_SEGMAP].segmentation_map &&
1553  (!s->s.h.segmentation.enabled || !s->s.h.segmentation.update_map);
1554  AVFrame *f;
1555 
1556  if ((ret = decode_frame_header(avctx, data, size, &ref)) < 0) {
1557  return ret;
1558  } else if (ret == 0) {
1559  if (!s->s.refs[ref].f->buf[0]) {
1560  av_log(avctx, AV_LOG_ERROR, "Requested reference %d not available\n", ref);
1561  return AVERROR_INVALIDDATA;
1562  }
1563  if ((ret = av_frame_ref(frame, s->s.refs[ref].f)) < 0)
1564  return ret;
1565  ((AVFrame *)frame)->pts = pkt->pts;
1566  ((AVFrame *)frame)->pkt_dts = pkt->dts;
1567  for (i = 0; i < 8; i++) {
1568  if (s->next_refs[i].f->buf[0])
1569  ff_thread_release_buffer(avctx, &s->next_refs[i]);
1570  if (s->s.refs[i].f->buf[0] &&
1571  (ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i])) < 0)
1572  return ret;
1573  }
1574  *got_frame = 1;
1575  return pkt->size;
1576  }
1577  data += ret;
1578  size -= ret;
1579 
1580  if (!retain_segmap_ref || s->s.h.keyframe || s->s.h.intraonly) {
1581  if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0])
1582  vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_SEGMAP]);
1583  if (!s->s.h.keyframe && !s->s.h.intraonly && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] &&
1584  (ret = vp9_frame_ref(avctx, &s->s.frames[REF_FRAME_SEGMAP], &s->s.frames[CUR_FRAME])) < 0)
1585  return ret;
1586  }
1587  if (s->s.frames[REF_FRAME_MVPAIR].tf.f->buf[0])
1588  vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_MVPAIR]);
1589  if (!s->s.h.intraonly && !s->s.h.keyframe && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] &&
1590  (ret = vp9_frame_ref(avctx, &s->s.frames[REF_FRAME_MVPAIR], &s->s.frames[CUR_FRAME])) < 0)
1591  return ret;
1592  if (s->s.frames[CUR_FRAME].tf.f->buf[0])
1593  vp9_frame_unref(avctx, &s->s.frames[CUR_FRAME]);
1594  if ((ret = vp9_frame_alloc(avctx, &s->s.frames[CUR_FRAME])) < 0)
1595  return ret;
1596  f = s->s.frames[CUR_FRAME].tf.f;
1597  f->key_frame = s->s.h.keyframe;
1598  f->pict_type = (s->s.h.keyframe || s->s.h.intraonly) ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
1599 
1600  if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0] &&
1601  (s->s.frames[REF_FRAME_MVPAIR].tf.f->width != s->s.frames[CUR_FRAME].tf.f->width ||
1602  s->s.frames[REF_FRAME_MVPAIR].tf.f->height != s->s.frames[CUR_FRAME].tf.f->height)) {
1603  vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_SEGMAP]);
1604  }
1605 
1606  // ref frame setup
1607  for (i = 0; i < 8; i++) {
1608  if (s->next_refs[i].f->buf[0])
1609  ff_thread_release_buffer(avctx, &s->next_refs[i]);
1610  if (s->s.h.refreshrefmask & (1 << i)) {
1611  ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.frames[CUR_FRAME].tf);
1612  } else if (s->s.refs[i].f->buf[0]) {
1613  ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i]);
1614  }
1615  if (ret < 0)
1616  return ret;
1617  }
1618 
1619  if (avctx->hwaccel) {
1620  ret = avctx->hwaccel->start_frame(avctx, NULL, 0);
1621  if (ret < 0)
1622  return ret;
1623  ret = avctx->hwaccel->decode_slice(avctx, pkt->data, pkt->size);
1624  if (ret < 0)
1625  return ret;
1626  ret = avctx->hwaccel->end_frame(avctx);
1627  if (ret < 0)
1628  return ret;
1629  goto finish;
1630  }
1631 
1632  // main tile decode loop
1633  memset(s->above_partition_ctx, 0, s->cols);
1634  memset(s->above_skip_ctx, 0, s->cols);
1635  if (s->s.h.keyframe || s->s.h.intraonly) {
1636  memset(s->above_mode_ctx, DC_PRED, s->cols * 2);
1637  } else {
1638  memset(s->above_mode_ctx, NEARESTMV, s->cols);
1639  }
1640  memset(s->above_y_nnz_ctx, 0, s->sb_cols * 16);
1641  memset(s->above_uv_nnz_ctx[0], 0, s->sb_cols * 16 >> s->ss_h);
1642  memset(s->above_uv_nnz_ctx[1], 0, s->sb_cols * 16 >> s->ss_h);
1643  memset(s->above_segpred_ctx, 0, s->cols);
1644  s->pass = s->s.frames[CUR_FRAME].uses_2pass =
1645  avctx->active_thread_type == FF_THREAD_FRAME && s->s.h.refreshctx && !s->s.h.parallelmode;
1646  if ((ret = update_block_buffers(avctx)) < 0) {
1647  av_log(avctx, AV_LOG_ERROR,
1648  "Failed to allocate block buffers\n");
1649  return ret;
1650  }
1651  if (s->s.h.refreshctx && s->s.h.parallelmode) {
1652  int j, k, l, m;
1653 
1654  for (i = 0; i < 4; i++) {
1655  for (j = 0; j < 2; j++)
1656  for (k = 0; k < 2; k++)
1657  for (l = 0; l < 6; l++)
1658  for (m = 0; m < 6; m++)
1659  memcpy(s->prob_ctx[s->s.h.framectxid].coef[i][j][k][l][m],
1660  s->prob.coef[i][j][k][l][m], 3);
1661  if (s->s.h.txfmmode == i)
1662  break;
1663  }
1664  s->prob_ctx[s->s.h.framectxid].p = s->prob.p;
1665  ff_thread_finish_setup(avctx);
1666  } else if (!s->s.h.refreshctx) {
1667  ff_thread_finish_setup(avctx);
1668  }
1669 
1670 #if HAVE_THREADS
1671  if (avctx->active_thread_type & FF_THREAD_SLICE) {
1672  for (i = 0; i < s->sb_rows; i++)
1673  atomic_store(&s->entries[i], 0);
1674  }
1675 #endif
1676 
1677  do {
1678  for (i = 0; i < s->active_tile_cols; i++) {
1679  s->td[i].b = s->td[i].b_base;
1680  s->td[i].block = s->td[i].block_base;
1681  s->td[i].uvblock[0] = s->td[i].uvblock_base[0];
1682  s->td[i].uvblock[1] = s->td[i].uvblock_base[1];
1683  s->td[i].eob = s->td[i].eob_base;
1684  s->td[i].uveob[0] = s->td[i].uveob_base[0];
1685  s->td[i].uveob[1] = s->td[i].uveob_base[1];
1686  s->td[i].error_info = 0;
1687  }
1688 
1689 #if HAVE_THREADS
1690  if (avctx->active_thread_type == FF_THREAD_SLICE) {
1691  int tile_row, tile_col;
1692 
1693  av_assert1(!s->pass);
1694 
1695  for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
1696  for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
1697  int64_t tile_size;
1698 
1699  if (tile_col == s->s.h.tiling.tile_cols - 1 &&
1700  tile_row == s->s.h.tiling.tile_rows - 1) {
1701  tile_size = size;
1702  } else {
1703  tile_size = AV_RB32(data);
1704  data += 4;
1705  size -= 4;
1706  }
1707  if (tile_size > size)
1708  return AVERROR_INVALIDDATA;
1709  ret = ff_vp56_init_range_decoder(&s->td[tile_col].c_b[tile_row], data, tile_size);
1710  if (ret < 0)
1711  return ret;
1712  if (vp56_rac_get_prob_branchy(&s->td[tile_col].c_b[tile_row], 128)) // marker bit
1713  return AVERROR_INVALIDDATA;
1714  data += tile_size;
1715  size -= tile_size;
1716  }
1717  }
1718 
1719  ff_slice_thread_execute_with_mainfunc(avctx, decode_tiles_mt, loopfilter_proc, s->td, NULL, s->s.h.tiling.tile_cols);
1720  } else
1721 #endif
1722  {
1723  ret = decode_tiles(avctx, data, size);
1724  if (ret < 0) {
1725  ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
1726  return ret;
1727  }
1728  }
1729 
1730  // Sum all counts fields into td[0].counts for tile threading
1731  if (avctx->active_thread_type == FF_THREAD_SLICE)
1732  for (i = 1; i < s->s.h.tiling.tile_cols; i++)
1733  for (j = 0; j < sizeof(s->td[i].counts) / sizeof(unsigned); j++)
1734  ((unsigned *)&s->td[0].counts)[j] += ((unsigned *)&s->td[i].counts)[j];
1735 
1736  if (s->pass < 2 && s->s.h.refreshctx && !s->s.h.parallelmode) {
1738  ff_thread_finish_setup(avctx);
1739  }
1740  } while (s->pass++ == 1);
1741  ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
1742 
1743  if (s->td->error_info < 0) {
1744  av_log(avctx, AV_LOG_ERROR, "Failed to decode tile data\n");
1745  s->td->error_info = 0;
1746  return AVERROR_INVALIDDATA;
1747  }
1749  ret = vp9_export_enc_params(s, &s->s.frames[CUR_FRAME]);
1750  if (ret < 0)
1751  return ret;
1752  }
1753 
1754 finish:
1755  // ref frame setup
1756  for (i = 0; i < 8; i++) {
1757  if (s->s.refs[i].f->buf[0])
1758  ff_thread_release_buffer(avctx, &s->s.refs[i]);
1759  if (s->next_refs[i].f->buf[0] &&
1760  (ret = ff_thread_ref_frame(&s->s.refs[i], &s->next_refs[i])) < 0)
1761  return ret;
1762  }
1763 
1764  if (!s->s.h.invisible) {
1765  if ((ret = av_frame_ref(frame, s->s.frames[CUR_FRAME].tf.f)) < 0)
1766  return ret;
1767  *got_frame = 1;
1768  }
1769 
1770  return pkt->size;
1771 }
1772 
1774 {
1775  VP9Context *s = avctx->priv_data;
1776  int i;
1777 
1778  for (i = 0; i < 3; i++)
1779  vp9_frame_unref(avctx, &s->s.frames[i]);
1780  for (i = 0; i < 8; i++)
1781  ff_thread_release_buffer(avctx, &s->s.refs[i]);
1782 }
1783 
1785 {
1786  VP9Context *s = avctx->priv_data;
1787  int ret;
1788 
1789  s->last_bpp = 0;
1790  s->s.h.filter.sharpness = -1;
1791 
1792 #if HAVE_THREADS
1793  if (avctx->active_thread_type & FF_THREAD_SLICE) {
1794  ret = ff_pthread_init(s, vp9_context_offsets);
1795  if (ret < 0)
1796  return ret;
1797  }
1798 #endif
1799 
1800  for (int i = 0; i < 3; i++) {
1801  s->s.frames[i].tf.f = av_frame_alloc();
1802  if (!s->s.frames[i].tf.f)
1803  return AVERROR(ENOMEM);
1804  }
1805  for (int i = 0; i < 8; i++) {
1806  s->s.refs[i].f = av_frame_alloc();
1807  s->next_refs[i].f = av_frame_alloc();
1808  if (!s->s.refs[i].f || !s->next_refs[i].f)
1809  return AVERROR(ENOMEM);
1810  }
1811  return 0;
1812 }
1813 
1814 #if HAVE_THREADS
/* Frame-threading state transfer: copy from the just-finished source thread
 * context into the destination thread everything the next frame needs —
 * frame history, reference slots (the source's *next* refs become the
 * destination's current refs), header fields, format/geometry, and the
 * probability contexts. Returns 0 or a negative error. */
static int vp9_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
    int i, ret;
    VP9Context *s = dst->priv_data, *ssrc = src->priv_data;

    for (i = 0; i < 3; i++) {
        if (s->s.frames[i].tf.f->buf[0])
            vp9_frame_unref(dst, &s->s.frames[i]);
        if (ssrc->s.frames[i].tf.f->buf[0]) {
            if ((ret = vp9_frame_ref(dst, &s->s.frames[i], &ssrc->s.frames[i])) < 0)
                return ret;
        }
    }
    for (i = 0; i < 8; i++) {
        if (s->s.refs[i].f->buf[0])
            ff_thread_release_buffer(dst, &s->s.refs[i]);
        if (ssrc->next_refs[i].f->buf[0]) {
            if ((ret = ff_thread_ref_frame(&s->s.refs[i], &ssrc->next_refs[i])) < 0)
                return ret;
        }
    }

    s->s.h.invisible = ssrc->s.h.invisible;
    s->s.h.keyframe = ssrc->s.h.keyframe;
    s->s.h.intraonly = ssrc->s.h.intraonly;
    s->ss_v = ssrc->ss_v;
    s->ss_h = ssrc->ss_h;
    s->s.h.segmentation.enabled = ssrc->s.h.segmentation.enabled;
    s->s.h.segmentation.update_map = ssrc->s.h.segmentation.update_map;
    s->s.h.segmentation.absolute_vals = ssrc->s.h.segmentation.absolute_vals;
    s->bytesperpixel = ssrc->bytesperpixel;
    s->gf_fmt = ssrc->gf_fmt;
    s->w = ssrc->w;
    s->h = ssrc->h;
    s->s.h.bpp = ssrc->s.h.bpp;
    s->bpp_index = ssrc->bpp_index;
    s->pix_fmt = ssrc->pix_fmt;
    memcpy(&s->prob_ctx, &ssrc->prob_ctx, sizeof(s->prob_ctx));
    memcpy(&s->s.h.lf_delta, &ssrc->s.h.lf_delta, sizeof(s->s.h.lf_delta));
    memcpy(&s->s.h.segmentation.feat, &ssrc->s.h.segmentation.feat,
           sizeof(s->s.h.segmentation.feat));

    return 0;
}
1859 #endif
1860 
1862  .name = "vp9",
1863  .long_name = NULL_IF_CONFIG_SMALL("Google VP9"),
1864  .type = AVMEDIA_TYPE_VIDEO,
1865  .id = AV_CODEC_ID_VP9,
1866  .priv_data_size = sizeof(VP9Context),
1867  .init = vp9_decode_init,
1868  .close = vp9_decode_free,
1875  .update_thread_context = ONLY_IF_THREADS_ENABLED(vp9_decode_update_thread_context),
1877  .bsfs = "vp9_superframe_split",
1878  .hw_configs = (const AVCodecHWConfigInternal *const []) {
1879 #if CONFIG_VP9_DXVA2_HWACCEL
1880  HWACCEL_DXVA2(vp9),
1881 #endif
1882 #if CONFIG_VP9_D3D11VA_HWACCEL
1883  HWACCEL_D3D11VA(vp9),
1884 #endif
1885 #if CONFIG_VP9_D3D11VA2_HWACCEL
1886  HWACCEL_D3D11VA2(vp9),
1887 #endif
1888 #if CONFIG_VP9_NVDEC_HWACCEL
1889  HWACCEL_NVDEC(vp9),
1890 #endif
1891 #if CONFIG_VP9_VAAPI_HWACCEL
1892  HWACCEL_VAAPI(vp9),
1893 #endif
1894 #if CONFIG_VP9_VDPAU_HWACCEL
1895  HWACCEL_VDPAU(vp9),
1896 #endif
1897  NULL
1898  },
1899 };
AVVideoEncParams::qp
int32_t qp
Base quantisation parameter for the frame.
Definition: video_enc_params.h:103
hwconfig.h
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1359
AVCodec
AVCodec.
Definition: codec.h:202
av_buffer_pool_init
AVBufferPool * av_buffer_pool_init(size_t size, AVBufferRef *(*alloc)(size_t size))
Allocate and initialize a buffer pool.
Definition: buffer.c:280
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
FF_CODEC_CAP_INIT_THREADSAFE
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:42
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:225
decode_tiles
static int decode_tiles(AVCodecContext *avctx, const uint8_t *data, int size)
Definition: vp9.c:1250
td
#define td
Definition: regdef.h:70
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
vp9_frame_alloc
static int vp9_frame_alloc(AVCodecContext *avctx, VP9Frame *f)
Definition: vp9.c:102
atomic_store
#define atomic_store(object, desired)
Definition: stdatomic.h:85
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:31
r
const char * r
Definition: vf_curves.c:116
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
PRED_SWITCHABLE
@ PRED_SWITCHABLE
Definition: vp9shared.h:51
PRED_SINGLEREF
@ PRED_SINGLEREF
Definition: vp9shared.h:49
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:960
ff_thread_ref_frame
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
Definition: utils.c:885
vpX_rac_is_end
static av_always_inline int vpX_rac_is_end(VP56RangeCoder *c)
vp5689 returns 1 if the end of the stream has been reached, 0 otherwise.
Definition: vp56.h:239
VP9Frame::segmentation_map
uint8_t * segmentation_map
Definition: vp9shared.h:62
VP9Frame
Definition: vp9shared.h:59
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
av_clip_uintp2
#define av_clip_uintp2
Definition: common.h:120
decode_sb
static void decode_sb(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
Definition: vp9.c:1079
ff_vp9_decoder
const AVCodec ff_vp9_decoder
Definition: vp9.c:1861
ff_vp9_adapt_probs
void ff_vp9_adapt_probs(VP9Context *s)
Definition: vp9prob.c:46
vp9_decode_flush
static void vp9_decode_flush(AVCodecContext *avctx)
Definition: vp9.c:1773
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:220
vp9_decode_frame
static int vp9_decode_frame(AVCodecContext *avctx, void *frame, int *got_frame, AVPacket *pkt)
Definition: vp9.c:1545
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:112
PRED_COMPREF
@ PRED_COMPREF
Definition: vp9shared.h:50
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:310
pixdesc.h
w
uint8_t w
Definition: llviddspenc.c:38
HWACCEL_DXVA2
#define HWACCEL_DXVA2(codec)
Definition: hwconfig.h:67
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:579
BlockPartition
BlockPartition
Definition: vp9shared.h:34
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:373
DC_PRED
@ DC_PRED
Definition: vp9.h:48
HWACCEL_D3D11VA2
#define HWACCEL_D3D11VA2(codec)
Definition: hwconfig.h:69
b
#define b
Definition: input.c:40
data
const char data[16]
Definition: mxf.c:143
update_size
static int update_size(AVCodecContext *avctx, int w, int h)
Definition: vp9.c:178
decode_sb_mem
static void decode_sb_mem(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
Definition: vp9.c:1158
REF_FRAME_SEGMAP
#define REF_FRAME_SEGMAP
Definition: vp9shared.h:165
decode_frame_header
static int decode_frame_header(AVCodecContext *avctx, const uint8_t *data, int size, int *ref)
Definition: vp9.c:494
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:391
atomic_int
intptr_t atomic_int
Definition: stdatomic.h:55
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:219
AVCOL_SPC_RGB
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB), YZX and ST 428-1
Definition: pixfmt.h:506
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
max
#define max(a, b)
Definition: cuda_runtime.h:33
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
VP9_SYNCCODE
#define VP9_SYNCCODE
Definition: vp9.c:41
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
VP9Frame::hwaccel_priv_buf
AVBufferRef * hwaccel_priv_buf
Definition: vp9shared.h:66
VP9Filter
Definition: vp9dec.h:76
thread.h
ff_pthread_free
av_cold void ff_pthread_free(void *obj, const unsigned offsets[])
Definition: pthread.c:94
FILTER_SWITCHABLE
@ FILTER_SWITCHABLE
Definition: vp9.h:70
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
VP9Block
Definition: vp9dec.h:82
VP9Frame::tf
ThreadFrame tf
Definition: vp9shared.h:60
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:468
vp56_rac_get_prob_branchy
static av_always_inline int vp56_rac_get_prob_branchy(VP56RangeCoder *c, int prob)
Definition: vp56.h:287
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:511
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:380
AVCOL_SPC_RESERVED
@ AVCOL_SPC_RESERVED
reserved for future use by ITU-T and ISO/IEC just like 15-255 are
Definition: pixfmt.h:509
AVHWAccel
Definition: avcodec.h:2039
VP9Frame::extradata
AVBufferRef * extradata
Definition: vp9shared.h:61
TX_SWITCHABLE
@ TX_SWITCHABLE
Definition: vp9.h:33
finish
static void finish(void)
Definition: movenc.c:342
vp8_rac_get
static av_always_inline int vp8_rac_get(VP56RangeCoder *c)
Definition: vp56.h:324
ff_vp9_ac_qlookup
const int16_t ff_vp9_ac_qlookup[3][256]
Definition: vp9data.c:334
AVVideoEncParams::delta_qp
int32_t delta_qp[4][2]
Quantisation parameter offset from the base (per-frame) qp for a given plane (first index) and AC/DC ...
Definition: video_enc_params.h:109
fail
#define fail()
Definition: checkasm.h:127
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:407
GetBitContext
Definition: get_bits.h:62
ff_thread_get_buffer
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in AVCodec caps_internal and use ff_thread_get_buffer() to allocate frames. The frames must then be freed with ff_thread_release_buffer(). Otherwise decode directly into the user-supplied frames. Call ff_thread_report_progress() after some part of the current picture has decoded. A good place to put this is where draw_horiz_band() is called - add this if it isn 't called anywhere
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:463
HWACCEL_VDPAU
#define HWACCEL_VDPAU(codec)
Definition: hwconfig.h:75
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
PARTITION_NONE
@ PARTITION_NONE
Definition: vp9shared.h:35
VP9Frame::hwaccel_picture_private
void * hwaccel_picture_private
Definition: vp9shared.h:67
AVVideoEncParams
Video encoding parameters for a given frame.
Definition: video_enc_params.h:73
vp9_decode_free
static av_cold int vp9_decode_free(AVCodecContext *avctx)
Definition: vp9.c:1224
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:99
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:394
avassert.h
ff_vp9_model_pareto8
const uint8_t ff_vp9_model_pareto8[256][8]
Definition: vp9data.c:1176
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:678
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1823
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:588
BL_8X8
@ BL_8X8
Definition: vp9shared.h:74
av_buffer_pool_get
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:387
decode
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
PARTITION_V
@ PARTITION_V
Definition: vp9shared.h:37
vp9_frame_ref
static int vp9_frame_ref(AVCodecContext *avctx, VP9Frame *dst, VP9Frame *src)
Definition: vp9.c:148
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:127
s
#define s(width, name)
Definition: cbs_vp9.c:257
AVCOL_SPC_SMPTE170M
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
Definition: pixfmt.h:512
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:361
AV_CODEC_ID_VP9
@ AV_CODEC_ID_VP9
Definition: codec_id.h:218
vp9data.h
bits
uint8_t bits
Definition: vp3data.h:141
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:290
get_bits.h
VP56mv
Definition: vp56.h:68
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
ff_vp9dsp_init
av_cold void ff_vp9dsp_init(VP9DSPContext *dsp, int bpp, int bitexact)
Definition: vp9dsp.c:86
decode012
static int decode012(GetBitContext *gb)
Definition: get_bits.h:832
ff_vp9_partition_tree
const int8_t ff_vp9_partition_tree[3][2]
Definition: vp9data.c:35
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
f
#define f(width, name)
Definition: cbs_vp9.c:255
vp56.h
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:113
flush
static void flush(AVCodecContext *avctx)
Definition: aacdec_template.c:593
NULL
#define NULL
Definition: coverity.c:32
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:967
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
VP9Context
Definition: vp9dec.h:94
vp8_rac_get_uint
static int vp8_rac_get_uint(VP56RangeCoder *c, int bits)
Definition: vp56.h:340
vp8_rac_get_tree
static av_always_inline int vp8_rac_get_tree(VP56RangeCoder *c, const int8_t(*tree)[2], const uint8_t *probs)
Definition: vp56.h:396
REF_FRAME_MVPAIR
#define REF_FRAME_MVPAIR
Definition: vp9shared.h:164
AVHWAccel::end_frame
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
Definition: avcodec.h:2140
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:499
profiles.h
src
#define src
Definition: vp8dsp.c:255
av_buffer_pool_uninit
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:322
AV_PIX_FMT_YUV440P10
#define AV_PIX_FMT_YUV440P10
Definition: pixfmt.h:393
pthread_internal.h
ONLY_IF_THREADS_ENABLED
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:156
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:392
PARTITION_SPLIT
@ PARTITION_SPLIT
Definition: vp9shared.h:38
atomic_load_explicit
#define atomic_load_explicit(object, order)
Definition: stdatomic.h:96
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
pthread_mutex_unlock
#define pthread_mutex_unlock(a)
Definition: ffprobe.c:68
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
ff_thread_release_buffer
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() frame-for multithreaded codecs.
Definition: pthread_frame.c:1057
av_video_enc_params_create_side_data
AVVideoEncParams * av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type, unsigned int nb_blocks)
Allocates memory for AVEncodeInfoFrame plus an array of.
Definition: video_enc_params.c:60
vp9.h
VP9Frame::uses_2pass
int uses_2pass
Definition: vp9shared.h:64
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:374
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:327
pix_fmt_rgb
static enum AVPixelFormat pix_fmt_rgb[3]
Definition: libdav1d.c:64
REF_INVALID_SCALE
#define REF_INVALID_SCALE
Definition: vp9dec.h:40
read_colorspace_details
static int read_colorspace_details(AVCodecContext *avctx)
Definition: vp9.c:432
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:396
size
int size
Definition: twinvq_data.h:10344
vp9_alloc_entries
static int vp9_alloc_entries(AVCodecContext *avctx, int n)
Definition: vp9.c:83
atomic_fetch_add_explicit
#define atomic_fetch_add_explicit(object, operand, order)
Definition: stdatomic.h:149
free_buffers
static void free_buffers(VP9Context *s)
Definition: vp9.c:1215
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:398
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1452
AVCodecHWConfigInternal
Definition: hwconfig.h:29
TX_4X4
@ TX_4X4
Definition: vp9.h:28
update_block_buffers
static int update_block_buffers(AVCodecContext *avctx)
Definition: vp9.c:297
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:372
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:117
HWACCEL_D3D11VA
#define HWACCEL_D3D11VA(codec)
Definition: hwconfig.h:79
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:303
ff_vp56_init_range_decoder
int ff_vp56_init_range_decoder(VP56RangeCoder *c, const uint8_t *buf, int buf_size)
Definition: vp56rac.c:40
inv_recenter_nonneg
static av_always_inline int inv_recenter_nonneg(int v, int m)
Definition: vp9.c:364
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:71
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:119
FF_CODEC_CAP_SLICE_THREAD_HAS_MF
#define FF_CODEC_CAP_SLICE_THREAD_HAS_MF
Codec initializes slice-based threading with a main function.
Definition: internal.h:72
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1451
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:187
ff_slice_thread_execute_with_mainfunc
int ff_slice_thread_execute_with_mainfunc(AVCodecContext *avctx, action_func2 *func2, main_func *mainfunc, void *arg, int *ret, int job_count)
Definition: pthread_slice.c:121
AVCOL_SPC_SMPTE240M
@ AVCOL_SPC_SMPTE240M
derived from 170M primaries and D65 white point, 170M is derived from BT470 System M's primaries
Definition: pixfmt.h:513
assign
#define assign(var, type, n)
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:271
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:366
DEFINE_OFFSET_ARRAY
#define DEFINE_OFFSET_ARRAY(type, name, cnt_variable, mutexes, conds)
Definition: pthread_internal.h:61
AVCodecContext::properties
unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1822
AVCOL_SPC_BT2020_NCL
@ AVCOL_SPC_BT2020_NCL
ITU-R BT2020 non-constant luminance system.
Definition: pixfmt.h:516
AVHWAccel::decode_slice
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:2129
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: internal.h:50
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:408
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:505
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AVVideoBlockParams
Data structure for storing block-level encoding information.
Definition: video_enc_params.h:120
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:263
get_sbits_inv
static av_always_inline int get_sbits_inv(GetBitContext *gb, int n)
Definition: vp9.c:358
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:209
update_thread_context
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have update_thread_context() run it in the next thread. Add AV_CODEC_CAP_FRAME_THREADS to the codec capabilities. There will be very little speed gain at this point but it should work. If there are inter-frame dependencies
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:508
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:593
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:562
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:271
avcodec.h
limit
static double limit(double x)
Definition: vf_pseudocolor.c:128
BL_64X64
@ BL_64X64
Definition: vp9shared.h:71
av_buffer_allocz
AVBufferRef * av_buffer_allocz(size_t size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:93
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
vp9_decode_init
static av_cold int vp9_decode_init(AVCodecContext *avctx)
Definition: vp9.c:1784
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:694
vp9_tile_data_free
static void vp9_tile_data_free(VP9TileData *td)
Definition: vp9.c:86
ff_thread_finish_setup
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards. If some code can 't be moved
VP9mvrefPair
Definition: vp9shared.h:54
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:395
pthread_cond_signal
static av_always_inline int pthread_cond_signal(pthread_cond_t *cond)
Definition: os2threads.h:152
AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
Decoding only.
Definition: avcodec.h:351
VP9TileData
Definition: vp9dec.h:165
VP56RangeCoder
Definition: vp56.h:87
AVCodecContext
main external API structure.
Definition: avcodec.h:383
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1459
VP9Filter::mask
uint8_t mask[2][2][8][4]
Definition: vp9dec.h:79
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AVHWAccel::frame_priv_data_size
int frame_priv_data_size
Size of per-frame hardware accelerator private data.
Definition: avcodec.h:2149
vp9_frame_unref
static void vp9_frame_unref(AVCodecContext *avctx, VP9Frame *f)
Definition: vp9.c:93
VP9Frame::mv
VP9mvrefPair * mv
Definition: vp9shared.h:63
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1525
ff_vp9_decode_block
void ff_vp9_decode_block(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl, enum BlockPartition bp)
Definition: vp9block.c:1260
NEARESTMV
@ NEARESTMV
Definition: vp9shared.h:42
ff_thread_get_format
FF_DISABLE_DEPRECATION_WARNINGS enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Wrapper around get_format() for frame-multithreaded codecs.
Definition: pthread_frame.c:1021
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
BlockLevel
BlockLevel
Definition: vp9shared.h:70
AVCodecContext::export_side_data
int export_side_data
Bit set of AV_CODEC_EXPORT_DATA_* flags, which affects the kind of metadata exported in frame,...
Definition: avcodec.h:1984
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
ff_pthread_init
av_cold int ff_pthread_init(void *obj, const unsigned offsets[])
Initialize/destroy a list of mutexes/conditions contained in a structure.
Definition: pthread.c:107
pthread_cond_wait
static av_always_inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
Definition: os2threads.h:192
vp9dec.h
AVHWAccel::start_frame
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: avcodec.h:2101
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
ff_vp9_default_kf_partition_probs
const uint8_t ff_vp9_default_kf_partition_probs[4][4][3]
Definition: vp9data.c:41
AV_VIDEO_ENC_PARAMS_VP9
@ AV_VIDEO_ENC_PARAMS_VP9
VP9 stores:
Definition: video_enc_params.h:44
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:86
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:272
ff_vp9_default_probs
const ProbContext ff_vp9_default_probs
Definition: vp9data.c:1435
FF_CODEC_CAP_ALLOCATE_PROGRESS
#define FF_CODEC_CAP_ALLOCATE_PROGRESS
Definition: internal.h:77
CUR_FRAME
#define CUR_FRAME
Definition: vp9shared.h:163
vp9_export_enc_params
static int vp9_export_enc_params(VP9Context *s, VP9Frame *frame)
Definition: vp9.c:1493
AVPacket
This structure stores compressed data.
Definition: packet.h:350
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:410
PARTITION_H
@ PARTITION_H
Definition: vp9shared.h:36
ff_vp9_loopfilter_sb
void ff_vp9_loopfilter_sb(AVCodecContext *avctx, VP9Filter *lflvl, int row, int col, ptrdiff_t yoff, ptrdiff_t uvoff)
Definition: vp9lpf.c:178
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
videodsp.h
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:73
d
d
Definition: ffmpeg_filter.c:156
HWACCEL_MAX
#define HWACCEL_MAX
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_video_enc_params_block
static av_always_inline AVVideoBlockParams * av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
Definition: video_enc_params.h:143
AV_PIX_FMT_YUV440P12
#define AV_PIX_FMT_YUV440P12
Definition: pixfmt.h:397
h
h
Definition: vp9dsp_template.c:2038
atomic_init
#define atomic_init(obj, value)
Definition: stdatomic.h:33
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
Definition: pixfmt.h:507
ff_vp9_profiles
const AVProfile ff_vp9_profiles[]
Definition: profiles.c:139
video_enc_params.h
set_tile_offset
static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
Definition: vp9.c:1207
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2484
update_prob
static int update_prob(VP56RangeCoder *c, int p)
Definition: vp9.c:374
ff_vp9_dc_qlookup
const int16_t ff_vp9_dc_qlookup[3][256]
Definition: vp9data.c:231
pthread_mutex_lock
#define pthread_mutex_lock(a)
Definition: ffprobe.c:64
ff_vp9_default_coef_probs
const uint8_t ff_vp9_default_coef_probs[4][2][2][6][6][3]
Definition: vp9data.c:1540